From 779e913181fc5636193bc762b5ec24faada4870c Mon Sep 17 00:00:00 2001 From: Prakash Date: Thu, 15 Aug 2024 23:23:54 +0530 Subject: [PATCH 01/61] migration number changes (#5692) --- ...an_plugin.down.sql => 270_improved_image_scan_plugin.down.sql} | 0 ...e_scan_plugin.up.sql => 270_improved_image_scan_plugin.up.sql} | 0 ...rent_metadata.down.sql => 271_plugin_parent_metadata.down.sql} | 0 ...n_parent_metadata.up.sql => 271_plugin_parent_metadata.up.sql} | 0 ...lugin_metadata.down.sql => 272_alter_plugin_metadata.down.sql} | 0 ...er_plugin_metadata.up.sql => 272_alter_plugin_metadata.up.sql} | 0 ...ls_support_in_git.down.sql => 273_tls_support_in_git.down.sql} | 0 ...72_tls_support_in_git.up.sql => 273_tls_support_in_git.up.sql} | 0 ..._system_controller.down.sql => 274_system_controller.down.sql} | 0 ...{273_system_controller.up.sql => 274_system_controller.up.sql} | 0 10 files changed, 0 insertions(+), 0 deletions(-) rename scripts/sql/{274_improved_image_scan_plugin.down.sql => 270_improved_image_scan_plugin.down.sql} (100%) rename scripts/sql/{274_improved_image_scan_plugin.up.sql => 270_improved_image_scan_plugin.up.sql} (100%) rename scripts/sql/{270_plugin_parent_metadata.down.sql => 271_plugin_parent_metadata.down.sql} (100%) rename scripts/sql/{270_plugin_parent_metadata.up.sql => 271_plugin_parent_metadata.up.sql} (100%) rename scripts/sql/{271_alter_plugin_metadata.down.sql => 272_alter_plugin_metadata.down.sql} (100%) rename scripts/sql/{271_alter_plugin_metadata.up.sql => 272_alter_plugin_metadata.up.sql} (100%) rename scripts/sql/{272_tls_support_in_git.down.sql => 273_tls_support_in_git.down.sql} (100%) rename scripts/sql/{272_tls_support_in_git.up.sql => 273_tls_support_in_git.up.sql} (100%) rename scripts/sql/{273_system_controller.down.sql => 274_system_controller.down.sql} (100%) rename scripts/sql/{273_system_controller.up.sql => 274_system_controller.up.sql} (100%) diff --git a/scripts/sql/274_improved_image_scan_plugin.down.sql b/scripts/sql/270_improved_image_scan_plugin.down.sql similarity index 100% rename from scripts/sql/274_improved_image_scan_plugin.down.sql rename to scripts/sql/270_improved_image_scan_plugin.down.sql diff --git a/scripts/sql/274_improved_image_scan_plugin.up.sql b/scripts/sql/270_improved_image_scan_plugin.up.sql similarity index 100% rename from scripts/sql/274_improved_image_scan_plugin.up.sql rename to scripts/sql/270_improved_image_scan_plugin.up.sql diff --git a/scripts/sql/270_plugin_parent_metadata.down.sql b/scripts/sql/271_plugin_parent_metadata.down.sql similarity index 100% rename from scripts/sql/270_plugin_parent_metadata.down.sql rename to scripts/sql/271_plugin_parent_metadata.down.sql diff --git a/scripts/sql/270_plugin_parent_metadata.up.sql b/scripts/sql/271_plugin_parent_metadata.up.sql similarity index 100% rename from scripts/sql/270_plugin_parent_metadata.up.sql rename to scripts/sql/271_plugin_parent_metadata.up.sql diff --git a/scripts/sql/271_alter_plugin_metadata.down.sql b/scripts/sql/272_alter_plugin_metadata.down.sql similarity index 100% rename from scripts/sql/271_alter_plugin_metadata.down.sql rename to scripts/sql/272_alter_plugin_metadata.down.sql diff --git a/scripts/sql/271_alter_plugin_metadata.up.sql b/scripts/sql/272_alter_plugin_metadata.up.sql similarity index 100% rename from scripts/sql/271_alter_plugin_metadata.up.sql rename to scripts/sql/272_alter_plugin_metadata.up.sql diff --git a/scripts/sql/272_tls_support_in_git.down.sql b/scripts/sql/273_tls_support_in_git.down.sql similarity index 100% 
rename from scripts/sql/272_tls_support_in_git.down.sql rename to scripts/sql/273_tls_support_in_git.down.sql diff --git a/scripts/sql/272_tls_support_in_git.up.sql b/scripts/sql/273_tls_support_in_git.up.sql similarity index 100% rename from scripts/sql/272_tls_support_in_git.up.sql rename to scripts/sql/273_tls_support_in_git.up.sql diff --git a/scripts/sql/273_system_controller.down.sql b/scripts/sql/274_system_controller.down.sql similarity index 100% rename from scripts/sql/273_system_controller.down.sql rename to scripts/sql/274_system_controller.down.sql diff --git a/scripts/sql/273_system_controller.up.sql b/scripts/sql/274_system_controller.up.sql similarity index 100% rename from scripts/sql/273_system_controller.up.sql rename to scripts/sql/274_system_controller.up.sql From 4f04d6b5eee794f768f025cf58dea0e9ce5f80f0 Mon Sep 17 00:00:00 2001 From: Prakash Date: Tue, 20 Aug 2024 12:35:33 +0530 Subject: [PATCH 02/61] refrain from checking autoscalingCheckBeforeTrigger for virtual clusters (#5696) --- pkg/deployment/manifest/ManifestCreationService.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/deployment/manifest/ManifestCreationService.go b/pkg/deployment/manifest/ManifestCreationService.go index 86641dfeac9..7547e067298 100644 --- a/pkg/deployment/manifest/ManifestCreationService.go +++ b/pkg/deployment/manifest/ManifestCreationService.go @@ -275,10 +275,12 @@ func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideReq // error is not returned as it's not blocking for deployment process // blocking deployments based on this use case can vary for user to user } - mergedValues, err = impl.autoscalingCheckBeforeTrigger(newCtx, appName, envOverride.Namespace, mergedValues, overrideRequest) - if err != nil { - impl.logger.Errorw("error in autoscaling check before trigger", "pipelineId", overrideRequest.PipelineId, "err", err) - return valuesOverrideResponse, err + if !envOverride.Environment.IsVirtualEnvironment { + mergedValues, err = impl.autoscalingCheckBeforeTrigger(newCtx, appName, envOverride.Namespace, mergedValues, overrideRequest) + if err != nil { + impl.logger.Errorw("error in autoscaling check before trigger", "pipelineId", overrideRequest.PipelineId, "err", err) + return valuesOverrideResponse, err + } } // handle image pull secret if access given mergedValues, err = impl.dockerRegistryIpsConfigService.HandleImagePullSecretOnApplicationDeployment(newCtx, envOverride.Environment, artifact, pipeline.CiPipelineId, mergedValues) From 2e58e77959a458671d0b70a7bab4041a9f5c4894 Mon Sep 17 00:00:00 2001 From: Prakash Date: Tue, 20 Aug 2024 12:50:03 +0530 Subject: [PATCH 03/61] fix: Decode secret fix on add/update (OSS) (#5695) * ValidateEncodedDataByDecoding in case of secret add or update * wire fix from main * minor refactor * comment --- cmd/external-app/wire_gen.go | 4 +-- pkg/pipeline/ConfigMapService.go | 42 ++++++++++++++++++++++++++------ util/encoding-utils.go | 10 ++++++++ wire_gen.go | 2 +- 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index c3a4512ff85..72533cab867 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -199,8 +199,8 @@ func InitializeApp() (*App, error) { userAuthServiceImpl := user.NewUserAuthServiceImpl(userAuthRepositoryImpl, sessionManager, loginService, sugaredLogger, userRepositoryImpl, roleGroupRepositoryImpl, userServiceImpl) teamServiceImpl := team.NewTeamServiceImpl(sugaredLogger,
teamRepositoryImpl, userAuthServiceImpl) clusterRepositoryImpl := repository2.NewClusterRepositoryImpl(db, sugaredLogger) - v := informer.NewGlobalMapClusterNamespace() - k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, v, k8sServiceImpl) + syncMap := informer.NewGlobalMapClusterNamespace() + k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, syncMap, k8sServiceImpl) clusterServiceImpl := cluster.NewClusterServiceImpl(clusterRepositoryImpl, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl) appStatusRepositoryImpl := appStatus.NewAppStatusRepositoryImpl(db, sugaredLogger) environmentRepositoryImpl := repository2.NewEnvironmentRepositoryImpl(db, sugaredLogger, appStatusRepositoryImpl) diff --git a/pkg/pipeline/ConfigMapService.go b/pkg/pipeline/ConfigMapService.go index 77e996dcce5..49c08772a15 100644 --- a/pkg/pipeline/ConfigMapService.go +++ b/pkg/pipeline/ConfigMapService.go @@ -34,7 +34,9 @@ import ( util2 "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" + "net/http" "regexp" + "strconv" "time" ) @@ -504,12 +506,19 @@ func (impl ConfigMapServiceImpl) CSGlobalAddUpdate(configMapRequest *bean.Config return nil, fmt.Errorf("invalid request multiple config found for add or update") } configData := configMapRequest.ConfigData[0] + // validating config/secret data at service layer since this func is consumed in multiple flows, hence preventing code duplication valid, err := impl.validateConfigData(configData) if err != nil && !valid { impl.logger.Errorw("error in validating", "error", err) return configMapRequest, err } + valid, err = impl.validateConfigDataForSecretsOnly(configData) + if err != nil && !valid { + impl.logger.Errorw("error in validating secrets only data", "error", err) + return configMapRequest, err + } + valid, err = impl.validateExternalSecretChartCompatibility(configMapRequest.AppId, configMapRequest.EnvironmentId, configData) if err != nil && !valid { impl.logger.Errorw("error in validating", "error", err) @@ -704,11 +713,17 @@ func (impl ConfigMapServiceImpl) CSEnvironmentAddUpdate(configMapRequest *bean.C } configData := configMapRequest.ConfigData[0] + // validating config/secret data at service layer since this func is consumed in multiple flows, hence preventing code duplication valid, err := impl.validateConfigData(configData) if err != nil && !valid { impl.logger.Errorw("error in validating", "error", err) return configMapRequest, err } + valid, err = impl.validateConfigDataForSecretsOnly(configData) + if err != nil && !valid { + impl.logger.Errorw("error in validating secrets only data", "error", err) + return configMapRequest, err + } valid, err = impl.validateExternalSecretChartCompatibility(configMapRequest.AppId, configMapRequest.EnvironmentId, configData) if err != nil && !valid { @@ -795,13 +810,6 @@ func (impl ConfigMapServiceImpl) CSEnvironmentAddUpdate(configMapRequest *bean.C } configMapRequest.Id = configMap.Id } - //VARIABLE_MAPPING_UPDATE - //sl := bean.SecretsList{} - //data, err := sl.GetTransformedDataForSecretList(model.SecretData, util2.DecodeSecret) - //if err != nil { - // return nil, err - //} - //err = impl.extractAndMapVariables(data, model.Id, repository5.EntityTypeSecretEnvLevel, configMapRequest.UserId) err = impl.scopedVariableManager.CreateVariableMappingsForSecretEnv(model) if err != nil { return nil, err @@ -1545,6 +1553,26 @@ func (impl ConfigMapServiceImpl) 
validateConfigData(configData *bean.ConfigData) return true, nil } +func (impl ConfigMapServiceImpl) validateConfigDataForSecretsOnly(configData *bean.ConfigData) (bool, error) { + + // check encoding in base64 for secret data + if len(configData.Data) > 0 { + dataMap := make(map[string]string) + err := json.Unmarshal(configData.Data, &dataMap) + if err != nil { + impl.logger.Errorw("error while unmarshalling secret data ", "error", err) + return false, err + } + err = util2.ValidateEncodedDataByDecoding(dataMap) + if err != nil { + impl.logger.Errorw("error in decoding secret data", "error", err) + return false, util.NewApiError().WithHttpStatusCode(http.StatusUnprocessableEntity).WithCode(strconv.Itoa(http.StatusUnprocessableEntity)). + WithUserMessage("error in decoding data, make sure the secret data is encoded properly") + } + } + return true, nil +} + func (impl ConfigMapServiceImpl) updateConfigData(configData *bean.ConfigData, syncRequest *bean.BulkPatchRequest) (*bean.ConfigData, error) { dataMap := make(map[string]string) var updatedData json.RawMessage diff --git a/util/encoding-utils.go b/util/encoding-utils.go index 88064a26bd5..82837c229cb 100644 --- a/util/encoding-utils.go +++ b/util/encoding-utils.go @@ -53,3 +53,13 @@ func GetDecodedAndEncodedData(data json.RawMessage, transformer SecretTransformM } return marshal, nil } + +func ValidateEncodedDataByDecoding(dataMap map[string]string) error { + for _, value := range dataMap { + _, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return err + } + } + return nil +} diff --git a/wire_gen.go b/wire_gen.go index b1ef4f1028b..d83a360fd09 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -714,7 +714,7 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - installedAppResourceServiceImpl := resource.NewInstalledAppResourceServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, applicationServiceClientImpl, acdAuthConfig, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, appStatusServiceImpl, k8sCommonServiceImpl, k8sApplicationServiceImpl, k8sServiceImpl, deploymentConfigServiceImpl) + installedAppResourceServiceImpl := resource.NewInstalledAppResourceServiceImpl(sugaredLogger, installedAppRepositoryImpl, appStoreApplicationVersionRepositoryImpl, applicationServiceClientImpl, acdAuthConfig, installedAppVersionHistoryRepositoryImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, appStatusServiceImpl, k8sCommonServiceImpl, k8sApplicationServiceImpl, k8sServiceImpl, deploymentConfigServiceImpl, ociRegistryConfigRepositoryImpl) chartGroupEntriesRepositoryImpl := repository17.NewChartGroupEntriesRepositoryImpl(db, sugaredLogger) chartGroupReposotoryImpl := repository17.NewChartGroupReposotoryImpl(db, sugaredLogger) chartGroupDeploymentRepositoryImpl := repository17.NewChartGroupDeploymentRepositoryImpl(db, sugaredLogger) From bf2351544aa57137980cbc3b6561b88d21e359b6 Mon Sep 17 00:00:00 2001 From: Prakash Date: Tue, 20 Aug 2024 12:55:08 +0530 Subject: [PATCH 04/61] saving pco concurrency case handled (#5688) --- .../sql/repository/chartConfig/PipelineOverrideRepository.go | 5 +++++ pkg/deployment/manifest/ManifestCreationService.go | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/sql/repository/chartConfig/PipelineOverrideRepository.go b/internal/sql/repository/chartConfig/PipelineOverrideRepository.go index aba3d7554c6..db8476d9132 100644 --- 
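Aside on PATCH 03 above: `ValidateEncodedDataByDecoding` takes a secret's data map and simply attempts a strict base64 decode of every value, which the service layer then maps to an HTTP 422 on failure. A minimal, self-contained sketch of the same check follows; the sample keys and values are made up, and note the caveat that some plain-text strings are coincidentally valid base64, so this only catches clearly malformed input:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// validateEncoded mirrors util.ValidateEncodedDataByDecoding from the patch:
// every value in the secret's data map must already be base64-encoded.
func validateEncoded(dataMap map[string]string) error {
	for key, value := range dataMap {
		if _, err := base64.StdEncoding.DecodeString(value); err != nil {
			return fmt.Errorf("value for key %q is not valid base64: %w", key, err)
		}
	}
	return nil
}

func main() {
	// "cGFzc3dvcmQ=" is base64 for "password", so this map passes.
	fmt.Println(validateEncoded(map[string]string{"pwd": "cGFzc3dvcmQ="})) // <nil>
	// A raw value with characters outside the base64 alphabet fails; the
	// ConfigMapService surfaces this as a 422 with a user-facing message.
	fmt.Println(validateEncoded(map[string]string{"pwd": "password!"}))
}
```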
a/internal/sql/repository/chartConfig/PipelineOverrideRepository.go +++ b/internal/sql/repository/chartConfig/PipelineOverrideRepository.go @@ -58,6 +58,7 @@ type PipelineConfigOverrideMetadata struct { type PipelineOverrideRepository interface { Save(*PipelineOverride) error + Update(pipelineOverride *PipelineOverride) error UpdateStatusByRequestIdentifier(requestId string, newStatus models.ChartStatus) (int, error) GetLatestConfigByRequestIdentifier(requestIdentifier string) (pipelineOverride *PipelineOverride, err error) GetLatestConfigByEnvironmentConfigOverrideId(envConfigOverrideId int) (pipelineOverride *PipelineOverride, err error) @@ -85,6 +86,10 @@ func (impl PipelineOverrideRepositoryImpl) Save(pipelineOverride *PipelineOverri return impl.dbConnection.Insert(pipelineOverride) } +func (impl PipelineOverrideRepositoryImpl) Update(pipelineOverride *PipelineOverride) error { + return impl.dbConnection.Update(pipelineOverride) +} + func (impl PipelineOverrideRepositoryImpl) UpdatePipelineMergedValues(ctx context.Context, tx *pg.Tx, id int, pipelineMergedValues string, userId int32) error { _, span := otel.Tracer("orchestrator").Start(ctx, "PipelineOverrideRepositoryImpl.UpdatePipelineMergedValues") defer span.End() diff --git a/pkg/deployment/manifest/ManifestCreationService.go b/pkg/deployment/manifest/ManifestCreationService.go index 7547e067298..9b2cc199274 100644 --- a/pkg/deployment/manifest/ManifestCreationService.go +++ b/pkg/deployment/manifest/ManifestCreationService.go @@ -808,7 +808,7 @@ func (impl *ManifestCreationServiceImpl) checkAndFixDuplicateReleaseNo(override return err } override.PipelineReleaseCounter = currentReleaseNo + 1 - err = impl.pipelineOverrideRepository.Save(override) + err = impl.pipelineOverrideRepository.Update(override) if err != nil { return err } From 694831c209a2a28883b299cb8c073ef2441dddca Mon Sep 17 00:00:00 2001 From: Prakash Date: Wed, 21 Aug 2024 13:30:04 +0530 Subject: [PATCH 05/61] fix: script for pipelineStageStepVariable, making input value and default_value text from varchar(255) (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar(255) * error log fix --- pkg/deployment/gitOps/git/GitOpsHelper.go | 1 + pkg/deployment/gitOps/git/GitServiceGithub.go | 1 + pkg/deployment/gitOps/git/commandManager/GitCliManager.go | 3 +++ pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go | 3 +++ scripts/sql/276_alter_pipeline_stage_step_variable.down.sql | 3 +++ scripts/sql/276_alter_pipeline_stage_step_variable.up.sql | 2 ++ 6 files changed, 13 insertions(+) create mode 100644 scripts/sql/276_alter_pipeline_stage_step_variable.down.sql create mode 100644 scripts/sql/276_alter_pipeline_stage_step_variable.up.sql diff --git a/pkg/deployment/gitOps/git/GitOpsHelper.go b/pkg/deployment/gitOps/git/GitOpsHelper.go index 2d204ef8a0d..bc7add2ff92 100644 --- a/pkg/deployment/gitOps/git/GitOpsHelper.go +++ b/pkg/deployment/gitOps/git/GitOpsHelper.go @@ -89,6 +89,7 @@ func (impl *GitOpsHelper) Clone(url, targetDir string) (clonedDir string, err er } } if errMsg != "" { + impl.logger.Errorw("error in git fetch command", "errMsg", errMsg, "err", err) return "", fmt.Errorf(errMsg) } return clonedDir, nil diff --git a/pkg/deployment/gitOps/git/GitServiceGithub.go b/pkg/deployment/gitOps/git/GitServiceGithub.go index 589d79c2ac7..b48d8b5ab43 100644 --- a/pkg/deployment/gitOps/git/GitServiceGithub.go +++ b/pkg/deployment/gitOps/git/GitServiceGithub.go @@ -259,6 +259,7 @@ func (impl GitHubClient) GetRepoUrl(config
*bean2.GitOpsConfigDto) (repoUrl string, err error) { ctx := context.Background() repo, _, err := impl.client.Repositories.Get(ctx, impl.org, config.GitRepoName) if err != nil { + impl.logger.Errorw("error in getting repo url by repo name", "org", impl.org, "gitRepoName", config.GitRepoName, "err", err) return "", err } return *repo.CloneURL, nil diff --git a/pkg/deployment/gitOps/git/commandManager/GitCliManager.go b/pkg/deployment/gitOps/git/commandManager/GitCliManager.go index 0501f67cb34..b5b3a3a146d 100644 --- a/pkg/deployment/gitOps/git/commandManager/GitCliManager.go +++ b/pkg/deployment/gitOps/git/commandManager/GitCliManager.go @@ -77,6 +77,9 @@ func (impl *GitCliManagerImpl) Pull(ctx GitContext, repoRoot string) (err error) return err } response, errMsg, err := impl.PullCli(ctx, repoRoot, "origin/master") + if err != nil { + impl.logger.Errorw("error in git pull from cli", "errMsg", errMsg, "err", err) + } if strings.Contains(response, "already up-to-date") || strings.Contains(errMsg, "already up-to-date") { err = nil diff --git a/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go b/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go index 950a0bf4516..3d70a73c311 100644 --- a/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go +++ b/pkg/deployment/gitOps/git/commandManager/GoGitSdkManager.go @@ -56,6 +56,9 @@ func (impl GoGitSDKManagerImpl) Pull(ctx GitContext, repoRoot string) (err error } err = workTree.PullContext(ctx, pullOptions) + if err != nil { + impl.logger.Errorw("error in git pull from go-git", "err", err) + } if err != nil && err.Error() == "already up-to-date" { err = nil return nil diff --git a/scripts/sql/276_alter_pipeline_stage_step_variable.down.sql b/scripts/sql/276_alter_pipeline_stage_step_variable.down.sql new file mode 100644 index 00000000000..bad0b4c4928 --- /dev/null +++ b/scripts/sql/276_alter_pipeline_stage_step_variable.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE pipeline_stage_step_variable ALTER COLUMN default_value TYPE VARCHAR(255); +ALTER TABLE pipeline_stage_step_variable ALTER COLUMN value TYPE VARCHAR(255); + diff --git a/scripts/sql/276_alter_pipeline_stage_step_variable.up.sql b/scripts/sql/276_alter_pipeline_stage_step_variable.up.sql new file mode 100644 index 00000000000..cbcf6515c90 --- /dev/null +++ b/scripts/sql/276_alter_pipeline_stage_step_variable.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE pipeline_stage_step_variable ALTER COLUMN value TYPE text; +ALTER TABLE pipeline_stage_step_variable ALTER COLUMN default_value TYPE text; From 3e31f49f95d373f92b13afbe1806606ac4a39d85 Mon Sep 17 00:00:00 2001 From: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Date: Wed, 21 Aug 2024 17:09:24 +0530 Subject: [PATCH 06/61] fix: ea fixes for helm app (#5708) * added the ea apps entry in the app table * resolved the ea mode multiple rows error during app configuration * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app creation path untouched * restored the dockerfile to its previous state * modified the dockerfile in ea mode * fixed dockerfile exit code 100 due to the apt install alternative in the ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review --- pkg/app/AppCrudOperationService.go | 78 +++++++++++++++++-- pkg/app/helper.go | 23 +++++- .../service/AppStoreDeploymentDBService.go | 1 - .../service/EAMode/InstalledAppDBService.go | 54 +++++++++++++ 4 files changed, 149 insertions(+), 7 deletions(-) diff --git
a/pkg/app/AppCrudOperationService.go b/pkg/app/AppCrudOperationService.go index a7035d7603f..3c62046235f 100644 --- a/pkg/app/AppCrudOperationService.go +++ b/pkg/app/AppCrudOperationService.go @@ -19,6 +19,7 @@ package app import ( "context" "encoding/json" + "errors" "fmt" "github.com/caarlos0/env" client "github.com/devtron-labs/devtron/api/helm-app/service" @@ -457,13 +458,67 @@ func convertUrlToHttpsIfSshType(url string) string { return httpsURL } +// handleDuplicateAppEntries identifies and resolves duplicate app entries based on creation time. +// It marks the most recent duplicate entry as inactive and updates the corresponding installed app. +func (impl AppCrudOperationServiceImpl) handleDuplicateAppEntries(appNameUniqueIdentifier string) (*appRepository.App, error) { + // Fetch app IDs by name + appIds, err := impl.getAppIdsByName(appNameUniqueIdentifier) + if err != nil { + impl.logger.Errorw("error in fetching app Ids by appIdentifier", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) + return nil, err + } + + // Fetch apps by IDs from App table for duplicated entries + apps, err := impl.appRepository.FindByIds(appIds) + if err != nil || errors.Is(err, pg.ErrNoRows) { + impl.logger.Errorw("error in fetching app List by appIds", "appIds", appIds, "err", err) + return nil, err + } + + // Identify the earliest and duplicated app entries + earliestApp, duplicatedApp := identifyDuplicateApps(apps) + + // Fetch the installed app associated with the duplicated app + installedApp, err := impl.installedAppRepository.GetInstalledAppsByAppId(duplicatedApp.Id) + if err != nil { + impl.logger.Errorw("error in fetching installed app by appId", "appId", duplicatedApp.Id, "err", err) + return nil, err + } + // Update duplicated app entries + err = impl.installedAppDbService.UpdateDuplicatedEntriesInAppAndInstalledApps(earliestApp, duplicatedApp, &installedApp) + if err != nil { + impl.logger.Errorw("error in updating duplicated entries", "earliestApp", earliestApp, "duplicatedApp", duplicatedApp, "err", err) + return nil, err + } + + impl.logger.Debug("Successfully resolved duplicate app entries", "earliestApp", earliestApp, "duplicatedApp", duplicatedApp) + return earliestApp, nil + +} + +// getAppIdsByName fetches app IDs by the app name unique identifier [for duplicated active app] +func (impl AppCrudOperationServiceImpl) getAppIdsByName(appNameUniqueIdentifier string) ([]*int, error) { + slice := []string{appNameUniqueIdentifier} + appIds, err := impl.appRepository.FindIdsByNames(slice) + if err != nil { + return nil, err + } + + // Convert each element to a pointer and store in a slice of pointers + ids := make([]*int, len(appIds)) + for i := range appIds { + ids[i] = &appIds[i] + } + return ids, nil +} + // getAppAndProjectForAppIdentifier, returns app db model for an app unique identifier or from display_name if both exists else it throws pg.ErrNoRows func (impl AppCrudOperationServiceImpl) getAppAndProjectForAppIdentifier(appIdentifier *helmBean.AppIdentifier) (*appRepository.App, error) { app := &appRepository.App{} var err error appNameUniqueIdentifier := appIdentifier.GetUniqueAppNameIdentifier() app, err = impl.appRepository.FindAppAndProjectByAppName(appNameUniqueIdentifier) - if err != nil && err != pg.ErrNoRows { + if err != nil && !errors.Is(err, pg.ErrNoRows) && !errors.Is(err, pg.ErrMultiRows) { impl.logger.Errorw("error in fetching app meta data by unique app identifier", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) return app, err } @@ 
-475,6 +530,14 @@ func (impl AppCrudOperationServiceImpl) getAppAndProjectForAppIdentifier(appIden return app, err } } + if errors.Is(err, pg.ErrMultiRows) { + + app, err = impl.handleDuplicateAppEntries(appNameUniqueIdentifier) + if err != nil { + impl.logger.Errorw("error in handling Duplicate entries in the app", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) + return app, err + } + } return app, nil } @@ -532,17 +595,17 @@ func (impl AppCrudOperationServiceImpl) GetHelmAppMetaInfo(appId string) (*bean. return nil, err } // if app.DisplayName is empty then that app_name is not yet migrated to app name unique identifier - if app.Id > 0 && len(app.DisplayName) == 0 { + if app != nil && app.Id > 0 && len(app.DisplayName) == 0 { err = impl.updateAppNameToUniqueAppIdentifierInApp(app, appIdDecoded) if err != nil { impl.logger.Errorw("GetHelmAppMetaInfo, error in migrating displayName and appName to unique identifier for external apps", "appIdentifier", appIdDecoded, "err", err) //not returning from here as we need to show helm app metadata even if migration of app_name fails, then migration can happen on project update } } - if app.Id == 0 { + if app != nil && app.Id == 0 { app.AppName = appIdDecoded.ReleaseName } - if util2.IsExternalChartStoreApp(app.DisplayName) { + if app != nil && util2.IsExternalChartStoreApp(app.DisplayName) { displayName = app.DisplayName } @@ -568,9 +631,14 @@ func (impl AppCrudOperationServiceImpl) GetHelmAppMetaInfo(appId string) (*bean. displayName = InstalledApp.App.DisplayName } } + // Safeguard against nil app cases + if app == nil { + impl.logger.Errorw("no rows found for the requested app", "appId", appId, "error", err) + return nil, fmt.Errorf("no rows found for the requested app, %q", pg.ErrNoRows) + } user, err := impl.userRepository.GetByIdIncludeDeleted(app.CreatedBy) - if err != nil && err != pg.ErrNoRows { + if err != nil && !errors.Is(err, pg.ErrNoRows) { impl.logger.Errorw("error in fetching user for app meta info", "error", err) return nil, err } diff --git a/pkg/app/helper.go b/pkg/app/helper.go index 2ca99c6b149..0e8fff65a43 100644 --- a/pkg/app/helper.go +++ b/pkg/app/helper.go @@ -16,7 +16,10 @@ package app -import "strings" +import ( + appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" + "strings" +) // LabelMatchingRegex is the official k8s label matching regex, pls refer https://github.com/kubernetes/apimachinery/blob/bfd2aff97e594f6aad77acbe2cbbe190acc93cbc/pkg/util/validation/validation.go#L167 const LabelMatchingRegex = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" @@ -43,3 +46,21 @@ func sanitizeLabels(extraAppLabels map[string]string) map[string]string { } return extraAppLabels } + +// identifyDuplicateApps identifies the earliest created app and the most recent duplicate app. 
+func identifyDuplicateApps(apps []*appRepository.App) (earliestApp *appRepository.App, duplicatedApp *appRepository.App) { + if len(apps) == 0 { + return nil, nil + } + earliestApp = apps[0] + duplicatedApp = apps[0] + for _, app := range apps[1:] { + if app.AuditLog.CreatedOn.Before(earliestApp.AuditLog.CreatedOn) { + earliestApp = app + } + if app.AuditLog.CreatedOn.After(duplicatedApp.AuditLog.CreatedOn) { + duplicatedApp = app + } + } + return earliestApp, duplicatedApp +} diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go index 33f1a7b9a19..53d9b8b5e83 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go @@ -133,7 +133,6 @@ func (impl *AppStoreDeploymentDBServiceImpl) AppStoreDeployOperationDB(installRe } // setting additional env data required in appStoreBean.InstallAppVersionDTO adapter.UpdateAdditionalEnvDetails(installRequest, environment) - impl.appStoreValidator.Validate(installRequest, environment) // Stage 1: Create App in tx (Only if AppId is not set already) diff --git a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go index 6eb4156e245..810f3c3b8c1 100644 --- a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go +++ b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go @@ -58,6 +58,7 @@ type InstalledAppDBService interface { GetReleaseInfo(appIdentifier *helmBean.AppIdentifier) (*appStoreBean.InstallAppVersionDTO, error) IsExternalAppLinkedToChartStore(appId int) (bool, []*appStoreRepo.InstalledApps, error) CreateNewAppEntryForAllInstalledApps(installedApps []*appStoreRepo.InstalledApps) error + UpdateDuplicatedEntriesInAppAndInstalledApps(earlyApp *app.App, duplicatedApp *app.App, installedApp *appStoreRepo.InstalledApps) error } type InstalledAppDBServiceImpl struct { @@ -399,6 +400,17 @@ func (impl *InstalledAppDBServiceImpl) CreateNewAppEntryForAllInstalledApps(inst // Rollback tx on error. 
defer tx.Rollback() for _, installedApp := range installedApps { + + //check if there is any app from its appName is exits and active ...if yes then we will not insert any extra entry in the db + appMetadataByAppName, err := impl.AppRepository.FindActiveByName(installedApp.App.AppName) + if err != nil && !util.IsErrNoRows(err) { + impl.Logger.Errorw("error in fetching app by unique app identifier", "appNameUniqueIdentifier", installedApp.GetUniqueAppNameIdentifier(), "err", err) + return err + } + if appMetadataByAppName != nil && appMetadataByAppName.Id > 0 { + //app already exists for this unique identifier hence not creating new app entry for this as it will get modified after this function + continue + } //check if for this unique identifier name an app already exists, if yes then continue appMetadata, err := impl.AppRepository.FindActiveByName(installedApp.GetUniqueAppNameIdentifier()) if err != nil && !util.IsErrNoRows(err) { @@ -437,3 +449,45 @@ func (impl *InstalledAppDBServiceImpl) CreateNewAppEntryForAllInstalledApps(inst tx.Commit() return nil } + +// UpdateDuplicatedEntriesInAppAndInstalledApps performs the updation in app table and installedApps table for the cases when multiple active app found [typically two due to migration], here we are updating the db with its previous value in the installedApps table and early created app id +func (impl *InstalledAppDBServiceImpl) UpdateDuplicatedEntriesInAppAndInstalledApps(earlyApp *app.App, duplicatedApp *app.App, installedApp *appStoreRepo.InstalledApps) error { + // db operations + dbConnection := impl.InstalledAppRepository.GetConnection() + tx, err := dbConnection.Begin() + if err != nil { + return err + } + // Rollback tx on error. + defer func(tx *pg.Tx) { + err := tx.Rollback() + if err != nil { + impl.Logger.Errorw("Rollback error", "err", err) + } + }(tx) + + //updated the app table with active column as false for the duplicated app + duplicatedApp.Active = false + duplicatedApp.CreateAuditLog(bean3.SystemUserId) + err = impl.AppRepository.UpdateWithTxn(duplicatedApp, tx) + if err != nil { + impl.Logger.Errorw("error saving appModel", "err", err) + return err + } + + // updating the installedApps table with its appId column with the previous app + installedApp.AppId = earlyApp.Id + installedApp.UpdateAuditLog(bean3.SystemUserId) + _, err = impl.InstalledAppRepository.UpdateInstalledApp(installedApp, tx) + if err != nil { + impl.Logger.Errorw("error saving updating installed app with new appId", "installedAppId", installedApp.Id, "err", err) + return err + } + + err = tx.Commit() + if err != nil { + impl.Logger.Errorw("error saving appModel", "err", err) + return err + } + return nil +} From 8de88d77a1cb5d0db32714473062cd88a8b20039 Mon Sep 17 00:00:00 2001 From: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:38:03 +0530 Subject: [PATCH 07/61] Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
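PATCH 07 below reverts PATCH 06 in full, but the selection rule in PATCH 06's `identifyDuplicateApps` is worth pinning down: among the duplicate rows, the earliest-created app is kept and the most recently created one is deactivated. A small runnable sketch of that logic, using trimmed stand-ins for the repository types (the `App` and `AuditLog` shapes here are simplified assumptions, not the real structs):

```go
package main

import (
	"fmt"
	"time"
)

// Trimmed stand-ins for the repository types referenced by the patch.
type AuditLog struct{ CreatedOn time.Time }
type App struct {
	Id       int
	AuditLog AuditLog
}

// Same scan as PATCH 06: earliest CreatedOn wins, latest is flagged as the duplicate.
func identifyDuplicateApps(apps []*App) (earliestApp, duplicatedApp *App) {
	if len(apps) == 0 {
		return nil, nil
	}
	earliestApp, duplicatedApp = apps[0], apps[0]
	for _, app := range apps[1:] {
		if app.AuditLog.CreatedOn.Before(earliestApp.AuditLog.CreatedOn) {
			earliestApp = app
		}
		if app.AuditLog.CreatedOn.After(duplicatedApp.AuditLog.CreatedOn) {
			duplicatedApp = app
		}
	}
	return earliestApp, duplicatedApp
}

func main() {
	t0 := time.Date(2024, 8, 1, 0, 0, 0, 0, time.UTC)
	apps := []*App{
		{Id: 2, AuditLog: AuditLog{CreatedOn: t0.Add(48 * time.Hour)}}, // migration duplicate
		{Id: 1, AuditLog: AuditLog{CreatedOn: t0}},                     // original row
	}
	keep, deactivate := identifyDuplicateApps(apps)
	fmt.Println(keep.Id, deactivate.Id) // 1 2: keep app 1, mark app 2 inactive
}
```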
--- pkg/app/AppCrudOperationService.go | 78 ++----------------- pkg/app/helper.go | 23 +----- .../service/AppStoreDeploymentDBService.go | 1 + .../service/EAMode/InstalledAppDBService.go | 54 ------------- 4 files changed, 7 insertions(+), 149 deletions(-) diff --git a/pkg/app/AppCrudOperationService.go b/pkg/app/AppCrudOperationService.go index 3c62046235f..a7035d7603f 100644 --- a/pkg/app/AppCrudOperationService.go +++ b/pkg/app/AppCrudOperationService.go @@ -19,7 +19,6 @@ package app import ( "context" "encoding/json" - "errors" "fmt" "github.com/caarlos0/env" client "github.com/devtron-labs/devtron/api/helm-app/service" @@ -458,67 +457,13 @@ func convertUrlToHttpsIfSshType(url string) string { return httpsURL } -// handleDuplicateAppEntries identifies and resolves duplicate app entries based on creation time. -// It marks the most recent duplicate entry as inactive and updates the corresponding installed app. -func (impl AppCrudOperationServiceImpl) handleDuplicateAppEntries(appNameUniqueIdentifier string) (*appRepository.App, error) { - // Fetch app IDs by name - appIds, err := impl.getAppIdsByName(appNameUniqueIdentifier) - if err != nil { - impl.logger.Errorw("error in fetching app Ids by appIdentifier", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) - return nil, err - } - - // Fetch apps by IDs from App table for duplicated entries - apps, err := impl.appRepository.FindByIds(appIds) - if err != nil || errors.Is(err, pg.ErrNoRows) { - impl.logger.Errorw("error in fetching app List by appIds", "appIds", appIds, "err", err) - return nil, err - } - - // Identify the earliest and duplicated app entries - earliestApp, duplicatedApp := identifyDuplicateApps(apps) - - // Fetch the installed app associated with the duplicated app - installedApp, err := impl.installedAppRepository.GetInstalledAppsByAppId(duplicatedApp.Id) - if err != nil { - impl.logger.Errorw("error in fetching installed app by appId", "appId", duplicatedApp.Id, "err", err) - return nil, err - } - // Update duplicated app entries - err = impl.installedAppDbService.UpdateDuplicatedEntriesInAppAndInstalledApps(earliestApp, duplicatedApp, &installedApp) - if err != nil { - impl.logger.Errorw("error in updating duplicated entries", "earliestApp", earliestApp, "duplicatedApp", duplicatedApp, "err", err) - return nil, err - } - - impl.logger.Debug("Successfully resolved duplicate app entries", "earliestApp", earliestApp, "duplicatedApp", duplicatedApp) - return earliestApp, nil - -} - -// getAppIdsByName fetches app IDs by the app name unique identifier [for duplicated active app] -func (impl AppCrudOperationServiceImpl) getAppIdsByName(appNameUniqueIdentifier string) ([]*int, error) { - slice := []string{appNameUniqueIdentifier} - appIds, err := impl.appRepository.FindIdsByNames(slice) - if err != nil { - return nil, err - } - - // Convert each element to a pointer and store in a slice of pointers - ids := make([]*int, len(appIds)) - for i := range appIds { - ids[i] = &appIds[i] - } - return ids, nil -} - // getAppAndProjectForAppIdentifier, returns app db model for an app unique identifier or from display_name if both exists else it throws pg.ErrNoRows func (impl AppCrudOperationServiceImpl) getAppAndProjectForAppIdentifier(appIdentifier *helmBean.AppIdentifier) (*appRepository.App, error) { app := &appRepository.App{} var err error appNameUniqueIdentifier := appIdentifier.GetUniqueAppNameIdentifier() app, err = impl.appRepository.FindAppAndProjectByAppName(appNameUniqueIdentifier) - if err != nil && 
!errors.Is(err, pg.ErrNoRows) && !errors.Is(err, pg.ErrMultiRows) { + if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching app meta data by unique app identifier", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) return app, err } @@ -530,14 +475,6 @@ func (impl AppCrudOperationServiceImpl) getAppAndProjectForAppIdentifier(appIden return app, err } } - if errors.Is(err, pg.ErrMultiRows) { - - app, err = impl.handleDuplicateAppEntries(appNameUniqueIdentifier) - if err != nil { - impl.logger.Errorw("error in handling Duplicate entries in the app", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err) - return app, err - } - } return app, nil } @@ -595,17 +532,17 @@ func (impl AppCrudOperationServiceImpl) GetHelmAppMetaInfo(appId string) (*bean. return nil, err } // if app.DisplayName is empty then that app_name is not yet migrated to app name unique identifier - if app != nil && app.Id > 0 && len(app.DisplayName) == 0 { + if app.Id > 0 && len(app.DisplayName) == 0 { err = impl.updateAppNameToUniqueAppIdentifierInApp(app, appIdDecoded) if err != nil { impl.logger.Errorw("GetHelmAppMetaInfo, error in migrating displayName and appName to unique identifier for external apps", "appIdentifier", appIdDecoded, "err", err) //not returning from here as we need to show helm app metadata even if migration of app_name fails, then migration can happen on project update } } - if app != nil && app.Id == 0 { + if app.Id == 0 { app.AppName = appIdDecoded.ReleaseName } - if app != nil && util2.IsExternalChartStoreApp(app.DisplayName) { + if util2.IsExternalChartStoreApp(app.DisplayName) { displayName = app.DisplayName } @@ -631,14 +568,9 @@ func (impl AppCrudOperationServiceImpl) GetHelmAppMetaInfo(appId string) (*bean. displayName = InstalledApp.App.DisplayName } } - // Safeguard against nil app cases - if app == nil { - impl.logger.Errorw("no rows found for the requested app", "appId", appId, "error", err) - return nil, fmt.Errorf("no rows found for the requested app, %q", pg.ErrNoRows) - } user, err := impl.userRepository.GetByIdIncludeDeleted(app.CreatedBy) - if err != nil && !errors.Is(err, pg.ErrNoRows) { + if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching user for app meta info", "error", err) return nil, err } diff --git a/pkg/app/helper.go b/pkg/app/helper.go index 0e8fff65a43..2ca99c6b149 100644 --- a/pkg/app/helper.go +++ b/pkg/app/helper.go @@ -16,10 +16,7 @@ package app -import ( - appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" - "strings" -) +import "strings" // LabelMatchingRegex is the official k8s label matching regex, pls refer https://github.com/kubernetes/apimachinery/blob/bfd2aff97e594f6aad77acbe2cbbe190acc93cbc/pkg/util/validation/validation.go#L167 const LabelMatchingRegex = "^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$" @@ -46,21 +43,3 @@ func sanitizeLabels(extraAppLabels map[string]string) map[string]string { } return extraAppLabels } - -// identifyDuplicateApps identifies the earliest created app and the most recent duplicate app. 
-func identifyDuplicateApps(apps []*appRepository.App) (earliestApp *appRepository.App, duplicatedApp *appRepository.App) { - if len(apps) == 0 { - return nil, nil - } - earliestApp = apps[0] - duplicatedApp = apps[0] - for _, app := range apps[1:] { - if app.AuditLog.CreatedOn.Before(earliestApp.AuditLog.CreatedOn) { - earliestApp = app - } - if app.AuditLog.CreatedOn.After(duplicatedApp.AuditLog.CreatedOn) { - duplicatedApp = app - } - } - return earliestApp, duplicatedApp -} diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go index 53d9b8b5e83..33f1a7b9a19 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go @@ -133,6 +133,7 @@ func (impl *AppStoreDeploymentDBServiceImpl) AppStoreDeployOperationDB(installRe } // setting additional env data required in appStoreBean.InstallAppVersionDTO adapter.UpdateAdditionalEnvDetails(installRequest, environment) + impl.appStoreValidator.Validate(installRequest, environment) // Stage 1: Create App in tx (Only if AppId is not set already) diff --git a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go index 810f3c3b8c1..6eb4156e245 100644 --- a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go +++ b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go @@ -58,7 +58,6 @@ type InstalledAppDBService interface { GetReleaseInfo(appIdentifier *helmBean.AppIdentifier) (*appStoreBean.InstallAppVersionDTO, error) IsExternalAppLinkedToChartStore(appId int) (bool, []*appStoreRepo.InstalledApps, error) CreateNewAppEntryForAllInstalledApps(installedApps []*appStoreRepo.InstalledApps) error - UpdateDuplicatedEntriesInAppAndInstalledApps(earlyApp *app.App, duplicatedApp *app.App, installedApp *appStoreRepo.InstalledApps) error } type InstalledAppDBServiceImpl struct { @@ -400,17 +399,6 @@ func (impl *InstalledAppDBServiceImpl) CreateNewAppEntryForAllInstalledApps(inst // Rollback tx on error. 
defer tx.Rollback() for _, installedApp := range installedApps { - -check if there is any app from its appName is exits and active ...if yes then we will not insert any extra entry in the db - appMetadataByAppName, err := impl.AppRepository.FindActiveByName(installedApp.App.AppName) - if err != nil && !util.IsErrNoRows(err) { - impl.Logger.Errorw("error in fetching app by unique app identifier", "appNameUniqueIdentifier", installedApp.GetUniqueAppNameIdentifier(), "err", err) - return err - } - if appMetadataByAppName != nil && appMetadataByAppName.Id > 0 { - //app already exists for this unique identifier hence not creating new app entry for this as it will get modified after this function - continue - } //check if for this unique identifier name an app already exists, if yes then continue appMetadata, err := impl.AppRepository.FindActiveByName(installedApp.GetUniqueAppNameIdentifier()) if err != nil && !util.IsErrNoRows(err) { @@ -449,45 +437,3 @@ func (impl *InstalledAppDBServiceImpl) CreateNewAppEntryForAllInstalledApps(inst tx.Commit() return nil } - -// UpdateDuplicatedEntriesInAppAndInstalledApps performs the updation in app table and installedApps table for the cases when multiple active app found [typically two due to migration], here we are updating the db with its previous value in the installedApps table and early created app id -func (impl *InstalledAppDBServiceImpl) UpdateDuplicatedEntriesInAppAndInstalledApps(earlyApp *app.App, duplicatedApp *app.App, installedApp *appStoreRepo.InstalledApps) error { - // db operations - dbConnection := impl.InstalledAppRepository.GetConnection() - tx, err := dbConnection.Begin() - if err != nil { - return err - } - // Rollback tx on error. - defer func(tx *pg.Tx) { - err := tx.Rollback() - if err != nil { - impl.Logger.Errorw("Rollback error", "err", err) - } - }(tx) - - //updated the app table with active column as false for the duplicated app - duplicatedApp.Active = false - duplicatedApp.CreateAuditLog(bean3.SystemUserId) - err = impl.AppRepository.UpdateWithTxn(duplicatedApp, tx) - if err != nil { - impl.Logger.Errorw("error saving appModel", "err", err) - return err - } - - // updating the installedApps table with its appId column with the previous app - installedApp.AppId = earlyApp.Id - installedApp.UpdateAuditLog(bean3.SystemUserId) - _, err = impl.InstalledAppRepository.UpdateInstalledApp(installedApp, tx) - if err != nil { - impl.Logger.Errorw("error saving updating installed app with new appId", "installedAppId", installedApp.Id, "err", err) - return err - } - - err = tx.Commit() - if err != nil { - impl.Logger.Errorw("error saving appModel", "err", err) - return err - } - return nil -} From 378c2d9805e38892aa5a999858a3da04235b99e4 Mon Sep 17 00:00:00 2001 From: Prakash Date: Thu, 22 Aug 2024 14:12:41 +0530 Subject: [PATCH 08/61] fix: SkipCiBuildCachePushPull code incorporated with minor refactor in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refactor in handle runtime params validation * minor refactor * minor refactor --- env_gen.md | 1 + pkg/pipeline/CiService.go | 30 ++++++++++++------- pkg/pipeline/bean/CiPipeline/CiBuildConfig.go | 5 ++++ pkg/pipeline/types/CiCdConfig.go | 1 + 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/env_gen.md b/env_gen.md index 6e3c3e4f9f8..2f4d37a3d21 100644 --- a/env_gen.md +++ b/env_gen.md @@ -231,6 +231,7 @@ | SCOPED_VARIABLE_HANDLE_PRIMITIVES | false | | | SCOPED_VARIABLE_NAME_REGEX | ^[a-zA-Z][a-zA-Z0-9_-]{0,62}[a-zA-Z0-9]$ | |
| SHOW_DOCKER_BUILD_ARGS | true | | + | SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL | false | | | SKIP_CREATING_ECR_REPO | false | | | SOCKET_DISCONNECT_DELAY_SECONDS | 5 | | | SOCKET_HEARTBEAT_SECONDS | 25 | | diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index 2d8701d4dd3..d16dd55199d 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -158,27 +158,33 @@ func (impl *CiServiceImpl) GetCiMaterials(pipelineId int, ciMaterials []*pipelin } } -func (impl *CiServiceImpl) TriggerCiPipeline(trigger types.Trigger) (int, error) { - impl.Logger.Debug("ci pipeline manual trigger") - ciMaterials, err := impl.GetCiMaterials(trigger.PipelineId, trigger.CiMaterials) - if err != nil { - return 0, err - } - +func (impl *CiServiceImpl) handleRuntimeParamsValidations(trigger types.Trigger, ciMaterials []*pipelineConfig.CiPipelineMaterial) error { // checking if user has given run time parameters for externalCiArtifact, if given then sending git material to Ci-Runner - externalCiArtifact, exists := trigger.ExtraEnvironmentVariables["externalCiArtifact"] + externalCiArtifact, exists := trigger.ExtraEnvironmentVariables[CiPipeline.ExtraEnvVarExternalCiArtifactKey] // validate externalCiArtifact as docker image if exists { if !strings.Contains(externalCiArtifact, ":") { impl.Logger.Errorw("validation error", "externalCiArtifact", externalCiArtifact) - return 0, fmt.Errorf("invalid image name given in externalCiArtifact") + return fmt.Errorf("invalid image name given in externalCiArtifact") } } if trigger.PipelineType == string(CiPipeline.CI_JOB) && len(ciMaterials) != 0 && !exists && externalCiArtifact == "" { - ciMaterials = []*pipelineConfig.CiPipelineMaterial{ciMaterials[0]} ciMaterials[0].GitMaterial = nil ciMaterials[0].GitMaterialId = 0 } + return nil +} + +func (impl *CiServiceImpl) TriggerCiPipeline(trigger types.Trigger) (int, error) { + impl.Logger.Debug("ci pipeline manual trigger") + ciMaterials, err := impl.GetCiMaterials(trigger.PipelineId, trigger.CiMaterials) + if err != nil { + return 0, err + } + err = impl.handleRuntimeParamsValidations(trigger, ciMaterials) + if err != nil { + return 0, err + } ciPipelineScripts, err := impl.ciPipelineRepository.FindCiScriptsByCiPipelineId(trigger.PipelineId) if err != nil && !util.IsErrNoRows(err) { return 0, err @@ -741,6 +747,10 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. 
if pipeline.App.AppType == helper.Job { workflowRequest.AppName = pipeline.App.DisplayName } + if trigger.PipelineType == string(CiPipeline.CI_JOB) { + workflowRequest.IgnoreDockerCachePush = impl.config.SkipCiJobBuildCachePushPull + workflowRequest.IgnoreDockerCachePull = impl.config.SkipCiJobBuildCachePushPull + } if dockerRegistry != nil { workflowRequest.DockerRegistryId = dockerRegistry.Id diff --git a/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go b/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go index ef24b43410f..0050dc19f3f 100644 --- a/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go +++ b/pkg/pipeline/bean/CiPipeline/CiBuildConfig.go @@ -85,3 +85,8 @@ func (pType PipelineType) IsValidPipelineType() bool { return false } } + +const ( + ExtraEnvVarExternalCiArtifactKey = "externalCiArtifact" + ExtraEnvVarImageDigestKey = "imageDigest" +) diff --git a/pkg/pipeline/types/CiCdConfig.go b/pkg/pipeline/types/CiCdConfig.go index bf7e82fe3e7..50e7d272712 100644 --- a/pkg/pipeline/types/CiCdConfig.go +++ b/pkg/pipeline/types/CiCdConfig.go @@ -82,6 +82,7 @@ type CiCdConfig struct { ImageScanRetryDelay int `env:"IMAGE_SCAN_RETRY_DELAY" envDefault:"5"` ShowDockerBuildCmdInLogs bool `env:"SHOW_DOCKER_BUILD_ARGS" envDefault:"true"` IgnoreCmCsInCiJob bool `env:"IGNORE_CM_CS_IN_CI_JOB" envDefault:"false"` + SkipCiJobBuildCachePushPull bool `env:"SKIP_CI_JOB_BUILD_CACHE_PUSH_PULL" envDefault:"false"` // from CdConfig CdLimitCpu string `env:"CD_LIMIT_CI_CPU" envDefault:"0.5"` CdLimitMem string `env:"CD_LIMIT_CI_MEM" envDefault:"3G"` From 827608f03c3d9f4d8d0517b5d4051598d91661f1 Mon Sep 17 00:00:00 2001 From: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Date: Fri, 23 Aug 2024 12:14:05 +0530 Subject: [PATCH 09/61] migration sync with ent (#5718) --- .../277_insert_devtron_resource_searchable_key_table.down.sql | 1 + .../277_insert_devtron_resource_searchable_key_table.up.sql | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 scripts/sql/277_insert_devtron_resource_searchable_key_table.down.sql create mode 100644 scripts/sql/277_insert_devtron_resource_searchable_key_table.up.sql diff --git a/scripts/sql/277_insert_devtron_resource_searchable_key_table.down.sql b/scripts/sql/277_insert_devtron_resource_searchable_key_table.down.sql new file mode 100644 index 00000000000..1909420ca70 --- /dev/null +++ b/scripts/sql/277_insert_devtron_resource_searchable_key_table.down.sql @@ -0,0 +1 @@ +DELETE from devtron_resource_searchable_key ds where ds."name" in ('CHART_NAME'); \ No newline at end of file diff --git a/scripts/sql/277_insert_devtron_resource_searchable_key_table.up.sql b/scripts/sql/277_insert_devtron_resource_searchable_key_table.up.sql new file mode 100644 index 00000000000..4a987ea145d --- /dev/null +++ b/scripts/sql/277_insert_devtron_resource_searchable_key_table.up.sql @@ -0,0 +1,3 @@ + +INSERT INTO devtron_resource_searchable_key(name, is_removed, created_on, created_by, updated_on, updated_by) +VALUES ('CHART_NAME', false, now(), 1, now(), 1); \ No newline at end of file From 5f43eb28bcf7044b591c7ea737bfa9a45e71c1c6 Mon Sep 17 00:00:00 2001 From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:41:50 +0530 Subject: [PATCH 10/61] doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated --- .gitbook.yaml | 5 +- docs/SUMMARY.md | 2 +- docs/reference/glossary.md | 4 +- .../deployment-template.md | 20 ++++-- .../global-configurations/README.md | 2 +- ...{custom-charts.md =>
deployment-charts.md} | 68 +++++++++++++++++-- 6 files changed, 81 insertions(+), 20 deletions(-) rename docs/user-guide/global-configurations/{custom-charts.md => deployment-charts.md} (68%) diff --git a/.gitbook.yaml b/.gitbook.yaml index 2f04cbca957..7ea252955d7 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -16,7 +16,7 @@ redirects: setup/upgrade/devtron-upgrade-0.2.x-0.3.x: getting-started/upgrade/devtron-upgrade-0.2.x-0.3.x setup/global-configurations: user-guide/global-configurations/README.md setup/global-configurations/gitops: user-guide/global-configurations/gitops.md - setup/global-configurations/custom-charts: user-guide/global-configurations/custom-charts.md + setup/global-configurations/custom-charts: user-guide/global-configurations/deployment-charts.md setup/global-configurations/user-access: user-guide/global-configurations/authorization/user-access.md setup/global-configurations/external-links: user-guide/global-configurations/external-links.md setup/global-configurations/projects: user-guide/global-configurations/projects.md @@ -127,4 +127,5 @@ redirects: user-guide/clusters: user-guide/resource-browser.md usage/clusters: user-guide/resource-browser.md global-configurations/authorization/sso-login/okta: user-guide/global-configurations/authorization/sso/okta.md - usage/applications/creating-application/ci-pipeline/ci-build-pre-post-plugins: user-guide/creating-application/workflow/ci-build-pre-post-plugins.md \ No newline at end of file + usage/applications/creating-application/ci-pipeline/ci-build-pre-post-plugins: user-guide/creating-application/workflow/ci-build-pre-post-plugins.md + global-configurations/custom-charts: user-guide/global-configurations/deployment-charts.md \ No newline at end of file diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 74125266132..8dd29c1efb5 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -24,7 +24,7 @@ * [Git Accounts](user-guide/global-configurations/git-accounts.md) * [Container/OCI Registry](user-guide/global-configurations/container-registries.md) * [Chart Repositories](user-guide/global-configurations/chart-repo.md) - * [Custom Charts](user-guide/global-configurations/custom-charts.md) + * [Deployment Charts](user-guide/global-configurations/deployment-charts.md) * [Authorization](user-guide/global-configurations/authorization/README.md) * [SSO Login Services](user-guide/global-configurations/sso-login.md) * [Google](user-guide/global-configurations/authorization/sso/google.md) diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index 69402187cba..c219ea5bd12 100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -64,9 +64,9 @@ Temporarily marking a node as unschedulable, preventing new pods from being assi CronJob is used to create Jobs on a repeating schedule. It is commonly used for running periodic tasks with no manual intervention. In Devtron, you can view a list of cronjobs by going to Resource Browser → (choose a cluster) → Workloads → CronJob. [Read More...](../user-guide/creating-application/deployment-template/job-and-cronjob.md#2.-cronjob) -### Custom Charts +### Deployment Charts -Devtron offers a variety of ready-made Helm charts for common tasks and functions. If you have a specific need that isn't met by these preconfigured charts, super-admins have the permission to upload their own custom charts. Once uploaded, these custom charts become accessible for use by all users on the Devtron platform. 
[Read More...](../user-guide/global-configurations/custom-charts.md) +Devtron offers a variety of ready-made Helm charts for common tasks and functions. If you have a specific need that isn't met by these preconfigured charts, super-admins have the permission to upload their own charts. Once uploaded, these charts become accessible for use by all users on the Devtron platform. [Read More...](../user-guide/global-configurations/deployment-charts.md) ### DaemonSet diff --git a/docs/user-guide/creating-application/deployment-template.md b/docs/user-guide/creating-application/deployment-template.md index c3e74382362..ab117b9a3ce 100644 --- a/docs/user-guide/creating-application/deployment-template.md +++ b/docs/user-guide/creating-application/deployment-template.md @@ -23,7 +23,7 @@ Users need to have [Admin role](../user-guide/global-configurations/authorizatio {% hint style="warning" %} ### Note -After you select and save a chart type for a given application, you won't be able to change it later. Make sure to choose the correct chart type before saving. You can select a chart from [Devtron Charts](#from-devtron-charts) or [Custom Charts](#from-custom-charts). +After you select and save a chart type for a given application, you won't be able to change it later. Make sure to choose the correct chart type before saving. You can select a chart from [Devtron Charts](#from-devtron-charts) or other [Deployment Charts](#from-deployment-charts). {% endhint %} ### From Devtron Charts @@ -37,10 +37,10 @@ You can select a default deployment chart from the following options: ![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/deployment-template/select-devtron-chart.gif) -### From Custom Charts +### From Deployment Charts {% hint style="warning" %} -This option will be available only if a custom chart exists. If it doesn't, a user with `super admin` permission may upload one in [Global Configurations → Custom Charts](../global-configurations/custom-charts.md). +This option will be available only if a custom chart exists. If it doesn't, a user with `super admin` permission may upload one in [Global Configurations → Deployment Charts](../global-configurations/deployment-charts.md). {% endhint %} You can select an available custom chart as shown below. You can also view the description of the custom charts in the list. @@ -110,20 +110,26 @@ Click **Save Changes**. If you want to do additional configurations, then click {% hint style="warning" %} ### Who Can Perform This Action? -Superadmin can define and apply custom deployment schema using API +Superadmin can define and apply custom deployment schema. {% endhint %} By default, the `Basic (GUI)` section comes with multiple predefined fields as seen earlier [in the table](#2-basic-configuration). However, if you wish to display a different set of fields to your team, you can modify the whole section as per your requirement. -{% embed url="https://www.youtube.com/watch?v=09VP1I-WvUs" caption="JSON-driven Deployment Schema" %} - This is useful in scenarios where: * Your team members find it difficult to understand and edit the [Advanced (YAML)](#3-advanced-yaml) section. * You frequently edit certain fields in Advanced (YAML), which you expect to remain easily accessible in Basic (GUI) section. * You don't require some fields in Basic (GUI) section. * You need the autonomy to keep the Basic (GUI) unique for applications/clusters/environments/charts, or display the same Basic (GUI) everywhere. 
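For the hunk above: the Basic (GUI) section is rendered from an RJSF-style JSON schema. As a hedged illustration only, a minimal schema exposing two fields might look like the following; the keys `replicaCount` and `ingress` mirror common deployment-template values, and the exact shape Devtron expects should be validated against the RJSF playground referenced in these docs:

```json
{
  "type": "object",
  "properties": {
    "replicaCount": {
      "type": "integer",
      "title": "Replica Count",
      "minimum": 0
    },
    "ingress": {
      "type": "object",
      "title": "Ingress",
      "properties": {
        "enabled": { "type": "boolean", "title": "Enable Ingress" },
        "host": { "type": "string", "title": "Host" }
      }
    }
  }
}
```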
-This is possible by passing a custom JSON (deployment schema) of your choice through the following API. You may need to run the API with the `POST` method if you are doing it for the first time. +{% hint style="info" %} +There are two ways to customize the Basic GUI; use any one of the following: +1. From the [Deployment Charts](../global-configurations/deployment-charts.md#editing-gui-schema-of-deployment-charts) section +2. Using APIs (explained below) +{% endhint %} + +{% embed url="https://www.youtube.com/watch?v=09VP1I-WvUs" caption="JSON-driven Deployment Schema" %} + +You can pass a custom JSON (deployment schema) of your choice through the following API. You may need to run the API with the `POST` method if you are doing it for the first time. ``` PUT {{DEVTRON_BASEURL}}/orchestrator/deployment/template/schema
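# Illustrative invocation only (a sketch): the request body shape and the
# auth header below are assumptions, not the documented contract for this
# endpoint. Use POST instead of PUT for first-time creation, as noted above.
# curl -X PUT "$DEVTRON_BASEURL/orchestrator/deployment/template/schema" \
#   -H "Content-Type: application/json" \
#   -H "token: $DEVTRON_API_TOKEN" \
#   -d @deployment-schema.json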
diff --git a/docs/user-guide/global-configurations/README.md b/docs/user-guide/global-configurations/README.md index 488ed2328b1..6a7aa4cf29a 100644 --- a/docs/user-guide/global-configurations/README.md +++ b/docs/user-guide/global-configurations/README.md @@ -18,7 +18,7 @@ Before you start creating an application, we recommend to provide basic informat [Chart Repositories](chart-repo.md) -[Custom Charts](custom-charts.md) +[Deployment Charts](deployment-charts.md) [Authorization](authorization/README.md) diff --git a/docs/user-guide/global-configurations/custom-charts.md b/docs/user-guide/global-configurations/deployment-charts.md similarity index 68% rename from docs/user-guide/global-configurations/custom-charts.md rename to docs/user-guide/global-configurations/deployment-charts.md index 18fefc87596..256e105cc32 100644 --- a/docs/user-guide/global-configurations/custom-charts.md +++ b/docs/user-guide/global-configurations/deployment-charts.md @@ -1,4 +1,4 @@ -# Custom Charts +# Deployment Charts Devtron includes predefined helm charts that cover the majority of use cases. For any use case not addressed by the default helm charts, you can upload your own helm chart and use it as a custom chart in Devtron. @@ -8,7 +8,7 @@ For any use case not addressed by the default helm charts, you can upload your o > A super admin can upload multiple versions of a custom helm chart. -![Custom charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/custom-charts/custom-charts-lists.png) +![Figure 1: Deployment Charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/gc-deployment-charts.jpg) ## Prerequisites @@ -99,7 +99,9 @@ helm package my-custom-chart The above command will create a `my-custom-chart-0.1.0.tgz` file. -## Uploading a custom chart +--- + +## Uploading a Deployment Chart > A custom chart can only be uploaded by a super admin. @@ -142,21 +144,73 @@ The following are the validation results: ![Already exists](https://devtron-public-asset.s3.us-east-2.amazonaws.com/custom-charts/List+-+Empty-1.png) -## View the custom charts +--- + +## Viewing Deployment Charts > All users can view the custom charts. -To view a list of available custom charts, go to **Global Configurations > Custom charts** page. +To view the list of available custom charts, go to the **Global Configurations → Deployment Charts** page. * The charts can be searched with their name, version, or description. * New [custom charts can be uploaded](#uploading-a-custom-chart) by selecting **Upload chart**. -![Custom charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/custom-charts/custom-charts-lists.png) +![Custom charts](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/upload-custom-chart.jpg) + +--- -## Use the custom chart in an application +## Using a Deployment Chart in an Application The custom charts can be used from the [Deployment Template](../creating-application/deployment-template.md) section. > **Info**: > > The deployment strategy for a custom chart is fetched from the custom chart template and cannot be configured in the [CD pipeline](../creating-application/workflow/cd-pipeline.md#deployment-strategies). + +--- + +## Editing GUI Schema of Deployment Charts [![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/elements/EnterpriseTag.svg)](https://devtron.ai/pricing) + +{% hint style="warning" %} +### Who Can Perform This Action? +Only superadmins can edit the GUI schema of deployment charts. +{% endhint %} + +{% hint style="info" %} +### Reference +This section is an extension of the [Customize Basic GUI](../creating-application/deployment-template.md#customize-basic-gui) feature within **App Configuration** → **Base Deployment Template**. Refer to that document to know more about the significance of having a customizable GUI schema for your deployment templates. +{% endhint %} + +You can edit the GUI schema of both types of deployment charts: +1. Charts provided by Devtron (*Deployment*, *Job & CronJob*, *Rollout Deployment*, and *StatefulSet*) +2. Custom charts uploaded by you + +### Tutorial + +{% embed url="https://www.youtube.com/watch?v=93tGIsM1qC8" caption="JSON-driven Deployment Schema" %} + +### Steps + +In this example, we will edit the Deployment chart type provided by Devtron. + +1. Click the edit button next to the chart as shown below. + + ![Edit GUI Schema Button](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/edit-chart-schema.jpg) + +2. For Devtron charts, a GUI schema is already available for you to edit. For custom charts, you may have to define a GUI schema yourself. To know how to create such a GUI schema, refer to the [RJSF JSON Schema Tool](https://rjsf-team.github.io/react-jsonschema-form/). + + ![Editable Schema](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/gui-schema.jpg) + +3. You may start editing the schema by excluding existing fields/objects or including more of them. Click the **Refer YAML** button to view all the supported fields. + + ![Refer YAML Button](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/refer-yaml.gif) + +4. While editing the schema, you may use the **Preview GUI** option for a real-time preview of your changes. + + ![Preview GUI Button](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/preview-gui.gif) + +5. Click **Save Changes**. + + ![Save Changes](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/deployment-charts/save-changes.jpg) + +Next, if you go to **App Configuration** → **Base Deployment Template**, you will be able to see the deployment template fields (in Basic GUI) as per your customized schema.
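For reference, a GUI schema follows RJSF's JSON Schema format. Below is a minimal sketch; the `replicaCount` and `resources` keys are assumptions about what a chart's values expose, so confirm the actual keys with **Refer YAML** before using anything like this:

```bash
# Hypothetical example of a small RJSF-style GUI schema: save it locally and
# paste its contents into the schema editor shown in the steps above.
cat > gui-schema.json <<'EOF'
{
  "type": "object",
  "properties": {
    "replicaCount": {
      "type": "integer",
      "title": "Replica Count",
      "minimum": 0
    },
    "resources": {
      "type": "object",
      "title": "Resources",
      "properties": {
        "limits": {
          "type": "object",
          "properties": {
            "cpu": { "type": "string", "title": "CPU Limit" },
            "memory": { "type": "string", "title": "Memory Limit" }
          }
        }
      }
    }
  }
}
EOF
```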
\ No newline at end of file From 16d01d67fe6d55a724efb692d696fd9955045761 Mon Sep 17 00:00:00 2001 From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Date: Fri, 23 Aug 2024 19:51:30 +0530 Subject: [PATCH 11/61] doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry --- .gitbook.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitbook.yaml b/.gitbook.yaml index 7ea252955d7..777f5fc9a3c 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -109,7 +109,7 @@ redirects: getting-started/global-configurations/filter-condition: user-guide/global-configurations/filter-condition.md getting-started/global-configurations/build-infra: user-guide/global-configurations/build-infra.md getting-started/global-configurations/gitops: user-guide/global-configurations/gitops.md - getting-started/global-configurations/custom-charts: user-guide/global-configurations/custom-charts.md + getting-started/global-configurations/custom-charts: user-guide/global-configurations/deployment-charts.md getting-started/global-configurations/external-links: user-guide/global-configurations/external-links.md getting-started/global-configurations/projects: user-guide/global-configurations/projects.md getting-started/global-configurations/manage-notification: user-guide/global-configurations/manage-notification.md From d816deece27ecd118a2319b8cbcffeddeaa04b8a Mon Sep 17 00:00:00 2001 From: Badal Kumar <130441461+badal773@users.noreply.github.com> Date: Fri, 23 Aug 2024 20:44:03 +0530 Subject: [PATCH 12/61] docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> --- devtron-images.txt.source | 39 +++ docs/SUMMARY.md | 1 + ...nstall-devtron-in-airgapped-environment.md | 245 ++++++++++++++++++ 3 files changed, 285 insertions(+) create mode 100644 devtron-images.txt.source create mode 100644 docs/setup/install/install-devtron-in-airgapped-environment.md diff --git a/devtron-images.txt.source b/devtron-images.txt.source new file mode 100644 index 00000000000..778292ffed2 --- /dev/null +++ b/devtron-images.txt.source @@ -0,0 +1,39 @@ +quay.io/devtron/image-scanner:137872c2-141-23848 +quay.io/devtron/inception:473deaa4-185-21582 +quay.io/devtron/hyperion:291c4c75-280-23860 +public.ecr.aws/docker/library/redis:7.0.5-alpine +quay.io/argoproj/argocd:v2.5.2 +quay.io/argoproj/workflow-controller:v3.4.3 +quay.io/devtron/authenticator:e414faff-393-13273 +quay.io/devtron/bats:v1.4.1 +quay.io/devtron/busybox:1.31.1 +quay.io/devtron/chart-sync:5a1d0301-150-23845 +quay.io/devtron/curl:7.73.0 +quay.io/devtron/dashboard:5f95d187-690-23841 +quay.io/devtron/devtron-utils:dup-chart-repo-v1.1.0 +quay.io/devtron/devtron:291c4c75-434-23853 +quay.io/devtron/ci-runner:48aca9f4-138-23844 +quay.io/devtron/dex:v2.30.2 +quay.io/devtron/git-sensor:86e13283-200-23847 
+quay.io/devtron/grafana:7.3.1 +quay.io/devtron/k8s-sidecar:1.1.0 +quay.io/devtron/k8s-utils:tutum-curl +quay.io/devtron/kubectl:latest +quay.io/devtron/kubelink:0dee6306-564-23843 +quay.io/devtron/kubewatch:850b40d5-419-23840 +quay.io/devtron/lens:56211042-333-23839 +quay.io/devtron/migrator:v4.16.2 +quay.io/devtron/nats-box +quay.io/devtron/nats-server-config-reloader:0.6.2 +quay.io/devtron/nats:2.9.3-alpine +quay.io/devtron/notifier:9639b1ab-372-23850 +quay.io/devtron/postgres:11.9 +quay.io/devtron/postgres_exporter:v0.10.1 +quay.io/devtron/prometheus-nats-exporter:0.9.0 +quay.io/devtron/minio:RELEASE.2021-02-14T04-01-33Z +quay.io/devtron/clair:4.3.6 +quay.io/devtron/postgres:11.9.0-debian-10-r26 +quay.io/devtron/postgres_exporter:v0.4.7 +quay.io/devtron/minio-mc:RELEASE.2021-02-14T04-28-06Z +quay.io/devtron/minideb:latest + diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8dd29c1efb5..2a55ffcad3a 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -7,6 +7,7 @@ * [Install Devtron with CI/CD and GitOps (Argo CD)](setup/install/install-devtron-with-cicd-with-gitops.md) * [Install Devtron without Integrations](setup/install/install-devtron.md) * [Install Devtron on Minikube, Microk8s, K3s, Kind, Cloud VMs](setup/install/Install-devtron-on-Minikube-Microk8s-K3s-Kind.md) + * [Install Devtron on Airgapped Environment](setup/install/install-devtron-in-airgapped-environment.md) * [Demo on Popular Cloud Providers](setup/install/demo-tutorials.md) * [Backup for Disaster Recovery](setup/install/devtron-backup.md) * [Uninstall Devtron](setup/install/uninstall-devtron.md) diff --git a/docs/setup/install/install-devtron-in-airgapped-environment.md b/docs/setup/install/install-devtron-in-airgapped-environment.md new file mode 100644 index 00000000000..8d705fb77a6 --- /dev/null +++ b/docs/setup/install/install-devtron-in-airgapped-environment.md @@ -0,0 +1,245 @@ +# Devtron Installation in an Airgapped Environment + +## Introduction + +In certain scenarios, you may need to deploy Devtron to a Kubernetes cluster that isn’t connected to the internet. Such air-gapped environments are used for various reasons, particularly in industries with strict regulatory requirements like healthcare, banking, and finance. This is because air-gapped environments aren't exposed to the public internet; therefore, they create a controlled and secure space for handling sensitive data and operations. + +### Prerequisites + +1. Install `podman` or `docker` on the VM from where you're executing the installation commands. +2. Clone the Devtron Helm chart: + + ```bash + git clone https://github.com/devtron-labs/devtron.git + cd devtron + ``` + +3. Set the values of `TARGET_REGISTRY`, `TARGET_REGISTRY_USERNAME`, and `TARGET_REGISTRY_TOKEN`. This registry should be accessible from the VM where you are running the cloning script and the K8s cluster where you’re installing Devtron. + +{% hint style="warning" %} +### Note +If you are using Docker, the TARGET_REGISTRY should be in the format `docker.io/` +{% endhint %} + +--- + +## Docker Instructions + +### Platform Selection + +#### For Linux/amd64 + + ```bash + export PLATFORM="linux/amd64" + ``` +#### For Linux/arm64 + + ```bash + export PLATFORM="linux/arm64" + ``` + + + +1. 
Set the environment variables + + ```bash + # Set the source registry URL + export SOURCE_REGISTRY="quay.io/devtron" + + # Set the target registry URL, username, and token/password + export TARGET_REGISTRY="" + export TARGET_REGISTRY_USERNAME="" + export TARGET_REGISTRY_TOKEN="" + + # Set the source and target image file names with default values if not already set + SOURCE_IMAGES_LIST="${SOURCE_IMAGES_LIST:=devtron-images.txt.source}" + TARGET_IMAGES_LIST="${TARGET_IMAGES_LIST:=devtron-images.txt.target}" + ``` + +2. Log in to the target Docker registry + + ```bash + docker login -u $TARGET_REGISTRY_USERNAME -p $TARGET_REGISTRY_TOKEN $TARGET_REGISTRY + ``` + +3. Clone the images + + ```bash + while IFS= read -r source_image; do + # Check if the source image belongs to the quay.io/devtron registry + if [[ "$source_image" == quay.io/devtron/* ]]; then + # Replace the source registry with the target registry in the image name + target_image="${source_image/quay.io\/devtron/$TARGET_REGISTRY}" + + # Check if the source image belongs to the quay.io/argoproj registry + elif [[ "$source_image" == quay.io/argoproj/* ]]; then + # Replace the source registry with the target registry in the image name + target_image="${source_image/quay.io\/argoproj/$TARGET_REGISTRY}" + + # Check if the source image belongs to the public.ecr.aws/docker/library registry + elif [[ "$source_image" == public.ecr.aws/docker/library/* ]]; then + # Replace the source registry with the target registry in the image name + target_image="${source_image/public.ecr.aws\/docker\/library/$TARGET_REGISTRY}" + fi + + # Pull the image from the source registry + docker pull --platform $PLATFORM $source_image + + # Tag the image with the new target registry name + docker tag $source_image $target_image + + # Push the image to the target registry + docker push $target_image + + # Output the updated image name + echo "Updated image: $target_image" + + # Append the new image name to the target image file + echo "$target_image" >> "$TARGET_IMAGES_LIST" + + done < "$SOURCE_IMAGES_LIST" + ``` +--- + +## Podman Instructions + +### For Multi-arch + +1. Set the environment variables + + ```bash + export SOURCE_REGISTRY="quay.io/devtron" + export SOURCE_REGISTRY_TOKEN=#Enter token provided by Devtron team + export TARGET_REGISTRY=#Enter target registry url + export TARGET_REGISTRY_USERNAME=#Enter target registry username + export TARGET_REGISTRY_TOKEN=#Enter target registry token/password + ``` + +2. Log in to the target Podman registry + + ```bash + podman login -u $TARGET_REGISTRY_USERNAME -p $TARGET_REGISTRY_TOKEN $TARGET_REGISTRY + ``` + +3. 
Clone the images + + ```bash + SOURCE_REGISTRY="quay.io/devtron" + TARGET_REGISTRY=${TARGET_REGISTRY} + SOURCE_IMAGES_FILE_NAME="${SOURCE_IMAGES_FILE_NAME:=devtron-images.txt.source}" + TARGET_IMAGES_FILE_NAME="${TARGET_IMAGES_FILE_NAME:=devtron-images.txt.target}" + + cp $SOURCE_IMAGES_FILE_NAME $TARGET_IMAGES_FILE_NAME + while read source_image; do + if [[ "$source_image" == *"workflow-controller:"* || "$source_image" == *"argoexec:"* || "$source_image" == *"argocd:"* ]] + then + SOURCE_REGISTRY="quay.io/argoproj" + sed -i "s|${SOURCE_REGISTRY}|${TARGET_REGISTRY}|g" $TARGET_IMAGES_FILE_NAME + elif [[ "$source_image" == *"redis:"* ]] + then + SOURCE_REGISTRY="public.ecr.aws/docker/library" + sed -i "s|${SOURCE_REGISTRY}|${TARGET_REGISTRY}|g" $TARGET_IMAGES_FILE_NAME + else + SOURCE_REGISTRY="quay.io/devtron" + sed -i "s|${SOURCE_REGISTRY}|${TARGET_REGISTRY}|g" $TARGET_IMAGES_FILE_NAME + fi + done <$SOURCE_IMAGES_FILE_NAME + echo "Target Images file finalized" + + while read -r -u 3 source_image && read -r -u 4 target_image ; do + echo "Pushing $source_image $target_image" + podman manifest create $source_image + podman manifest add $source_image $source_image --all + podman manifest push $source_image $target_image --all + done 3<"$SOURCE_IMAGES_FILE_NAME" 4<"$TARGET_IMAGES_FILE_NAME" + ```
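Before moving on, you may want to confirm that every image is now resolvable from the target registry. The following is a rough sanity check, not part of the official steps; it assumes `skopeo` is installed on the same VM (for the Docker flow, read from `$TARGET_IMAGES_LIST` instead):

```bash
# Sketch: iterate over the finalized target list and verify that each
# image's manifest can be fetched from the target registry.
while IFS= read -r target_image; do
  if skopeo inspect --creds "$TARGET_REGISTRY_USERNAME:$TARGET_REGISTRY_TOKEN" \
      "docker://${target_image}" >/dev/null 2>&1; then
    echo "OK      ${target_image}"
  else
    echo "MISSING ${target_image}"
  fi
done < "$TARGET_IMAGES_FILE_NAME"
```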
+ +--- + +## Devtron Installation + +Before starting, ensure you have created an image pull secret for your registry if authentication is required. + +1. Create the namespace (if not already created) + ```bash + kubectl create ns devtroncd + ``` + +2. Create the Docker registry secret + ```bash + kubectl create secret docker-registry devtron-imagepull \ + --namespace devtroncd \ + --docker-server=$TARGET_REGISTRY \ + --docker-username=$TARGET_REGISTRY_USERNAME \ + --docker-password=$TARGET_REGISTRY_TOKEN + ``` + If you are installing Devtron with the CI/CD module or using Argo CD, create the secret in the following namespaces as well; otherwise, you can skip this step: + ```bash + kubectl create secret docker-registry devtron-imagepull \ + --namespace devtron-cd \ + --docker-server=$TARGET_REGISTRY \ + --docker-username=$TARGET_REGISTRY_USERNAME \ + --docker-password=$TARGET_REGISTRY_TOKEN + kubectl create secret docker-registry devtron-imagepull \ + --namespace devtron-ci \ + --docker-server=$TARGET_REGISTRY \ + --docker-username=$TARGET_REGISTRY_USERNAME \ + --docker-password=$TARGET_REGISTRY_TOKEN + kubectl create secret docker-registry devtron-imagepull \ + --namespace argo \ + --docker-server=$TARGET_REGISTRY \ + --docker-username=$TARGET_REGISTRY_USERNAME \ + --docker-password=$TARGET_REGISTRY_TOKEN + ``` + +3. Navigate to the Devtron Helm chart directory + ```bash + cd charts/devtron + ``` + + +### Install Devtron without any Integrations + +Use the command below to install Devtron without any integrations. + +1. Without `imagePullSecrets`: + ```bash + helm install devtron . -n devtroncd --set global.containerRegistry="$TARGET_REGISTRY" + ``` + +2. With `imagePullSecrets`: + ```bash + helm install devtron . -n devtroncd --set global.containerRegistry="$TARGET_REGISTRY" --set global.imagePullSecrets[0].name=devtron-imagepull + ``` + +### Install Devtron with CI/CD Mode +Use the command below to install Devtron with only the CI/CD module. + +1. Without `imagePullSecrets`: + ```bash + helm install devtron . -n devtroncd --set installer.modules={cicd} --set global.containerRegistry="$TARGET_REGISTRY" + ``` + +2. With `imagePullSecrets`: + ```bash + helm install devtron . -n devtroncd --set installer.modules={cicd} --set global.containerRegistry="$TARGET_REGISTRY" --set global.imagePullSecrets[0].name=devtron-imagepull + ``` + +### Install Devtron with CI/CD Mode including Argo CD + +Use the command below to install Devtron with the CI/CD module and Argo CD. + +1. Without `imagePullSecrets`: + ```bash + helm install devtron . --create-namespace -n devtroncd --set installer.modules={cicd} --set argo-cd.enabled=true --set global.containerRegistry="$TARGET_REGISTRY" --set argo-cd.global.image.repository="${TARGET_REGISTRY}/argocd" --set argo-cd.redis.image.repository="${TARGET_REGISTRY}/redis" + ``` + +2. With `imagePullSecrets`: + ```bash + helm install devtron . --create-namespace -n devtroncd --set installer.modules={cicd} --set argo-cd.enabled=true --set global.containerRegistry="$TARGET_REGISTRY" --set argo-cd.global.image.repository="${TARGET_REGISTRY}/argocd" --set argo-cd.redis.image.repository="${TARGET_REGISTRY}/redis" --set global.imagePullSecrets[0].name=devtron-imagepull + ``` + +--- + +## Next Steps +After installation, refer to the [Devtron installation documentation](https://docs.devtron.ai/install/install-devtron-with-cicd-with-gitops#devtron-dashboard) for further steps, including obtaining the dashboard URL and the admin password. From 26784d564bc973c1f18c0e542708bc8cfcbd2857 Mon Sep 17 00:00:00 2001 From: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:18:34 +0530 Subject: [PATCH 13/61] feat: Env description handling (#5744) * env description handling added * license handling --- env_gen.md | 2 +- fetchAllEnv/fetchAllEnv.go | 19 +++++++++++-------- pkg/sql/connection.go | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/env_gen.md b/env_gen.md index 2f4d37a3d21..187544e5ce1 100644 --- a/env_gen.md +++ b/env_gen.md @@ -6,7 +6,7 @@ | ACD_NAMESPACE | devtroncd | | | ACD_PASSWORD | | | | ACD_USERNAME | admin | | - | APP | orchestrator | | + | APP | orchestrator | Application name | | APP_SYNC_IMAGE | quay.io/devtron/chart-sync:1227622d-132-3775 | | | APP_SYNC_JOB_RESOURCES_OBJ | | | | APP_SYNC_SERVICE_ACCOUNT | chart-sync | | diff --git a/fetchAllEnv/fetchAllEnv.go b/fetchAllEnv/fetchAllEnv.go index 86585d43856..6df7dcc3175 100644 --- a/fetchAllEnv/fetchAllEnv.go +++ b/fetchAllEnv/fetchAllEnv.go @@ -36,9 +36,10 @@ type EnvField struct { } const ( - envFieldTypeTag = "env" - envDefaultFieldTypeTag = "envDefault" - MARKDOWN_FILENAME = "env_gen.md" + envFieldTypeTag = "env" + envDefaultFieldTypeTag = "envDefault" + envDescriptionFieldTypeTag = "envDescription" + MARKDOWN_FILENAME = "env_gen.md" ) const MarkdownTemplate = ` @@ -97,14 +98,15 @@ func convertTagToStructTag(tag string) reflect.StructTag { return reflect.StructTag(strings.Split(tag, "`")[1]) } -func getEnvKeyAndValue(tag reflect.StructTag) (string, string) { +func getEnvKeyAndValue(tag reflect.StructTag) (string, string, string) { envKey := tag.Get(envFieldTypeTag) envValue := tag.Get(envDefaultFieldTypeTag) + envDescription := tag.Get(envDescriptionFieldTypeTag) // check if there exist any value provided in env for this field if value, ok := os.LookupEnv(envKey); ok { envValue = value } - return envKey, envValue + return envKey, envValue, envDescription } func processGoFile(filePath string, allFields *[]EnvField, uniqueKeys *map[string]bool) error { @@ -122,13 +124,14 @@ func processGoFile(filePath string, allFields *[]EnvField, uniqueKeys *map[strin for _, field := range structType.Fields.List { if field.Tag != nil {
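// field.Tag.Value holds the raw backtick-quoted tag literal from the parsed
// AST, e.g. `env:"APP" envDefault:"orchestrator" envDescription:"Application name"`;
// convertTagToStructTag (shown above) trims the surrounding backticks so the
// individual keys can be looked up with reflect.StructTag's Get method.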
strippedTags := convertTagToStructTag(field.Tag.Value) - envKey, envValue := getEnvKeyAndValue(strippedTags) + envKey, envValue, envDescription := getEnvKeyAndValue(strippedTags) if len(envKey) == 0 || (*uniqueKeys)[envKey] { continue } *allFields = append(*allFields, EnvField{ - Env: envKey, - EnvValue: envValue, + Env: envKey, + EnvValue: envValue, + EnvDescription: envDescription, }) (*uniqueKeys)[envKey] = true } diff --git a/pkg/sql/connection.go b/pkg/sql/connection.go index 70ef3b62be8..88af6a963ef 100644 --- a/pkg/sql/connection.go +++ b/pkg/sql/connection.go @@ -33,7 +33,7 @@ type Config struct { Password string `env:"PG_PASSWORD" envDefault:"" secretData:"-"` Database string `env:"PG_DATABASE" envDefault:"orchestrator"` CasbinDatabase string `env:"CASBIN_DATABASE" envDefault:"casbin"` - ApplicationName string `env:"APP" envDefault:"orchestrator"` + ApplicationName string `env:"APP" envDefault:"orchestrator" envDescription:"Application name"` LogQuery bool `env:"PG_LOG_QUERY" envDefault:"true"` LogAllQuery bool `env:"PG_LOG_ALL_QUERY" envDefault:"false"` ExportPromMetrics bool `env:"PG_EXPORT_PROM_METRICS" envDefault:"false"` From e677fbd28d80a826c99edc0e35c09b744df89314 Mon Sep 17 00:00:00 2001 From: kartik-579 <84493919+kartik-579@users.noreply.github.com> Date: Wed, 28 Aug 2024 12:28:01 +0530 Subject: [PATCH 14/61] misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * code review comment 
incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> --- Wire.go | 8 + api/bean/AppView.go | 3 + api/helm-app/gRPC/applist.pb.go | 194 +- api/helm-app/gRPC/applist.proto | 1 + api/helm-app/gRPC/applist_grpc.pb.go | 158 +- api/helm-app/service/HelmAppService.go | 2 +- .../DeploymentConfigurationRestHandler.go | 135 + api/restHandler/ImageScanRestHandler.go | 23 +- api/restHandler/PolicyRestHandler.go | 20 +- .../DeploymentPipelineRestHandler.go | 5 + api/router/DeploymentConfigRouter.go | 31 + api/router/router.go | 8 +- cmd/external-app/wire_gen.go | 2 +- go.mod | 41 +- go.sum | 89 +- .../sql/repository/AppListingRepository.go | 25 +- internal/sql/repository/app/AppRepository.go | 15 + .../chartConfig/ConfigMapRepository.go | 56 + .../repository/deploymentConfig/repository.go | 1 + .../repository/security/CvePolicyControle.go | 134 +- .../repository/security/CveStoreRepository.go | 39 +- .../security/ImageScanDeployInfoRepository.go | 103 +- .../security/ImageScanResultRepository.go | 3 + internal/sql/repository/security/bean/bean.go | 71 + internal/util/ChartTemplateService.go | 4 + pkg/bean/app.go | 1 + pkg/bulkAction/BulkUpdateService.go | 2 +- .../repository/EnvironmentRepository.go | 13 + .../DeploymentConfigurationService.go | 281 + pkg/configDiff/adaptor/adaptor.go | 29 + pkg/configDiff/bean/bean.go | 152 + pkg/configDiff/helper/helper.go | 20 + pkg/configDiff/utils/utils.go | 16 + pkg/deployment/common/adapter.go | 2 + pkg/deployment/common/bean/bean.go | 1 + .../common/deploymentConfigService.go | 19 + .../manifest/ManifestCreationService.go | 2 +- .../trigger/devtronApps/TriggerService.go | 14 - .../DeploymentTemplateService.go | 13 +- pkg/generateManifest/helper.go | 11 +- .../AppDeploymentTypeChangeManager.go | 22 +- pkg/pipeline/ConfigMapService.go | 211 +- .../DeploymentPipelineConfigService.go | 10 +- pkg/pipeline/bean/ConfigMapBean.go | 18 + pkg/plugin/bean/bean.go | 1 + pkg/security/ImageScanService.go | 219 +- pkg/security/bean/bean.go | 122 + pkg/security/policyService.go | 114 +- scripts/sql/278_scan_policies.down.sql | 3 + scripts/sql/278_scan_policies.up.sql | 6 + scripts/sql/279_rbac_role_audit.down.sql | 5 + scripts/sql/279_rbac_role_audit.up.sql | 17 + .../sql/280_link_external_release.down.sql | 3 + 
scripts/sql/280_link_external_release.up.sql | 6 + .../281_update_scan_tool_metadata.down.sql | 21 + .../sql/281_update_scan_tool_metadata.up.sql | 29 + specs/configDiffView.yaml | 73 + util/rbac/EnforcerUtil.go | 18 + .../Masterminds/goutils/.travis.yml | 18 - .../Masterminds/goutils/CHANGELOG.md | 8 - .../Masterminds/goutils/LICENSE.txt | 202 - .../github.com/Masterminds/goutils/README.md | 70 - .../Masterminds/goutils/appveyor.yml | 21 - .../goutils/cryptorandomstringutils.go | 230 - .../Masterminds/goutils/randomstringutils.go | 248 - .../Masterminds/goutils/stringutils.go | 240 - .../Masterminds/goutils/wordutils.go | 357 - .../Masterminds/sprig/v3/.gitignore | 2 - .../Masterminds/sprig/v3/CHANGELOG.md | 383 - .../Masterminds/sprig/v3/LICENSE.txt | 19 - .../github.com/Masterminds/sprig/v3/Makefile | 9 - .../github.com/Masterminds/sprig/v3/README.md | 100 - .../github.com/Masterminds/sprig/v3/crypto.go | 653 -- .../github.com/Masterminds/sprig/v3/date.go | 152 - .../Masterminds/sprig/v3/defaults.go | 163 - .../github.com/Masterminds/sprig/v3/dict.go | 174 - vendor/github.com/Masterminds/sprig/v3/doc.go | 19 - .../Masterminds/sprig/v3/functions.go | 382 - .../github.com/Masterminds/sprig/v3/list.go | 464 - .../Masterminds/sprig/v3/network.go | 12 - .../Masterminds/sprig/v3/numeric.go | 186 - .../Masterminds/sprig/v3/reflect.go | 28 - .../github.com/Masterminds/sprig/v3/regex.go | 83 - .../github.com/Masterminds/sprig/v3/semver.go | 23 - .../Masterminds/sprig/v3/strings.go | 236 - vendor/github.com/Masterminds/sprig/v3/url.go | 66 - vendor/github.com/antonmedv/expr/.gitignore | 8 - vendor/github.com/antonmedv/expr/LICENSE | 21 - vendor/github.com/antonmedv/expr/README.md | 160 - vendor/github.com/antonmedv/expr/ast/node.go | 169 - vendor/github.com/antonmedv/expr/ast/print.go | 59 - .../github.com/antonmedv/expr/ast/visitor.go | 68 - .../antonmedv/expr/builtin/builtin.go | 101 - .../antonmedv/expr/checker/checker.go | 856 -- .../antonmedv/expr/checker/types.go | 262 - .../antonmedv/expr/compiler/compiler.go | 739 -- .../github.com/antonmedv/expr/conf/config.go | 96 - .../antonmedv/expr/conf/functions.go | 1 - .../antonmedv/expr/conf/operators.go | 59 - .../antonmedv/expr/conf/types_table.go | 123 - vendor/github.com/antonmedv/expr/expr.go | 205 - .../github.com/antonmedv/expr/file/error.go | 69 - .../antonmedv/expr/file/location.go | 10 - .../github.com/antonmedv/expr/file/source.go | 76 - .../antonmedv/expr/optimizer/const_expr.go | 85 - .../antonmedv/expr/optimizer/const_range.go | 40 - .../antonmedv/expr/optimizer/fold.go | 343 - .../antonmedv/expr/optimizer/in_array.go | 64 - .../antonmedv/expr/optimizer/in_range.go | 34 - .../antonmedv/expr/optimizer/optimizer.go | 37 - .../antonmedv/expr/parser/lexer/lexer.go | 221 - .../antonmedv/expr/parser/lexer/state.go | 198 - .../antonmedv/expr/parser/lexer/token.go | 47 - .../antonmedv/expr/parser/lexer/utils.go | 194 - .../antonmedv/expr/parser/parser.go | 610 -- .../github.com/antonmedv/expr/vm/generated.go | 262 - .../github.com/antonmedv/expr/vm/opcodes.go | 71 - .../github.com/antonmedv/expr/vm/program.go | 278 - .../antonmedv/expr/vm/runtime/generated.go | 3288 ------- .../antonmedv/expr/vm/runtime/runtime.go | 517 -- vendor/github.com/antonmedv/expr/vm/vm.go | 523 -- .../argoproj/argo-workflows/v3/.clang-format | 2 - .../argoproj/argo-workflows/v3/.codecov.yml | 17 - .../argoproj/argo-workflows/v3/.dockerignore | 22 - .../argoproj/argo-workflows/v3/.gitattributes | 1 - .../argoproj/argo-workflows/v3/.gitignore | 46 - 
.../argoproj/argo-workflows/v3/.golangci.yml | 67 - .../argo-workflows/v3/.markdownlint.yaml | 5 - .../argo-workflows/v3/.mlc_config.json | 11 - .../argoproj/argo-workflows/v3/.spelling | 219 - .../argoproj/argo-workflows/v3/CHANGELOG.md | 7175 --------------- .../argoproj/argo-workflows/v3/CODEOWNERS | 2 - .../argoproj/argo-workflows/v3/Dockerfile | 130 - .../argo-workflows/v3/Dockerfile.windows | 64 - .../argoproj/argo-workflows/v3/Makefile | 680 -- .../argoproj/argo-workflows/v3/OWNERS | 16 - .../argoproj/argo-workflows/v3/Procfile | 4 - .../argoproj/argo-workflows/v3/README.md | 165 - .../argoproj/argo-workflows/v3/SECURITY.md | 30 - .../argoproj/argo-workflows/v3/USERS.md | 199 - .../argo-workflows/v3/config/config.go | 292 - .../argo-workflows/v3/config/controller.go | 62 - .../argo-workflows/v3/config/image.go | 6 - .../argo-workflows/v3/config/node_events.go | 10 - .../argoproj/argo-workflows/v3/config/rbac.go | 9 - .../v3/config/retention_policy.go | 7 - .../argoproj/argo-workflows/v3/config/sso.go | 31 - .../argoproj/argo-workflows/v3/config/ttl.go | 59 - .../argoproj/argo-workflows/v3/mkdocs.yml | 246 - .../v3/persist/sqldb/ansi_sql_change.go | 11 - .../persist/sqldb/archived_workflow_labels.go | 99 - .../v3/persist/sqldb/backfill_nodes.go | 77 - .../v3/persist/sqldb/db_type.go | 30 - .../explosive_offload_node_status_repo.go | 38 - .../v3/persist/sqldb/migrate.go | 293 - .../v3/persist/sqldb/null_workflow_archive.go | 50 - .../persist/sqldb/offload_node_status_repo.go | 229 - .../argo-workflows/v3/persist/sqldb/sqldb.go | 119 - .../v3/persist/sqldb/workflow_archive.go | 314 - .../v1alpha1/artifact_repository_types.go | 22 +- .../v1alpha1/container_set_template_types.go | 8 +- .../pkg/apis/workflow/v1alpha1/event_types.go | 2 +- .../apis/workflow/v1alpha1/generated.pb.go | 3902 +++++--- .../apis/workflow/v1alpha1/generated.proto | 116 +- .../v3/pkg/apis/workflow/v1alpha1/info.go | 14 +- .../v3/pkg/apis/workflow/v1alpha1/marshall.go | 4 +- .../apis/workflow/v1alpha1/object_types.go | 2 +- .../workflow/v1alpha1/openapi_generated.go | 8151 ----------------- .../apis/workflow/v1alpha1/workflow_types.go | 314 +- .../v1alpha1/zz_generated.deepcopy.go | 77 +- .../argo-workflows/v3/util/cmd/cmd.go | 109 - .../argo-workflows/v3/util/cmd/glog.go | 17 - .../argo-workflows/v3/util/errors/errors.go | 37 +- .../argo-workflows/v3/util/expand/expand.go | 34 - .../argo-workflows/v3/util/expr/env/env.go | 35 - .../argo-workflows/v3/util/file/fileutil.go | 121 - .../v3/util/instanceid/service.go | 60 - .../v3/util/intstr/parametrizable.go | 55 - .../argo-workflows/v3/util/intstr/parse.go | 9 - .../argo-workflows/v3/util/json/json.go | 36 - .../argo-workflows/v3/util/json/jsonify.go | 12 - .../argo-workflows/v3/util/k8s/parse.go | 50 - .../argo-workflows/v3/util/labels/labeler.go | 29 - .../v3/util/sorting/topological_sorting.go | 59 - .../v3/util/template/expression_template.go | 81 - .../argo-workflows/v3/util/template/kind.go | 29 - .../v3/util/template/replace.go | 29 - .../v3/util/template/resolve_var.go | 31 - .../v3/util/template/simple_template.go | 36 - .../v3/util/template/template.go | 46 - .../v3/util/template/validate.go | 25 - .../argo-workflows/v3/util/tls/tls.go | 151 - .../v3/util/unstructured/unstructured.go | 48 - .../argoproj/argo-workflows/v3/util/util.go | 135 - .../argoproj/argo-workflows/v3/version.go | 59 - .../v3/workflow/artifacts/common/common.go | 35 - .../artifacts/common/load_to_stream.go | 41 - .../v3/workflow/artifacts/hdfs/hdfs.go | 248 - 
.../v3/workflow/artifacts/hdfs/util.go | 53 - .../workflow/artifacts/resource/resource.go | 8 - .../v3/workflow/common/ancestry.go | 182 - .../v3/workflow/common/common.go | 16 +- .../v3/workflow/common/configmap.go | 36 - .../v3/workflow/common/convert.go | 86 - .../v3/workflow/common/params.go | 24 - .../v3/workflow/common/parse.go | 175 - .../v3/workflow/common/placeholder.go | 27 - .../argo-workflows/v3/workflow/common/util.go | 346 - .../v3/workflow/hydrator/hydrator.go | 127 - .../metrics/k8s_request_total_metric.go | 45 - .../v3/workflow/metrics/metrics.go | 287 - .../v3/workflow/metrics/pod_missing_metric.go | 12 - .../v3/workflow/metrics/server.go | 134 - .../v3/workflow/metrics/util.go | 270 - .../v3/workflow/metrics/work_queue.go | 50 - .../metrics/workflow_condition_metric.go | 15 - .../v3/workflow/packer/packer.go | 98 - .../v3/workflow/templateresolution/context.go | 275 - .../argo-workflows/v3/workflow/util/merge.go | 104 - .../v3/workflow/util/pod_name.go | 92 - .../argo-workflows/v3/workflow/util/util.go | 1213 +-- .../v3/workflow/validate/validate.go | 1461 --- .../github.com/argoproj/pkg/expr/function.go | 108 - vendor/github.com/argoproj/pkg/file/file.go | 27 - vendor/github.com/argoproj/pkg/json/json.go | 36 - vendor/github.com/colinmarc/hdfs/.gitignore | 4 - vendor/github.com/colinmarc/hdfs/.travis.yml | 33 - .../colinmarc/hdfs/CODE_OF_CONDUCT.md | 46 - vendor/github.com/colinmarc/hdfs/LICENSE.txt | 22 - vendor/github.com/colinmarc/hdfs/Makefile | 38 - vendor/github.com/colinmarc/hdfs/README.md | 131 - vendor/github.com/colinmarc/hdfs/client.go | 293 - vendor/github.com/colinmarc/hdfs/conf.go | 91 - .../colinmarc/hdfs/content_summary.go | 84 - .../github.com/colinmarc/hdfs/exceptions.go | 19 - .../github.com/colinmarc/hdfs/file_reader.go | 417 - .../github.com/colinmarc/hdfs/file_writer.go | 309 - vendor/github.com/colinmarc/hdfs/fixtures.sh | 8 - vendor/github.com/colinmarc/hdfs/hdfs.go | 17 - .../github.com/colinmarc/hdfs/minicluster.sh | 54 - vendor/github.com/colinmarc/hdfs/mkdir.go | 55 - vendor/github.com/colinmarc/hdfs/perms.go | 76 - .../GenericRefreshProtocol.pb.go | 127 - .../GenericRefreshProtocol.proto | 61 - .../GetUserMappingsProtocol.pb.go | 76 - .../GetUserMappingsProtocol.proto | 55 - .../hadoop_common/HAServiceProtocol.pb.go | 295 - .../hadoop_common/HAServiceProtocol.proto | 134 - .../hadoop_common/IpcConnectionContext.pb.go | 177 - .../hadoop_common/IpcConnectionContext.proto | 50 - .../hadoop_common/ProtobufRpcEngine.pb.go | 92 - .../hadoop_common/ProtobufRpcEngine.proto | 67 - .../protocol/hadoop_common/ProtocolInfo.pb.go | 194 - .../protocol/hadoop_common/ProtocolInfo.proto | 89 - .../RefreshAuthorizationPolicyProtocol.pb.go | 58 - .../RefreshAuthorizationPolicyProtocol.proto | 52 - .../RefreshCallQueueProtocol.pb.go | 57 - .../RefreshCallQueueProtocol.proto | 52 - .../RefreshUserMappingsProtocol.pb.go | 106 - .../RefreshUserMappingsProtocol.proto | 70 - .../protocol/hadoop_common/RpcHeader.pb.go | 639 -- .../protocol/hadoop_common/RpcHeader.proto | 182 - .../protocol/hadoop_common/Security.pb.go | 193 - .../protocol/hadoop_common/Security.proto | 63 - .../protocol/hadoop_common/TraceAdmin.pb.go | 199 - .../protocol/hadoop_common/TraceAdmin.proto | 73 - .../protocol/hadoop_common/ZKFCProtocol.pb.go | 85 - .../protocol/hadoop_common/ZKFCProtocol.proto | 59 - .../hadoop_hdfs/ClientDatanodeProtocol.pb.go | 407 - .../hadoop_hdfs/ClientDatanodeProtocol.proto | 210 - .../hadoop_hdfs/ClientNamenodeProtocol.pb.go | 3910 -------- 
.../hadoop_hdfs/ClientNamenodeProtocol.proto | 902 -- .../hadoop_hdfs/ReconfigurationProtocol.pb.go | 513 -- .../hadoop_hdfs/ReconfigurationProtocol.proto | 74 - .../hdfs/protocol/hadoop_hdfs/acl.pb.go | 487 - .../hdfs/protocol/hadoop_hdfs/acl.proto | 108 - .../protocol/hadoop_hdfs/datatransfer.pb.go | 1419 --- .../protocol/hadoop_hdfs/datatransfer.proto | 315 - .../protocol/hadoop_hdfs/encryption.pb.go | 217 - .../protocol/hadoop_hdfs/encryption.proto | 67 - .../protocol/hadoop_hdfs/erasurecoding.pb.go | 228 - .../protocol/hadoop_hdfs/erasurecoding.proto | 60 - .../hdfs/protocol/hadoop_hdfs/hdfs.pb.go | 2289 ----- .../hdfs/protocol/hadoop_hdfs/hdfs.proto | 500 - .../hdfs/protocol/hadoop_hdfs/inotify.pb.go | 663 -- .../hdfs/protocol/hadoop_hdfs/inotify.proto | 133 - .../hdfs/protocol/hadoop_hdfs/xattr.pb.go | 323 - .../hdfs/protocol/hadoop_hdfs/xattr.proto | 75 - vendor/github.com/colinmarc/hdfs/readdir.go | 17 - vendor/github.com/colinmarc/hdfs/remove.go | 41 - vendor/github.com/colinmarc/hdfs/rename.go | 35 - .../colinmarc/hdfs/rpc/block_read_stream.go | 183 - .../colinmarc/hdfs/rpc/block_reader.go | 233 - .../colinmarc/hdfs/rpc/block_write_stream.go | 317 - .../colinmarc/hdfs/rpc/block_writer.go | 241 - .../colinmarc/hdfs/rpc/checksum_reader.go | 151 - .../colinmarc/hdfs/rpc/datanode_failover.go | 71 - .../github.com/colinmarc/hdfs/rpc/kerberos.go | 150 - .../github.com/colinmarc/hdfs/rpc/namenode.go | 410 - vendor/github.com/colinmarc/hdfs/rpc/rpc.go | 179 - vendor/github.com/colinmarc/hdfs/stat.go | 111 - vendor/github.com/colinmarc/hdfs/stat_fs.go | 45 - .../colinmarc/hdfs/travis-setup-cdh5.sh | 158 - .../colinmarc/hdfs/travis-setup-hdp2.sh | 51 - .../github.com/colinmarc/hdfs/travis-setup.sh | 19 - vendor/github.com/colinmarc/hdfs/walk.go | 52 - vendor/github.com/doublerebel/bellows/LICENSE | 21 - .../github.com/doublerebel/bellows/README.md | 45 - vendor/github.com/doublerebel/bellows/main.go | 94 - vendor/github.com/go-logr/logr/README.md | 73 +- vendor/github.com/go-logr/logr/context.go | 33 + .../github.com/go-logr/logr/context_noslog.go | 49 + .../github.com/go-logr/logr/context_slog.go | 83 + vendor/github.com/go-logr/logr/funcr/funcr.go | 185 +- .../github.com/go-logr/logr/funcr/slogsink.go | 105 + vendor/github.com/go-logr/logr/logr.go | 43 - .../go-logr/logr/{slogr => }/sloghandler.go | 98 +- vendor/github.com/go-logr/logr/slogr.go | 100 + vendor/github.com/go-logr/logr/slogr/slogr.go | 77 +- .../go-logr/logr/{slogr => }/slogsink.go | 24 +- .../go-openapi/jsonpointer/.golangci.yml | 61 + .../go-openapi/jsonpointer/README.md | 8 +- .../go-openapi/jsonpointer/pointer.go | 189 +- .../go-openapi/jsonreference/.golangci.yml | 57 +- .../go-openapi/jsonreference/README.md | 14 +- vendor/github.com/go-openapi/swag/.gitignore | 1 + .../github.com/go-openapi/swag/.golangci.yml | 54 +- vendor/github.com/go-openapi/swag/README.md | 8 +- .../{post_go19.go => initialism_index.go} | 3 - vendor/github.com/go-openapi/swag/loading.go | 105 +- .../github.com/go-openapi/swag/post_go18.go | 24 - vendor/github.com/go-openapi/swag/pre_go18.go | 24 - vendor/github.com/go-openapi/swag/pre_go19.go | 70 - vendor/github.com/go-openapi/swag/util.go | 18 +- vendor/github.com/go-openapi/swag/yaml.go | 36 +- .../github.com/go-sql-driver/mysql/.gitignore | 9 - vendor/github.com/go-sql-driver/mysql/AUTHORS | 117 - .../go-sql-driver/mysql/CHANGELOG.md | 232 - vendor/github.com/go-sql-driver/mysql/LICENSE | 373 - .../github.com/go-sql-driver/mysql/README.md | 520 -- 
vendor/github.com/go-sql-driver/mysql/auth.go | 425 - .../github.com/go-sql-driver/mysql/buffer.go | 182 - .../go-sql-driver/mysql/collations.go | 265 - .../go-sql-driver/mysql/conncheck.go | 54 - .../go-sql-driver/mysql/conncheck_dummy.go | 17 - .../go-sql-driver/mysql/connection.go | 650 -- .../go-sql-driver/mysql/connector.go | 146 - .../github.com/go-sql-driver/mysql/const.go | 174 - .../github.com/go-sql-driver/mysql/driver.go | 107 - vendor/github.com/go-sql-driver/mysql/dsn.go | 560 -- .../github.com/go-sql-driver/mysql/errors.go | 65 - .../github.com/go-sql-driver/mysql/fields.go | 194 - vendor/github.com/go-sql-driver/mysql/fuzz.go | 24 - .../github.com/go-sql-driver/mysql/infile.go | 182 - .../go-sql-driver/mysql/nulltime.go | 50 - .../go-sql-driver/mysql/nulltime_go113.go | 40 - .../go-sql-driver/mysql/nulltime_legacy.go | 39 - .../github.com/go-sql-driver/mysql/packets.go | 1349 --- .../github.com/go-sql-driver/mysql/result.go | 22 - vendor/github.com/go-sql-driver/mysql/rows.go | 223 - .../go-sql-driver/mysql/statement.go | 220 - .../go-sql-driver/mysql/transaction.go | 31 - .../github.com/go-sql-driver/mysql/utils.go | 868 -- vendor/github.com/google/uuid/CHANGELOG.md | 31 + vendor/github.com/google/uuid/CONTRIBUTING.md | 2 +- vendor/github.com/google/uuid/hash.go | 6 + vendor/github.com/google/uuid/time.go | 21 +- vendor/github.com/google/uuid/uuid.go | 79 +- vendor/github.com/google/uuid/version6.go | 56 + vendor/github.com/google/uuid/version7.go | 104 + .../github.com/hashicorp/go-uuid/.travis.yml | 12 - vendor/github.com/hashicorp/go-uuid/LICENSE | 363 - vendor/github.com/hashicorp/go-uuid/README.md | 8 - vendor/github.com/hashicorp/go-uuid/uuid.go | 83 - vendor/github.com/huandu/xstrings/.gitignore | 24 - .../huandu/xstrings/CONTRIBUTING.md | 23 - vendor/github.com/huandu/xstrings/LICENSE | 22 - vendor/github.com/huandu/xstrings/README.md | 117 - vendor/github.com/huandu/xstrings/common.go | 21 - vendor/github.com/huandu/xstrings/convert.go | 593 -- vendor/github.com/huandu/xstrings/count.go | 120 - vendor/github.com/huandu/xstrings/doc.go | 8 - vendor/github.com/huandu/xstrings/format.go | 173 - .../github.com/huandu/xstrings/manipulate.go | 220 - .../huandu/xstrings/stringbuilder.go | 8 - .../huandu/xstrings/stringbuilder_go110.go | 10 - .../github.com/huandu/xstrings/translate.go | 552 -- vendor/github.com/jcmturner/gofork/LICENSE | 27 - .../jcmturner/gofork/encoding/asn1/README.md | 5 - .../jcmturner/gofork/encoding/asn1/asn1.go | 1003 -- .../jcmturner/gofork/encoding/asn1/common.go | 173 - .../jcmturner/gofork/encoding/asn1/marshal.go | 659 -- .../gofork/x/crypto/pbkdf2/pbkdf2.go | 98 - vendor/github.com/klauspost/pgzip/.gitignore | 24 - vendor/github.com/klauspost/pgzip/.travis.yml | 24 - vendor/github.com/klauspost/pgzip/GO_LICENSE | 27 - vendor/github.com/klauspost/pgzip/LICENSE | 21 - vendor/github.com/klauspost/pgzip/README.md | 135 - vendor/github.com/klauspost/pgzip/gunzip.go | 584 -- vendor/github.com/klauspost/pgzip/gzip.go | 519 -- .../github.com/oliveagle/jsonpath/.gitignore | 26 - .../github.com/oliveagle/jsonpath/.travis.yml | 8 - .../github.com/oliveagle/jsonpath/jsonpath.go | 722 -- .../github.com/oliveagle/jsonpath/readme.md | 114 - .../github.com/shopspring/decimal/.gitignore | 9 - .../github.com/shopspring/decimal/.travis.yml | 19 - .../shopspring/decimal/CHANGELOG.md | 49 - vendor/github.com/shopspring/decimal/LICENSE | 45 - .../github.com/shopspring/decimal/README.md | 130 - .../shopspring/decimal/decimal-go.go | 415 - 
.../github.com/shopspring/decimal/decimal.go | 1904 ---- .../github.com/shopspring/decimal/rounding.go | 160 - vendor/github.com/spf13/cast/.gitignore | 25 - vendor/github.com/spf13/cast/LICENSE | 21 - vendor/github.com/spf13/cast/Makefile | 40 - vendor/github.com/spf13/cast/README.md | 75 - vendor/github.com/spf13/cast/cast.go | 176 - vendor/github.com/spf13/cast/caste.go | 1476 --- .../spf13/cast/timeformattype_string.go | 27 - .../valyala/bytebufferpool/.travis.yml | 15 - .../github.com/valyala/bytebufferpool/LICENSE | 22 - .../valyala/bytebufferpool/README.md | 21 - .../valyala/bytebufferpool/bytebuffer.go | 111 - .../github.com/valyala/bytebufferpool/doc.go | 7 - .../github.com/valyala/bytebufferpool/pool.go | 151 - .../github.com/valyala/fasttemplate/LICENSE | 22 - .../github.com/valyala/fasttemplate/README.md | 85 - .../valyala/fasttemplate/template.go | 436 - .../github.com/valyala/fasttemplate/unsafe.go | 21 - .../valyala/fasttemplate/unsafe_gae.go | 11 - vendor/golang.org/x/crypto/md4/md4.go | 122 - vendor/golang.org/x/crypto/md4/md4block.go | 91 - vendor/golang.org/x/time/rate/rate.go | 2 + .../google.golang.org/appengine/.travis.yml | 18 - .../appengine/CONTRIBUTING.md | 6 +- vendor/google.golang.org/appengine/README.md | 6 +- .../google.golang.org/appengine/appengine.go | 23 +- .../appengine/appengine_vm.go | 12 +- .../google.golang.org/appengine/identity.go | 3 +- .../appengine/internal/api.go | 347 +- .../appengine/internal/api_classic.go | 29 +- .../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../appengine/internal/transaction.go | 10 +- .../google.golang.org/appengine/namespace.go | 3 +- .../appengine/socket/socket_classic.go | 3 +- .../appengine/socket/socket_vm.go | 4 +- vendor/google.golang.org/appengine/timeout.go | 2 +- .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 9 +- .../gopkg.in/jcmturner/aescts.v1/.gitignore | 14 - vendor/gopkg.in/jcmturner/aescts.v1/README.md | 16 - vendor/gopkg.in/jcmturner/aescts.v1/aescts.go | 186 - .../gopkg.in/jcmturner/dnsutils.v1/.gitignore | 14 - .../jcmturner/dnsutils.v1/.travis.yml | 24 - vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE | 201 - vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go | 95 - vendor/gopkg.in/jcmturner/gokrb5.v5/LICENSE | 201 - .../jcmturner/gokrb5.v5/asn1tools/tools.go | 86 - .../jcmturner/gokrb5.v5/client/ASExchange.go | 159 - .../jcmturner/gokrb5.v5/client/TGSExchange.go | 105 - .../jcmturner/gokrb5.v5/client/cache.go | 111 - .../jcmturner/gokrb5.v5/client/client.go | 223 - .../jcmturner/gokrb5.v5/client/http.go | 46 - .../jcmturner/gokrb5.v5/client/network.go | 213 - .../jcmturner/gokrb5.v5/client/passwd.go | 94 - .../jcmturner/gokrb5.v5/client/session.go | 191 - .../jcmturner/gokrb5.v5/config/hosts.go | 137 - .../jcmturner/gokrb5.v5/config/krb5conf.go | 665 -- .../jcmturner/gokrb5.v5/credentials/ccache.go | 351 - .../gokrb5.v5/credentials/credentials.go | 257 - .../crypto/aes128-cts-hmac-sha1-96.go | 173 - .../crypto/aes128-cts-hmac-sha256-128.go | 135 - .../crypto/aes256-cts-hmac-sha1-96.go | 173 - .../crypto/aes256-cts-hmac-sha384-192.go | 135 - .../gokrb5.v5/crypto/common/common.go | 143 - .../jcmturner/gokrb5.v5/crypto/crypto.go | 175 - .../gokrb5.v5/crypto/des3-cbc-sha1-kd.go | 174 - 
.../jcmturner/gokrb5.v5/crypto/etype/etype.go | 29 - .../jcmturner/gokrb5.v5/crypto/rc4-hmac.go | 137 - .../gokrb5.v5/crypto/rfc3961/encryption.go | 125 - .../gokrb5.v5/crypto/rfc3961/keyDerivation.go | 134 - .../gokrb5.v5/crypto/rfc3961/nfold.go | 128 - .../gokrb5.v5/crypto/rfc3962/encryption.go | 89 - .../gokrb5.v5/crypto/rfc3962/keyDerivation.go | 58 - .../gokrb5.v5/crypto/rfc4757/checksum.go | 40 - .../gokrb5.v5/crypto/rfc4757/encryption.go | 83 - .../gokrb5.v5/crypto/rfc4757/keyDerivation.go | 55 - .../gokrb5.v5/crypto/rfc4757/msgtype.go | 20 - .../gokrb5.v5/crypto/rfc8009/encryption.go | 128 - .../gokrb5.v5/crypto/rfc8009/keyDerivation.go | 144 - .../gokrb5.v5/gssapi/ContextFlags.go | 36 - .../jcmturner/gokrb5.v5/gssapi/MechType.go | 9 - .../gokrb5.v5/gssapi/NegotiationToken.go | 149 - .../jcmturner/gokrb5.v5/gssapi/WrapToken.go | 234 - .../jcmturner/gokrb5.v5/gssapi/gssapi.go | 102 - .../jcmturner/gokrb5.v5/gssapi/krb5Token.go | 202 - .../gokrb5.v5/iana/addrtype/constants.go | 15 - .../gokrb5.v5/iana/adtype/constants.go | 23 - .../gokrb5.v5/iana/asnAppTag/constants.go | 24 - .../gokrb5.v5/iana/chksumtype/constants.go | 32 - .../jcmturner/gokrb5.v5/iana/constants.go | 5 - .../gokrb5.v5/iana/errorcode/constants.go | 155 - .../gokrb5.v5/iana/etypeID/constants.go | 101 - .../gokrb5.v5/iana/flags/constants.go | 30 - .../gokrb5.v5/iana/keyusage/constants.go | 42 - .../gokrb5.v5/iana/msgtype/constants.go | 18 - .../gokrb5.v5/iana/nametype/constants.go | 15 - .../gokrb5.v5/iana/patype/constants.go | 77 - .../gokrb5.v5/kadmin/changepasswddata.go | 23 - .../jcmturner/gokrb5.v5/kadmin/message.go | 114 - .../jcmturner/gokrb5.v5/kadmin/passwd.go | 68 - .../jcmturner/gokrb5.v5/keytab/keytab.go | 369 - .../jcmturner/gokrb5.v5/krberror/error.go | 67 - .../jcmturner/gokrb5.v5/messages/APRep.go | 64 - .../jcmturner/gokrb5.v5/messages/APReq.go | 150 - .../jcmturner/gokrb5.v5/messages/KDCRep.go | 312 - .../jcmturner/gokrb5.v5/messages/KDCReq.go | 402 - .../jcmturner/gokrb5.v5/messages/KRBCred.go | 102 - .../jcmturner/gokrb5.v5/messages/KRBError.go | 83 - .../jcmturner/gokrb5.v5/messages/KRBPriv.go | 108 - .../jcmturner/gokrb5.v5/messages/KRBSafe.go | 61 - .../jcmturner/gokrb5.v5/messages/Ticket.go | 251 - .../jcmturner/gokrb5.v5/mstypes/claims.go | 312 - .../jcmturner/gokrb5.v5/mstypes/filetime.go | 65 - .../gokrb5.v5/mstypes/group_membership.go | 53 - .../mstypes/kerb_sid_and_attributes.go | 42 - .../gokrb5.v5/mstypes/rpc_unicode_string.go | 36 - .../jcmturner/gokrb5.v5/mstypes/sid.go | 70 - .../gokrb5.v5/mstypes/user_session_key.go | 30 - .../jcmturner/gokrb5.v5/pac/client_claims.go | 41 - .../jcmturner/gokrb5.v5/pac/client_info.go | 40 - .../gokrb5.v5/pac/credentials_info.go | 131 - .../jcmturner/gokrb5.v5/pac/device_claims.go | 39 - .../jcmturner/gokrb5.v5/pac/device_info.go | 94 - .../gokrb5.v5/pac/kerb_validation_info.go | 296 - .../gokrb5.v5/pac/pac_info_buffer.go | 39 - .../jcmturner/gokrb5.v5/pac/pac_type.go | 209 - .../gokrb5.v5/pac/s4u_delegation_info.go | 55 - .../jcmturner/gokrb5.v5/pac/signature_data.go | 74 - .../jcmturner/gokrb5.v5/pac/upn_dns_info.go | 66 - .../gokrb5.v5/types/Authenticator.go | 100 - .../gokrb5.v5/types/AuthorizationData.go | 123 - .../jcmturner/gokrb5.v5/types/Cryptosystem.go | 55 - .../jcmturner/gokrb5.v5/types/HostAddress.go | 206 - .../gokrb5.v5/types/KerberosFlags.go | 124 - .../jcmturner/gokrb5.v5/types/PAData.go | 155 - .../gokrb5.v5/types/PrincipalName.go | 64 - .../jcmturner/gokrb5.v5/types/TypedData.go | 18 - vendor/gopkg.in/jcmturner/rpc.v0/LICENSE | 201 - 
vendor/gopkg.in/jcmturner/rpc.v0/ndr/error.go | 13 - vendor/gopkg.in/jcmturner/rpc.v0/ndr/ndr.go | 444 - vendor/k8s.io/utils/env/env.go | 74 - vendor/modules.txt | 175 +- vendor/sigs.k8s.io/yaml/LICENSE | 256 + vendor/sigs.k8s.io/yaml/OWNERS | 8 +- vendor/sigs.k8s.io/yaml/fields.go | 55 +- .../yaml/goyaml.v2}/LICENSE | 0 .../yaml/goyaml.v2/LICENSE.libyaml} | 27 +- .../yaml/goyaml.v2/NOTICE} | 7 +- vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 + vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 143 + vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go | 744 ++ vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go | 815 ++ vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go | 1685 ++++ vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go | 390 + vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go | 1095 +++ vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go | 412 + vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go | 258 + vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go | 2711 ++++++ vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go | 113 + vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go | 26 + vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go | 478 + vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go | 739 ++ .../yaml/goyaml.v2/yamlprivateh.go | 173 + vendor/sigs.k8s.io/yaml/yaml.go | 145 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 17 + vendor/upper.io/db.v3/.gitignore | 4 - vendor/upper.io/db.v3/CHANGELOG.md | 31 - vendor/upper.io/db.v3/LICENSE | 20 - vendor/upper.io/db.v3/Makefile | 50 - vendor/upper.io/db.v3/README.md | 127 - vendor/upper.io/db.v3/collection.go | 60 - vendor/upper.io/db.v3/comparison.go | 334 - vendor/upper.io/db.v3/compound.go | 131 - vendor/upper.io/db.v3/cond.go | 110 - vendor/upper.io/db.v3/connection_url.go | 29 - vendor/upper.io/db.v3/constraint.go | 67 - vendor/upper.io/db.v3/database.go | 66 - vendor/upper.io/db.v3/db.go | 74 - vendor/upper.io/db.v3/env.go | 34 - vendor/upper.io/db.v3/errors.go | 52 - vendor/upper.io/db.v3/function.go | 67 - vendor/upper.io/db.v3/internal/cache/cache.go | 152 - .../internal/cache/hashstructure/LICENSE | 21 - .../internal/cache/hashstructure/README.md | 61 - .../cache/hashstructure/hashstructure.go | 325 - .../internal/cache/hashstructure/include.go | 15 - .../db.v3/internal/cache/interface.go | 34 - .../db.v3/internal/immutable/immutable.go | 28 - .../db.v3/internal/sqladapter/collection.go | 328 - .../db.v3/internal/sqladapter/compat/query.go | 72 - .../internal/sqladapter/compat/query_go18.go | 72 - .../db.v3/internal/sqladapter/database.go | 734 -- .../db.v3/internal/sqladapter/exql/column.go | 81 - .../internal/sqladapter/exql/column_value.go | 106 - .../db.v3/internal/sqladapter/exql/columns.go | 76 - .../internal/sqladapter/exql/database.go | 31 - .../db.v3/internal/sqladapter/exql/default.go | 192 - .../internal/sqladapter/exql/group_by.go | 54 - .../db.v3/internal/sqladapter/exql/hash.go | 26 - .../internal/sqladapter/exql/interfaces.go | 20 - .../db.v3/internal/sqladapter/exql/join.go | 181 - .../internal/sqladapter/exql/order_by.go | 164 - .../db.v3/internal/sqladapter/exql/raw.go | 38 - .../internal/sqladapter/exql/returning.go | 35 - .../internal/sqladapter/exql/statement.go | 111 - .../db.v3/internal/sqladapter/exql/table.go | 94 - .../internal/sqladapter/exql/template.go | 136 - .../internal/sqladapter/exql/utilities.go | 151 - .../db.v3/internal/sqladapter/exql/value.go | 155 - .../db.v3/internal/sqladapter/exql/where.go | 137 - .../db.v3/internal/sqladapter/result.go | 519 -- .../db.v3/internal/sqladapter/sqladapter.go | 44 - .../db.v3/internal/sqladapter/statement.go | 85 - 
.../upper.io/db.v3/internal/sqladapter/tx.go | 114 -
 vendor/upper.io/db.v3/intersection.go | 72 -
 vendor/upper.io/db.v3/lib/reflectx/LICENSE | 23 -
 vendor/upper.io/db.v3/lib/reflectx/README.md | 17 -
 vendor/upper.io/db.v3/lib/reflectx/reflect.go | 407 -
 vendor/upper.io/db.v3/lib/sqlbuilder/batch.go | 82 -
 .../upper.io/db.v3/lib/sqlbuilder/builder.go | 595 --
 .../db.v3/lib/sqlbuilder/comparison.go | 127 -
 .../upper.io/db.v3/lib/sqlbuilder/convert.go | 144 -
 .../db.v3/lib/sqlbuilder/custom_types.go | 61 -
 .../upper.io/db.v3/lib/sqlbuilder/delete.go | 194 -
 .../upper.io/db.v3/lib/sqlbuilder/errors.go | 14 -
 vendor/upper.io/db.v3/lib/sqlbuilder/fetch.go | 232 -
 .../upper.io/db.v3/lib/sqlbuilder/insert.go | 283 -
 .../db.v3/lib/sqlbuilder/interfaces.go | 659 --
 .../upper.io/db.v3/lib/sqlbuilder/paginate.go | 340 -
 .../upper.io/db.v3/lib/sqlbuilder/scanner.go | 38 -
 .../upper.io/db.v3/lib/sqlbuilder/select.go | 523 --
 .../upper.io/db.v3/lib/sqlbuilder/template.go | 311 -
 .../upper.io/db.v3/lib/sqlbuilder/update.go | 241 -
 .../upper.io/db.v3/lib/sqlbuilder/wrapper.go | 199 -
 vendor/upper.io/db.v3/logger.go | 146 -
 vendor/upper.io/db.v3/marshal.go | 37 -
 vendor/upper.io/db.v3/mysql/Makefile | 40 -
 vendor/upper.io/db.v3/mysql/README.md | 7 -
 vendor/upper.io/db.v3/mysql/collection.go | 105 -
 vendor/upper.io/db.v3/mysql/connection.go | 265 -
 vendor/upper.io/db.v3/mysql/custom_types.go | 214 -
 vendor/upper.io/db.v3/mysql/database.go | 305 -
 .../upper.io/db.v3/mysql/docker-compose.yml | 14 -
 vendor/upper.io/db.v3/mysql/mysql.go | 87 -
 vendor/upper.io/db.v3/mysql/template.go | 219 -
 vendor/upper.io/db.v3/mysql/tx.go | 43 -
 vendor/upper.io/db.v3/postgresql/Makefile | 40 -
 vendor/upper.io/db.v3/postgresql/README.md | 6 -
 .../upper.io/db.v3/postgresql/collection.go | 97 -
 .../upper.io/db.v3/postgresql/connection.go | 292 -
 .../upper.io/db.v3/postgresql/custom_types.go | 328 -
 vendor/upper.io/db.v3/postgresql/database.go | 340 -
 .../db.v3/postgresql/docker-compose.yml | 13 -
 .../upper.io/db.v3/postgresql/postgresql.go | 90 -
 vendor/upper.io/db.v3/postgresql/template.go | 210 -
 vendor/upper.io/db.v3/postgresql/tx.go | 43 -
 vendor/upper.io/db.v3/raw.go | 95 -
 vendor/upper.io/db.v3/result.go | 191 -
 vendor/upper.io/db.v3/settings.go | 191 -
 vendor/upper.io/db.v3/tx.go | 31 -
 vendor/upper.io/db.v3/union.go | 63 -
 vendor/upper.io/db.v3/wrapper.go | 81 -
 wire_gen.go | 9 +-
 681 files changed, 16842 insertions(+), 109674 deletions(-)
 create mode 100644 api/restHandler/DeploymentConfigurationRestHandler.go
 create mode 100644 api/router/DeploymentConfigRouter.go
 create mode 100644 internal/sql/repository/security/bean/bean.go
 create mode 100644 pkg/configDiff/DeploymentConfigurationService.go
 create mode 100644 pkg/configDiff/adaptor/adaptor.go
 create mode 100644 pkg/configDiff/bean/bean.go
 create mode 100644 pkg/configDiff/helper/helper.go
 create mode 100644 pkg/configDiff/utils/utils.go
 create mode 100644 pkg/security/bean/bean.go
 create mode 100644 scripts/sql/278_scan_policies.down.sql
 create mode 100644 scripts/sql/278_scan_policies.up.sql
 create mode 100644 scripts/sql/279_rbac_role_audit.down.sql
 create mode 100644 scripts/sql/279_rbac_role_audit.up.sql
 create mode 100644 scripts/sql/280_link_external_release.down.sql
 create mode 100644 scripts/sql/280_link_external_release.up.sql
 create mode 100644 scripts/sql/281_update_scan_tool_metadata.down.sql
 create mode 100644 scripts/sql/281_update_scan_tool_metadata.up.sql
 create mode 100644 specs/configDiffView.yaml
 delete mode 100644
vendor/github.com/Masterminds/goutils/.travis.yml delete mode 100644 vendor/github.com/Masterminds/goutils/CHANGELOG.md delete mode 100644 vendor/github.com/Masterminds/goutils/LICENSE.txt delete mode 100644 vendor/github.com/Masterminds/goutils/README.md delete mode 100644 vendor/github.com/Masterminds/goutils/appveyor.yml delete mode 100644 vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go delete mode 100644 vendor/github.com/Masterminds/goutils/randomstringutils.go delete mode 100644 vendor/github.com/Masterminds/goutils/stringutils.go delete mode 100644 vendor/github.com/Masterminds/goutils/wordutils.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/.gitignore delete mode 100644 vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md delete mode 100644 vendor/github.com/Masterminds/sprig/v3/LICENSE.txt delete mode 100644 vendor/github.com/Masterminds/sprig/v3/Makefile delete mode 100644 vendor/github.com/Masterminds/sprig/v3/README.md delete mode 100644 vendor/github.com/Masterminds/sprig/v3/crypto.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/date.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/defaults.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/dict.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/doc.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/functions.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/list.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/network.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/numeric.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/reflect.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/regex.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/semver.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/strings.go delete mode 100644 vendor/github.com/Masterminds/sprig/v3/url.go delete mode 100644 vendor/github.com/antonmedv/expr/.gitignore delete mode 100644 vendor/github.com/antonmedv/expr/LICENSE delete mode 100644 vendor/github.com/antonmedv/expr/README.md delete mode 100644 vendor/github.com/antonmedv/expr/ast/node.go delete mode 100644 vendor/github.com/antonmedv/expr/ast/print.go delete mode 100644 vendor/github.com/antonmedv/expr/ast/visitor.go delete mode 100644 vendor/github.com/antonmedv/expr/builtin/builtin.go delete mode 100644 vendor/github.com/antonmedv/expr/checker/checker.go delete mode 100644 vendor/github.com/antonmedv/expr/checker/types.go delete mode 100644 vendor/github.com/antonmedv/expr/compiler/compiler.go delete mode 100644 vendor/github.com/antonmedv/expr/conf/config.go delete mode 100644 vendor/github.com/antonmedv/expr/conf/functions.go delete mode 100644 vendor/github.com/antonmedv/expr/conf/operators.go delete mode 100644 vendor/github.com/antonmedv/expr/conf/types_table.go delete mode 100644 vendor/github.com/antonmedv/expr/expr.go delete mode 100644 vendor/github.com/antonmedv/expr/file/error.go delete mode 100644 vendor/github.com/antonmedv/expr/file/location.go delete mode 100644 vendor/github.com/antonmedv/expr/file/source.go delete mode 100644 vendor/github.com/antonmedv/expr/optimizer/const_expr.go delete mode 100644 vendor/github.com/antonmedv/expr/optimizer/const_range.go delete mode 100644 vendor/github.com/antonmedv/expr/optimizer/fold.go delete mode 100644 vendor/github.com/antonmedv/expr/optimizer/in_array.go delete mode 100644 vendor/github.com/antonmedv/expr/optimizer/in_range.go delete mode 100644 
vendor/github.com/antonmedv/expr/optimizer/optimizer.go delete mode 100644 vendor/github.com/antonmedv/expr/parser/lexer/lexer.go delete mode 100644 vendor/github.com/antonmedv/expr/parser/lexer/state.go delete mode 100644 vendor/github.com/antonmedv/expr/parser/lexer/token.go delete mode 100644 vendor/github.com/antonmedv/expr/parser/lexer/utils.go delete mode 100644 vendor/github.com/antonmedv/expr/parser/parser.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/generated.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/opcodes.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/program.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/runtime/generated.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/runtime/runtime.go delete mode 100644 vendor/github.com/antonmedv/expr/vm/vm.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.clang-format delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.codecov.yml delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.dockerignore delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.gitattributes delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.gitignore delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.golangci.yml delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.markdownlint.yaml delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.mlc_config.json delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/.spelling delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/CHANGELOG.md delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/CODEOWNERS delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/Dockerfile delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/Dockerfile.windows delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/Makefile delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/OWNERS delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/Procfile delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/README.md delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/SECURITY.md delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/USERS.md delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/config.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/controller.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/image.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/node_events.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/rbac.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/retention_policy.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/sso.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/config/ttl.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/mkdocs.yml delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/ansi_sql_change.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/archived_workflow_labels.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/backfill_nodes.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/db_type.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/explosive_offload_node_status_repo.go delete mode 100644 
vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/migrate.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/null_workflow_archive.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/offload_node_status_repo.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/sqldb.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/workflow_archive.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/cmd/cmd.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/cmd/glog.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/expand/expand.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/expr/env/env.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/file/fileutil.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/instanceid/service.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parametrizable.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parse.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/k8s/parse.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/labels/labeler.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/sorting/topological_sorting.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/expression_template.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/kind.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/replace.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/resolve_var.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/simple_template.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/template.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/template/validate.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/tls/tls.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/unstructured/unstructured.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/util/util.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/version.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/common.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/load_to_stream.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/hdfs.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/util.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource/resource.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/ancestry.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/configmap.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/convert.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/params.go delete 
mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/parse.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/placeholder.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/common/util.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/hydrator/hydrator.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/k8s_request_total_metric.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/metrics.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/pod_missing_metric.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/server.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/util.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/work_queue.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/workflow_condition_metric.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/packer/packer.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/templateresolution/context.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/util/merge.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/util/pod_name.go delete mode 100644 vendor/github.com/argoproj/argo-workflows/v3/workflow/validate/validate.go delete mode 100644 vendor/github.com/argoproj/pkg/expr/function.go delete mode 100644 vendor/github.com/argoproj/pkg/file/file.go delete mode 100644 vendor/github.com/argoproj/pkg/json/json.go delete mode 100644 vendor/github.com/colinmarc/hdfs/.gitignore delete mode 100644 vendor/github.com/colinmarc/hdfs/.travis.yml delete mode 100644 vendor/github.com/colinmarc/hdfs/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/colinmarc/hdfs/LICENSE.txt delete mode 100644 vendor/github.com/colinmarc/hdfs/Makefile delete mode 100644 vendor/github.com/colinmarc/hdfs/README.md delete mode 100644 vendor/github.com/colinmarc/hdfs/client.go delete mode 100644 vendor/github.com/colinmarc/hdfs/conf.go delete mode 100644 vendor/github.com/colinmarc/hdfs/content_summary.go delete mode 100644 vendor/github.com/colinmarc/hdfs/exceptions.go delete mode 100644 vendor/github.com/colinmarc/hdfs/file_reader.go delete mode 100644 vendor/github.com/colinmarc/hdfs/file_writer.go delete mode 100644 vendor/github.com/colinmarc/hdfs/fixtures.sh delete mode 100644 vendor/github.com/colinmarc/hdfs/hdfs.go delete mode 100644 vendor/github.com/colinmarc/hdfs/minicluster.sh delete mode 100644 vendor/github.com/colinmarc/hdfs/mkdir.go delete mode 100644 vendor/github.com/colinmarc/hdfs/perms.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.pb.go delete mode 100644 
vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.proto delete mode 
100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.pb.go delete mode 100644 vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.proto delete mode 100644 vendor/github.com/colinmarc/hdfs/readdir.go delete mode 100644 vendor/github.com/colinmarc/hdfs/remove.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rename.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/block_read_stream.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/block_reader.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/block_write_stream.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/block_writer.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/checksum_reader.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/datanode_failover.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/kerberos.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/namenode.go delete mode 100644 vendor/github.com/colinmarc/hdfs/rpc/rpc.go delete mode 100644 vendor/github.com/colinmarc/hdfs/stat.go delete mode 100644 vendor/github.com/colinmarc/hdfs/stat_fs.go delete mode 100644 vendor/github.com/colinmarc/hdfs/travis-setup-cdh5.sh delete mode 100644 vendor/github.com/colinmarc/hdfs/travis-setup-hdp2.sh delete mode 100644 vendor/github.com/colinmarc/hdfs/travis-setup.sh delete mode 100644 vendor/github.com/colinmarc/hdfs/walk.go delete mode 100644 vendor/github.com/doublerebel/bellows/LICENSE delete mode 100644 vendor/github.com/doublerebel/bellows/README.md delete mode 100644 vendor/github.com/doublerebel/bellows/main.go create mode 100644 vendor/github.com/go-logr/logr/context.go create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go create mode 100644 vendor/github.com/go-logr/logr/context_slog.go create mode 100644 vendor/github.com/go-logr/logr/funcr/slogsink.go rename vendor/github.com/go-logr/logr/{slogr => }/sloghandler.go (63%) create mode 100644 vendor/github.com/go-logr/logr/slogr.go rename vendor/github.com/go-logr/logr/{slogr => }/slogsink.go (82%) create mode 100644 vendor/github.com/go-openapi/jsonpointer/.golangci.yml rename vendor/github.com/go-openapi/swag/{post_go19.go => initialism_index.go} (98%) delete mode 100644 vendor/github.com/go-openapi/swag/post_go18.go delete mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go delete mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/.gitignore delete mode 100644 vendor/github.com/go-sql-driver/mysql/AUTHORS delete mode 100644 vendor/github.com/go-sql-driver/mysql/CHANGELOG.md delete mode 100644 vendor/github.com/go-sql-driver/mysql/LICENSE delete mode 100644 vendor/github.com/go-sql-driver/mysql/README.md delete mode 100644 vendor/github.com/go-sql-driver/mysql/auth.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/buffer.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/collations.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/connection.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/connector.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/const.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/driver.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/dsn.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/errors.go delete mode 100644 
vendor/github.com/go-sql-driver/mysql/fields.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/fuzz.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/infile.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_go113.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/packets.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/result.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/rows.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/statement.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/transaction.go delete mode 100644 vendor/github.com/go-sql-driver/mysql/utils.go create mode 100644 vendor/github.com/google/uuid/version6.go create mode 100644 vendor/github.com/google/uuid/version7.go delete mode 100644 vendor/github.com/hashicorp/go-uuid/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-uuid/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-uuid/README.md delete mode 100644 vendor/github.com/hashicorp/go-uuid/uuid.go delete mode 100644 vendor/github.com/huandu/xstrings/.gitignore delete mode 100644 vendor/github.com/huandu/xstrings/CONTRIBUTING.md delete mode 100644 vendor/github.com/huandu/xstrings/LICENSE delete mode 100644 vendor/github.com/huandu/xstrings/README.md delete mode 100644 vendor/github.com/huandu/xstrings/common.go delete mode 100644 vendor/github.com/huandu/xstrings/convert.go delete mode 100644 vendor/github.com/huandu/xstrings/count.go delete mode 100644 vendor/github.com/huandu/xstrings/doc.go delete mode 100644 vendor/github.com/huandu/xstrings/format.go delete mode 100644 vendor/github.com/huandu/xstrings/manipulate.go delete mode 100644 vendor/github.com/huandu/xstrings/stringbuilder.go delete mode 100644 vendor/github.com/huandu/xstrings/stringbuilder_go110.go delete mode 100644 vendor/github.com/huandu/xstrings/translate.go delete mode 100644 vendor/github.com/jcmturner/gofork/LICENSE delete mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/README.md delete mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go delete mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/common.go delete mode 100644 vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go delete mode 100644 vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 vendor/github.com/klauspost/pgzip/.gitignore delete mode 100644 vendor/github.com/klauspost/pgzip/.travis.yml delete mode 100644 vendor/github.com/klauspost/pgzip/GO_LICENSE delete mode 100644 vendor/github.com/klauspost/pgzip/LICENSE delete mode 100644 vendor/github.com/klauspost/pgzip/README.md delete mode 100644 vendor/github.com/klauspost/pgzip/gunzip.go delete mode 100644 vendor/github.com/klauspost/pgzip/gzip.go delete mode 100644 vendor/github.com/oliveagle/jsonpath/.gitignore delete mode 100644 vendor/github.com/oliveagle/jsonpath/.travis.yml delete mode 100644 vendor/github.com/oliveagle/jsonpath/jsonpath.go delete mode 100644 vendor/github.com/oliveagle/jsonpath/readme.md delete mode 100644 vendor/github.com/shopspring/decimal/.gitignore delete mode 100644 vendor/github.com/shopspring/decimal/.travis.yml delete mode 100644 vendor/github.com/shopspring/decimal/CHANGELOG.md delete mode 100644 vendor/github.com/shopspring/decimal/LICENSE delete mode 100644 vendor/github.com/shopspring/decimal/README.md delete mode 100644 
vendor/github.com/shopspring/decimal/decimal-go.go delete mode 100644 vendor/github.com/shopspring/decimal/decimal.go delete mode 100644 vendor/github.com/shopspring/decimal/rounding.go delete mode 100644 vendor/github.com/spf13/cast/.gitignore delete mode 100644 vendor/github.com/spf13/cast/LICENSE delete mode 100644 vendor/github.com/spf13/cast/Makefile delete mode 100644 vendor/github.com/spf13/cast/README.md delete mode 100644 vendor/github.com/spf13/cast/cast.go delete mode 100644 vendor/github.com/spf13/cast/caste.go delete mode 100644 vendor/github.com/spf13/cast/timeformattype_string.go delete mode 100644 vendor/github.com/valyala/bytebufferpool/.travis.yml delete mode 100644 vendor/github.com/valyala/bytebufferpool/LICENSE delete mode 100644 vendor/github.com/valyala/bytebufferpool/README.md delete mode 100644 vendor/github.com/valyala/bytebufferpool/bytebuffer.go delete mode 100644 vendor/github.com/valyala/bytebufferpool/doc.go delete mode 100644 vendor/github.com/valyala/bytebufferpool/pool.go delete mode 100644 vendor/github.com/valyala/fasttemplate/LICENSE delete mode 100644 vendor/github.com/valyala/fasttemplate/README.md delete mode 100644 vendor/github.com/valyala/fasttemplate/template.go delete mode 100644 vendor/github.com/valyala/fasttemplate/unsafe.go delete mode 100644 vendor/github.com/valyala/fasttemplate/unsafe_gae.go delete mode 100644 vendor/golang.org/x/crypto/md4/md4.go delete mode 100644 vendor/golang.org/x/crypto/md4/md4block.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh delete mode 100644 vendor/gopkg.in/jcmturner/aescts.v1/.gitignore delete mode 100644 vendor/gopkg.in/jcmturner/aescts.v1/README.md delete mode 100644 vendor/gopkg.in/jcmturner/aescts.v1/aescts.go delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE delete mode 100644 vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/LICENSE delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/asn1tools/tools.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/ASExchange.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/TGSExchange.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/cache.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/client.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/http.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/network.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/passwd.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/client/session.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/config/hosts.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/config/krb5conf.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/ccache.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/credentials.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha1-96.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha256-128.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha1-96.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha384-192.go delete mode 
100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/common/common.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/crypto.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/des3-cbc-sha1-kd.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/etype/etype.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rc4-hmac.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/encryption.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/keyDerivation.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/nfold.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/encryption.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/keyDerivation.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/checksum.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/encryption.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/keyDerivation.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/msgtype.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/encryption.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/keyDerivation.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/ContextFlags.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/MechType.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/NegotiationToken.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/WrapToken.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/gssapi.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/krb5Token.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/addrtype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/adtype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/errorcode/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/etypeID/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/flags/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/keyusage/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/msgtype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/nametype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/iana/patype/constants.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/changepasswddata.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/message.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/passwd.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/keytab/keytab.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/krberror/error.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APRep.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APReq.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCRep.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCReq.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBCred.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBError.go delete mode 
100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBPriv.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBSafe.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/messages/Ticket.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/claims.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/filetime.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/group_membership.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/kerb_sid_and_attributes.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/rpc_unicode_string.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/sid.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/user_session_key.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_claims.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/credentials_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_claims.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/kerb_validation_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_info_buffer.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_type.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/s4u_delegation_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/signature_data.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/pac/upn_dns_info.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/Authenticator.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/AuthorizationData.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/Cryptosystem.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/HostAddress.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/KerberosFlags.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/PAData.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/PrincipalName.go delete mode 100644 vendor/gopkg.in/jcmturner/gokrb5.v5/types/TypedData.go delete mode 100644 vendor/gopkg.in/jcmturner/rpc.v0/LICENSE delete mode 100644 vendor/gopkg.in/jcmturner/rpc.v0/ndr/error.go delete mode 100644 vendor/gopkg.in/jcmturner/rpc.v0/ndr/ndr.go delete mode 100644 vendor/k8s.io/utils/env/env.go rename vendor/{gopkg.in/jcmturner/aescts.v1 => sigs.k8s.io/yaml/goyaml.v2}/LICENSE (100%) rename vendor/{github.com/oliveagle/jsonpath/LICENSE => sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml} (50%) rename vendor/{k8s.io/utils/env/doc.go => sigs.k8s.io/yaml/goyaml.v2/NOTICE} (76%) create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go create mode 100644 
vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go delete mode 100644 vendor/upper.io/db.v3/.gitignore delete mode 100644 vendor/upper.io/db.v3/CHANGELOG.md delete mode 100644 vendor/upper.io/db.v3/LICENSE delete mode 100644 vendor/upper.io/db.v3/Makefile delete mode 100644 vendor/upper.io/db.v3/README.md delete mode 100644 vendor/upper.io/db.v3/collection.go delete mode 100644 vendor/upper.io/db.v3/comparison.go delete mode 100644 vendor/upper.io/db.v3/compound.go delete mode 100644 vendor/upper.io/db.v3/cond.go delete mode 100644 vendor/upper.io/db.v3/connection_url.go delete mode 100644 vendor/upper.io/db.v3/constraint.go delete mode 100644 vendor/upper.io/db.v3/database.go delete mode 100644 vendor/upper.io/db.v3/db.go delete mode 100644 vendor/upper.io/db.v3/env.go delete mode 100644 vendor/upper.io/db.v3/errors.go delete mode 100644 vendor/upper.io/db.v3/function.go delete mode 100644 vendor/upper.io/db.v3/internal/cache/cache.go delete mode 100644 vendor/upper.io/db.v3/internal/cache/hashstructure/LICENSE delete mode 100644 vendor/upper.io/db.v3/internal/cache/hashstructure/README.md delete mode 100644 vendor/upper.io/db.v3/internal/cache/hashstructure/hashstructure.go delete mode 100644 vendor/upper.io/db.v3/internal/cache/hashstructure/include.go delete mode 100644 vendor/upper.io/db.v3/internal/cache/interface.go delete mode 100644 vendor/upper.io/db.v3/internal/immutable/immutable.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/collection.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/compat/query.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/compat/query_go18.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/database.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/column.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/column_value.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/columns.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/database.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/default.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/group_by.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/hash.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/interfaces.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/join.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/order_by.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/raw.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/returning.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/statement.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/table.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/template.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/utilities.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/value.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/exql/where.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/result.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/sqladapter.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/statement.go delete mode 100644 vendor/upper.io/db.v3/internal/sqladapter/tx.go delete mode 100644 
vendor/upper.io/db.v3/intersection.go
 delete mode 100644 vendor/upper.io/db.v3/lib/reflectx/LICENSE
 delete mode 100644 vendor/upper.io/db.v3/lib/reflectx/README.md
 delete mode 100644 vendor/upper.io/db.v3/lib/reflectx/reflect.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/batch.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/builder.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/comparison.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/convert.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/custom_types.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/delete.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/errors.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/fetch.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/insert.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/interfaces.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/paginate.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/scanner.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/select.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/template.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/update.go
 delete mode 100644 vendor/upper.io/db.v3/lib/sqlbuilder/wrapper.go
 delete mode 100644 vendor/upper.io/db.v3/logger.go
 delete mode 100644 vendor/upper.io/db.v3/marshal.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/Makefile
 delete mode 100644 vendor/upper.io/db.v3/mysql/README.md
 delete mode 100644 vendor/upper.io/db.v3/mysql/collection.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/connection.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/custom_types.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/database.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/docker-compose.yml
 delete mode 100644 vendor/upper.io/db.v3/mysql/mysql.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/template.go
 delete mode 100644 vendor/upper.io/db.v3/mysql/tx.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/Makefile
 delete mode 100644 vendor/upper.io/db.v3/postgresql/README.md
 delete mode 100644 vendor/upper.io/db.v3/postgresql/collection.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/connection.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/custom_types.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/database.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/docker-compose.yml
 delete mode 100644 vendor/upper.io/db.v3/postgresql/postgresql.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/template.go
 delete mode 100644 vendor/upper.io/db.v3/postgresql/tx.go
 delete mode 100644 vendor/upper.io/db.v3/raw.go
 delete mode 100644 vendor/upper.io/db.v3/result.go
 delete mode 100644 vendor/upper.io/db.v3/settings.go
 delete mode 100644 vendor/upper.io/db.v3/tx.go
 delete mode 100644 vendor/upper.io/db.v3/union.go
 delete mode 100644 vendor/upper.io/db.v3/wrapper.go

diff --git a/Wire.go b/Wire.go
index 0d279f67c22..7d2cd72d88e 100644
--- a/Wire.go
+++ b/Wire.go
@@ -125,6 +125,7 @@ import (
 	"github.com/devtron-labs/devtron/pkg/chart/gitOpsConfig"
 	chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository"
 	"github.com/devtron-labs/devtron/pkg/commonService"
+	"github.com/devtron-labs/devtron/pkg/configDiff"
 	delete2 "github.com/devtron-labs/devtron/pkg/delete"
 	deployment2 "github.com/devtron-labs/devtron/pkg/deployment"
 	"github.com/devtron-labs/devtron/pkg/deployment/common"
@@ -711,6 +712,13 @@ func InitializeApp() (*App, error) {
 		scopedVariable.NewScopedVariableRestHandlerImpl,
 		wire.Bind(new(scopedVariable.ScopedVariableRestHandler), new(*scopedVariable.ScopedVariableRestHandlerImpl)),
+		router.NewDeploymentConfigurationRouter,
+		wire.Bind(new(router.DeploymentConfigurationRouter), new(*router.DeploymentConfigurationRouterImpl)),
+		restHandler.NewDeploymentConfigurationRestHandlerImpl,
+		wire.Bind(new(restHandler.DeploymentConfigurationRestHandler), new(*restHandler.DeploymentConfigurationRestHandlerImpl)),
+		configDiff.NewDeploymentConfigurationServiceImpl,
+		wire.Bind(new(configDiff.DeploymentConfigurationService), new(*configDiff.DeploymentConfigurationServiceImpl)),
+
 		router.NewTelemetryRouterImpl,
 		wire.Bind(new(router.TelemetryRouter), new(*router.TelemetryRouterImpl)),
 		restHandler.NewTelemetryRestHandlerImpl,
diff --git a/api/bean/AppView.go b/api/bean/AppView.go
index 03ca0809cb8..b1c350690ef 100644
--- a/api/bean/AppView.go
+++ b/api/bean/AppView.go
@@ -144,6 +144,7 @@ type AppEnvironmentContainer struct {
 type DeploymentDetailContainer struct {
 	InstalledAppId int `json:"installedAppId,omitempty"`
 	AppId int `json:"appId,omitempty"`
+	PcoId int `json:"pcoId"`
 	CdPipelineId int `json:"cdPipelineId,omitempty"`
 	TriggerType string `json:"triggerType,omitempty"`
 	ParentEnvironmentName string `json:"parentEnvironmentName"`
@@ -183,6 +184,8 @@ type DeploymentDetailContainer struct {
 	HelmPackageName string `json:"helmPackageName"`
 	HelmReleaseInstallStatus string `json:"-"`
 	DeploymentConfig *bean.DeploymentConfig `json:"-"`
+	IsPipelineTriggered bool `json:"isPipelineTriggered"`
+	ReleaseMode string `json:"releaseMode"`
 }

 type AppDetailContainer struct {
diff --git a/api/helm-app/gRPC/applist.pb.go b/api/helm-app/gRPC/applist.pb.go
index 6f5d4b7ae40..f2c54b4b67b 100644
--- a/api/helm-app/gRPC/applist.pb.go
+++ b/api/helm-app/gRPC/applist.pb.go
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.31.0 // protoc v3.9.1 // source: api/helm-app/gRPC/applist.proto @@ -4820,7 +4820,7 @@ var file_api_helm_app_gRPC_applist_proto_rawDesc = []byte{ 0x38, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x48, 0x10, 0x01, 0x12, 0x0a, 0x0a, - 0x06, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x02, 0x32, 0xe7, 0x0c, 0x0a, 0x12, 0x41, 0x70, + 0x06, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x10, 0x02, 0x32, 0xa6, 0x0d, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x0f, 0x2e, 0x41, 0x70, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, @@ -4923,10 +4923,14 @@ var file_api_helm_app_gRPC_applist_proto_rawDesc = []byte{ 0x78, 0x41, 0x70, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x15, 0x2e, 0x46, 0x6c, 0x75, 0x78, 0x41, 0x70, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x46, 0x6c, 0x75, 0x78, 0x41, 0x70, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x22, 0x00, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x74, 0x72, 0x6f, 0x6e, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x6b, - 0x75, 0x62, 0x65, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x62, 0x65, 0x61, 0x6e, 0x2f, 0x67, 0x72, 0x70, - 0x63, 0x2f, 0x67, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6c, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, + 0x73, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x1a, 0x12, 0x2e, 0x44, + 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x65, 0x64, 0x41, 0x70, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x22, 0x00, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x64, 0x65, 0x76, 0x74, 0x72, 0x6f, 0x6e, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x6b, 0x75, + 0x62, 0x65, 0x6c, 0x69, 0x6e, 0x6b, 0x2f, 0x62, 0x65, 0x61, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, + 0x2f, 0x67, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4943,7 +4947,7 @@ func file_api_helm_app_gRPC_applist_proto_rawDescGZIP() []byte { var file_api_helm_app_gRPC_applist_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_api_helm_app_gRPC_applist_proto_msgTypes = make([]protoimpl.MessageInfo, 63) -var file_api_helm_app_gRPC_applist_proto_goTypes = []any{ +var file_api_helm_app_gRPC_applist_proto_goTypes = []interface{}{ (RemoteConnectionMethod)(0), // 0: RemoteConnectionMethod (*ClusterConfig)(nil), // 1: ClusterConfig (*AppListRequest)(nil), // 2: AppListRequest @@ -5095,32 +5099,34 @@ var file_api_helm_app_gRPC_applist_proto_depIdxs = []int32{ 57, // 81: ApplicationService.ValidateOCIRegistry:input_type -> RegistryCredential 3, // 82: ApplicationService.GetResourceTreeForExternalResources:input_type -> ExternalResourceTreeRequest 7, // 83: ApplicationService.GetFluxAppDetail:input_type -> FluxAppDetailRequest - 10, // 84: ApplicationService.ListApplications:output_type -> DeployedAppList - 5, // 85: ApplicationService.ListFluxApplications:output_type -> FluxApplicationList - 
14, // 86: ApplicationService.GetAppDetail:output_type -> AppDetail - 15, // 87: ApplicationService.GetAppStatus:output_type -> AppStatus - 29, // 88: ApplicationService.Hibernate:output_type -> HibernateResponse - 29, // 89: ApplicationService.UnHibernate:output_type -> HibernateResponse - 31, // 90: ApplicationService.GetDeploymentHistory:output_type -> HelmAppDeploymentHistory - 32, // 91: ApplicationService.GetValuesYaml:output_type -> ReleaseInfo - 34, // 92: ApplicationService.GetDesiredManifest:output_type -> DesiredManifestResponse - 35, // 93: ApplicationService.UninstallRelease:output_type -> UninstallReleaseResponse - 38, // 94: ApplicationService.UpgradeRelease:output_type -> UpgradeReleaseResponse - 40, // 95: ApplicationService.GetDeploymentDetail:output_type -> DeploymentDetailResponse - 44, // 96: ApplicationService.InstallRelease:output_type -> InstallReleaseResponse - 38, // 97: ApplicationService.UpgradeReleaseWithChartInfo:output_type -> UpgradeReleaseResponse - 45, // 98: ApplicationService.IsReleaseInstalled:output_type -> BooleanResponse - 45, // 99: ApplicationService.RollbackRelease:output_type -> BooleanResponse - 47, // 100: ApplicationService.TemplateChart:output_type -> TemplateChartResponse - 48, // 101: ApplicationService.TemplateChartBulk:output_type -> BulkTemplateChartResponse - 50, // 102: ApplicationService.InstallReleaseWithCustomChart:output_type -> HelmInstallCustomResponse - 56, // 103: ApplicationService.GetNotes:output_type -> ChartNotesResponse - 38, // 104: ApplicationService.UpgradeReleaseWithCustomChart:output_type -> UpgradeReleaseResponse - 61, // 105: ApplicationService.ValidateOCIRegistry:output_type -> OCIRegistryResponse - 18, // 106: ApplicationService.GetResourceTreeForExternalResources:output_type -> ResourceTreeResponse - 8, // 107: ApplicationService.GetFluxAppDetail:output_type -> FluxAppDetail - 84, // [84:108] is the sub-list for method output_type - 60, // [60:84] is the sub-list for method input_type + 36, // 84: ApplicationService.GetReleaseDetails:input_type -> ReleaseIdentifier + 10, // 85: ApplicationService.ListApplications:output_type -> DeployedAppList + 5, // 86: ApplicationService.ListFluxApplications:output_type -> FluxApplicationList + 14, // 87: ApplicationService.GetAppDetail:output_type -> AppDetail + 15, // 88: ApplicationService.GetAppStatus:output_type -> AppStatus + 29, // 89: ApplicationService.Hibernate:output_type -> HibernateResponse + 29, // 90: ApplicationService.UnHibernate:output_type -> HibernateResponse + 31, // 91: ApplicationService.GetDeploymentHistory:output_type -> HelmAppDeploymentHistory + 32, // 92: ApplicationService.GetValuesYaml:output_type -> ReleaseInfo + 34, // 93: ApplicationService.GetDesiredManifest:output_type -> DesiredManifestResponse + 35, // 94: ApplicationService.UninstallRelease:output_type -> UninstallReleaseResponse + 38, // 95: ApplicationService.UpgradeRelease:output_type -> UpgradeReleaseResponse + 40, // 96: ApplicationService.GetDeploymentDetail:output_type -> DeploymentDetailResponse + 44, // 97: ApplicationService.InstallRelease:output_type -> InstallReleaseResponse + 38, // 98: ApplicationService.UpgradeReleaseWithChartInfo:output_type -> UpgradeReleaseResponse + 45, // 99: ApplicationService.IsReleaseInstalled:output_type -> BooleanResponse + 45, // 100: ApplicationService.RollbackRelease:output_type -> BooleanResponse + 47, // 101: ApplicationService.TemplateChart:output_type -> TemplateChartResponse + 48, // 102: ApplicationService.TemplateChartBulk:output_type -> 
BulkTemplateChartResponse + 50, // 103: ApplicationService.InstallReleaseWithCustomChart:output_type -> HelmInstallCustomResponse + 56, // 104: ApplicationService.GetNotes:output_type -> ChartNotesResponse + 38, // 105: ApplicationService.UpgradeReleaseWithCustomChart:output_type -> UpgradeReleaseResponse + 61, // 106: ApplicationService.ValidateOCIRegistry:output_type -> OCIRegistryResponse + 18, // 107: ApplicationService.GetResourceTreeForExternalResources:output_type -> ResourceTreeResponse + 8, // 108: ApplicationService.GetFluxAppDetail:output_type -> FluxAppDetail + 11, // 109: ApplicationService.GetReleaseDetails:output_type -> DeployedAppDetail + 85, // [85:110] is the sub-list for method output_type + 60, // [60:85] is the sub-list for method input_type 60, // [60:60] is the sub-list for extension type_name 60, // [60:60] is the sub-list for extension extendee 0, // [0:60] is the sub-list for field type_name @@ -5132,7 +5138,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_api_helm_app_gRPC_applist_proto_msgTypes[0].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ClusterConfig); i { case 0: return &v.state @@ -5144,7 +5150,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[1].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppListRequest); i { case 0: return &v.state @@ -5156,7 +5162,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[2].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExternalResourceTreeRequest); i { case 0: return &v.state @@ -5168,7 +5174,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[3].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExternalResourceDetail); i { case 0: return &v.state @@ -5180,7 +5186,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[4].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FluxApplicationList); i { case 0: return &v.state @@ -5192,7 +5198,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[5].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FluxApplication); i { case 0: return &v.state @@ -5204,7 +5210,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[6].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FluxAppDetailRequest); i { case 0: return &v.state @@ -5216,7 +5222,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[7].Exporter = func(v any, i int) any { + 
file_api_helm_app_gRPC_applist_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FluxAppDetail); i { case 0: return &v.state @@ -5228,7 +5234,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[8].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FluxAppStatusDetail); i { case 0: return &v.state @@ -5240,7 +5246,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[9].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeployedAppList); i { case 0: return &v.state @@ -5252,7 +5258,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[10].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeployedAppDetail); i { case 0: return &v.state @@ -5264,7 +5270,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[11].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnvironmentDetails); i { case 0: return &v.state @@ -5276,7 +5282,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[12].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppDetailRequest); i { case 0: return &v.state @@ -5288,7 +5294,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[13].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppDetail); i { case 0: return &v.state @@ -5300,7 +5306,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[14].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppStatus); i { case 0: return &v.state @@ -5312,7 +5318,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[15].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReleaseStatus); i { case 0: return &v.state @@ -5324,7 +5330,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[16].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChartMetadata); i { case 0: return &v.state @@ -5336,7 +5342,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[17].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceTreeResponse); i { 
case 0: return &v.state @@ -5348,7 +5354,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[18].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceNode); i { case 0: return &v.state @@ -5360,7 +5366,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[19].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InfoItem); i { case 0: return &v.state @@ -5372,7 +5378,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[20].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HealthStatus); i { case 0: return &v.state @@ -5384,7 +5390,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[21].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceNetworkingInfo); i { case 0: return &v.state @@ -5396,7 +5402,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[22].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceRef); i { case 0: return &v.state @@ -5408,7 +5414,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[23].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PodMetadata); i { case 0: return &v.state @@ -5420,7 +5426,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[24].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EphemeralContainerData); i { case 0: return &v.state @@ -5432,7 +5438,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[25].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HibernateRequest); i { case 0: return &v.state @@ -5444,7 +5450,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[26].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ObjectIdentifier); i { case 0: return &v.state @@ -5456,7 +5462,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[27].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HibernateStatus); i { case 0: return &v.state @@ -5468,7 +5474,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - 
file_api_helm_app_gRPC_applist_proto_msgTypes[28].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HibernateResponse); i { case 0: return &v.state @@ -5480,7 +5486,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[29].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HelmAppDeploymentDetail); i { case 0: return &v.state @@ -5492,7 +5498,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[30].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HelmAppDeploymentHistory); i { case 0: return &v.state @@ -5504,7 +5510,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[31].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReleaseInfo); i { case 0: return &v.state @@ -5516,7 +5522,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[32].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ObjectRequest); i { case 0: return &v.state @@ -5528,7 +5534,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[33].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DesiredManifestResponse); i { case 0: return &v.state @@ -5540,7 +5546,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[34].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UninstallReleaseResponse); i { case 0: return &v.state @@ -5552,7 +5558,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[35].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReleaseIdentifier); i { case 0: return &v.state @@ -5564,7 +5570,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[36].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeReleaseRequest); i { case 0: return &v.state @@ -5576,7 +5582,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[37].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeReleaseResponse); i { case 0: return &v.state @@ -5588,7 +5594,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[38].Exporter = func(v any, i int) any { + 
file_api_helm_app_gRPC_applist_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeploymentDetailRequest); i { case 0: return &v.state @@ -5600,7 +5606,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[39].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeploymentDetailResponse); i { case 0: return &v.state @@ -5612,7 +5618,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[40].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChartRepository); i { case 0: return &v.state @@ -5624,7 +5630,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[41].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InstallReleaseRequest); i { case 0: return &v.state @@ -5636,7 +5642,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[42].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BulkInstallReleaseRequest); i { case 0: return &v.state @@ -5648,7 +5654,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[43].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InstallReleaseResponse); i { case 0: return &v.state @@ -5660,7 +5666,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[44].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BooleanResponse); i { case 0: return &v.state @@ -5672,7 +5678,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[45].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RollbackReleaseRequest); i { case 0: return &v.state @@ -5684,7 +5690,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[46].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TemplateChartResponse); i { case 0: return &v.state @@ -5696,7 +5702,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[47].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*BulkTemplateChartResponse); i { case 0: return &v.state @@ -5708,7 +5714,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[48].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[48].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*HelmInstallCustomRequest); i { case 0: return &v.state @@ -5720,7 +5726,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[49].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*HelmInstallCustomResponse); i { case 0: return &v.state @@ -5732,7 +5738,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[50].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChartContent); i { case 0: return &v.state @@ -5744,7 +5750,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[51].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Gvk); i { case 0: return &v.state @@ -5756,7 +5762,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[52].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceFilter); i { case 0: return &v.state @@ -5768,7 +5774,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[53].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceIdentifier); i { case 0: return &v.state @@ -5780,7 +5786,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[54].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ResourceTreeFilter); i { case 0: return &v.state @@ -5792,7 +5798,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[55].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ChartNotesResponse); i { case 0: return &v.state @@ -5804,7 +5810,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[56].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RegistryCredential); i { case 0: return &v.state @@ -5816,7 +5822,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[57].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoteConnectionConfig); i { case 0: return &v.state @@ -5828,7 +5834,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[58].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProxyConfig); i { case 0: return &v.state @@ -5840,7 +5846,7
@@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[59].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SSHTunnelConfig); i { case 0: return &v.state @@ -5852,7 +5858,7 @@ func file_api_helm_app_gRPC_applist_proto_init() { return nil } } - file_api_helm_app_gRPC_applist_proto_msgTypes[60].Exporter = func(v any, i int) any { + file_api_helm_app_gRPC_applist_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*OCIRegistryResponse); i { case 0: return &v.state diff --git a/api/helm-app/gRPC/applist.proto b/api/helm-app/gRPC/applist.proto index 4d36fd07e53..f904a4b25de 100644 --- a/api/helm-app/gRPC/applist.proto +++ b/api/helm-app/gRPC/applist.proto @@ -44,6 +44,7 @@ service ApplicationService { rpc ValidateOCIRegistry(RegistryCredential) returns(OCIRegistryResponse) {} rpc GetResourceTreeForExternalResources(ExternalResourceTreeRequest) returns(ResourceTreeResponse){} rpc GetFluxAppDetail(FluxAppDetailRequest)returns(FluxAppDetail){} + rpc GetReleaseDetails(ReleaseIdentifier)returns(DeployedAppDetail){} } diff --git a/api/helm-app/gRPC/applist_grpc.pb.go b/api/helm-app/gRPC/applist_grpc.pb.go index 3c8416b9aee..ee3364edc50 100644 --- a/api/helm-app/gRPC/applist_grpc.pb.go +++ b/api/helm-app/gRPC/applist_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 +// - protoc-gen-go-grpc v1.3.0 // - protoc v3.9.1 // source: api/helm-app/gRPC/applist.proto @@ -18,6 +18,34 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + ApplicationService_ListApplications_FullMethodName = "/ApplicationService/ListApplications" + ApplicationService_ListFluxApplications_FullMethodName = "/ApplicationService/ListFluxApplications" + ApplicationService_GetAppDetail_FullMethodName = "/ApplicationService/GetAppDetail" + ApplicationService_GetAppStatus_FullMethodName = "/ApplicationService/GetAppStatus" + ApplicationService_Hibernate_FullMethodName = "/ApplicationService/Hibernate" + ApplicationService_UnHibernate_FullMethodName = "/ApplicationService/UnHibernate" + ApplicationService_GetDeploymentHistory_FullMethodName = "/ApplicationService/GetDeploymentHistory" + ApplicationService_GetValuesYaml_FullMethodName = "/ApplicationService/GetValuesYaml" + ApplicationService_GetDesiredManifest_FullMethodName = "/ApplicationService/GetDesiredManifest" + ApplicationService_UninstallRelease_FullMethodName = "/ApplicationService/UninstallRelease" + ApplicationService_UpgradeRelease_FullMethodName = "/ApplicationService/UpgradeRelease" + ApplicationService_GetDeploymentDetail_FullMethodName = "/ApplicationService/GetDeploymentDetail" + ApplicationService_InstallRelease_FullMethodName = "/ApplicationService/InstallRelease" + ApplicationService_UpgradeReleaseWithChartInfo_FullMethodName = "/ApplicationService/UpgradeReleaseWithChartInfo" + ApplicationService_IsReleaseInstalled_FullMethodName = "/ApplicationService/IsReleaseInstalled" + ApplicationService_RollbackRelease_FullMethodName = "/ApplicationService/RollbackRelease" + ApplicationService_TemplateChart_FullMethodName = "/ApplicationService/TemplateChart" + ApplicationService_TemplateChartBulk_FullMethodName = "/ApplicationService/TemplateChartBulk" + ApplicationService_InstallReleaseWithCustomChart_FullMethodName = 
"/ApplicationService/InstallReleaseWithCustomChart" + ApplicationService_GetNotes_FullMethodName = "/ApplicationService/GetNotes" + ApplicationService_UpgradeReleaseWithCustomChart_FullMethodName = "/ApplicationService/UpgradeReleaseWithCustomChart" + ApplicationService_ValidateOCIRegistry_FullMethodName = "/ApplicationService/ValidateOCIRegistry" + ApplicationService_GetResourceTreeForExternalResources_FullMethodName = "/ApplicationService/GetResourceTreeForExternalResources" + ApplicationService_GetFluxAppDetail_FullMethodName = "/ApplicationService/GetFluxAppDetail" + ApplicationService_GetReleaseDetails_FullMethodName = "/ApplicationService/GetReleaseDetails" +) + // ApplicationServiceClient is the client API for ApplicationService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -46,6 +74,7 @@ type ApplicationServiceClient interface { ValidateOCIRegistry(ctx context.Context, in *RegistryCredential, opts ...grpc.CallOption) (*OCIRegistryResponse, error) GetResourceTreeForExternalResources(ctx context.Context, in *ExternalResourceTreeRequest, opts ...grpc.CallOption) (*ResourceTreeResponse, error) GetFluxAppDetail(ctx context.Context, in *FluxAppDetailRequest, opts ...grpc.CallOption) (*FluxAppDetail, error) + GetReleaseDetails(ctx context.Context, in *ReleaseIdentifier, opts ...grpc.CallOption) (*DeployedAppDetail, error) } type applicationServiceClient struct { @@ -57,7 +86,7 @@ func NewApplicationServiceClient(cc grpc.ClientConnInterface) ApplicationService } func (c *applicationServiceClient) ListApplications(ctx context.Context, in *AppListRequest, opts ...grpc.CallOption) (ApplicationService_ListApplicationsClient, error) { - stream, err := c.cc.NewStream(ctx, &ApplicationService_ServiceDesc.Streams[0], "/ApplicationService/ListApplications", opts...) + stream, err := c.cc.NewStream(ctx, &ApplicationService_ServiceDesc.Streams[0], ApplicationService_ListApplications_FullMethodName, opts...) if err != nil { return nil, err } @@ -89,7 +118,7 @@ func (x *applicationServiceListApplicationsClient) Recv() (*DeployedAppList, err } func (c *applicationServiceClient) ListFluxApplications(ctx context.Context, in *AppListRequest, opts ...grpc.CallOption) (ApplicationService_ListFluxApplicationsClient, error) { - stream, err := c.cc.NewStream(ctx, &ApplicationService_ServiceDesc.Streams[1], "/ApplicationService/ListFluxApplications", opts...) + stream, err := c.cc.NewStream(ctx, &ApplicationService_ServiceDesc.Streams[1], ApplicationService_ListFluxApplications_FullMethodName, opts...) if err != nil { return nil, err } @@ -122,7 +151,7 @@ func (x *applicationServiceListFluxApplicationsClient) Recv() (*FluxApplicationL func (c *applicationServiceClient) GetAppDetail(ctx context.Context, in *AppDetailRequest, opts ...grpc.CallOption) (*AppDetail, error) { out := new(AppDetail) - err := c.cc.Invoke(ctx, "/ApplicationService/GetAppDetail", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetAppDetail_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -131,7 +160,7 @@ func (c *applicationServiceClient) GetAppDetail(ctx context.Context, in *AppDeta func (c *applicationServiceClient) GetAppStatus(ctx context.Context, in *AppDetailRequest, opts ...grpc.CallOption) (*AppStatus, error) { out := new(AppStatus) - err := c.cc.Invoke(ctx, "/ApplicationService/GetAppStatus", in, out, opts...) 
+ err := c.cc.Invoke(ctx, ApplicationService_GetAppStatus_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -140,7 +169,7 @@ func (c *applicationServiceClient) GetAppStatus(ctx context.Context, in *AppDeta func (c *applicationServiceClient) Hibernate(ctx context.Context, in *HibernateRequest, opts ...grpc.CallOption) (*HibernateResponse, error) { out := new(HibernateResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/Hibernate", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_Hibernate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -149,7 +178,7 @@ func (c *applicationServiceClient) Hibernate(ctx context.Context, in *HibernateR func (c *applicationServiceClient) UnHibernate(ctx context.Context, in *HibernateRequest, opts ...grpc.CallOption) (*HibernateResponse, error) { out := new(HibernateResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/UnHibernate", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_UnHibernate_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -158,7 +187,7 @@ func (c *applicationServiceClient) UnHibernate(ctx context.Context, in *Hibernat func (c *applicationServiceClient) GetDeploymentHistory(ctx context.Context, in *AppDetailRequest, opts ...grpc.CallOption) (*HelmAppDeploymentHistory, error) { out := new(HelmAppDeploymentHistory) - err := c.cc.Invoke(ctx, "/ApplicationService/GetDeploymentHistory", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetDeploymentHistory_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -167,7 +196,7 @@ func (c *applicationServiceClient) GetDeploymentHistory(ctx context.Context, in func (c *applicationServiceClient) GetValuesYaml(ctx context.Context, in *AppDetailRequest, opts ...grpc.CallOption) (*ReleaseInfo, error) { out := new(ReleaseInfo) - err := c.cc.Invoke(ctx, "/ApplicationService/GetValuesYaml", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetValuesYaml_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -176,7 +205,7 @@ func (c *applicationServiceClient) GetValuesYaml(ctx context.Context, in *AppDet func (c *applicationServiceClient) GetDesiredManifest(ctx context.Context, in *ObjectRequest, opts ...grpc.CallOption) (*DesiredManifestResponse, error) { out := new(DesiredManifestResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/GetDesiredManifest", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetDesiredManifest_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -185,7 +214,7 @@ func (c *applicationServiceClient) GetDesiredManifest(ctx context.Context, in *O func (c *applicationServiceClient) UninstallRelease(ctx context.Context, in *ReleaseIdentifier, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) { out := new(UninstallReleaseResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/UninstallRelease", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_UninstallRelease_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -194,7 +223,7 @@ func (c *applicationServiceClient) UninstallRelease(ctx context.Context, in *Rel func (c *applicationServiceClient) UpgradeRelease(ctx context.Context, in *UpgradeReleaseRequest, opts ...grpc.CallOption) (*UpgradeReleaseResponse, error) { out := new(UpgradeReleaseResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/UpgradeRelease", in, out, opts...) 
+ err := c.cc.Invoke(ctx, ApplicationService_UpgradeRelease_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -203,7 +232,7 @@ func (c *applicationServiceClient) UpgradeRelease(ctx context.Context, in *Upgra func (c *applicationServiceClient) GetDeploymentDetail(ctx context.Context, in *DeploymentDetailRequest, opts ...grpc.CallOption) (*DeploymentDetailResponse, error) { out := new(DeploymentDetailResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/GetDeploymentDetail", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetDeploymentDetail_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -212,7 +241,7 @@ func (c *applicationServiceClient) GetDeploymentDetail(ctx context.Context, in * func (c *applicationServiceClient) InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) { out := new(InstallReleaseResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/InstallRelease", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_InstallRelease_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -221,7 +250,7 @@ func (c *applicationServiceClient) InstallRelease(ctx context.Context, in *Insta func (c *applicationServiceClient) UpgradeReleaseWithChartInfo(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*UpgradeReleaseResponse, error) { out := new(UpgradeReleaseResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/UpgradeReleaseWithChartInfo", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_UpgradeReleaseWithChartInfo_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -230,7 +259,7 @@ func (c *applicationServiceClient) UpgradeReleaseWithChartInfo(ctx context.Conte func (c *applicationServiceClient) IsReleaseInstalled(ctx context.Context, in *ReleaseIdentifier, opts ...grpc.CallOption) (*BooleanResponse, error) { out := new(BooleanResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/IsReleaseInstalled", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_IsReleaseInstalled_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -239,7 +268,7 @@ func (c *applicationServiceClient) IsReleaseInstalled(ctx context.Context, in *R func (c *applicationServiceClient) RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*BooleanResponse, error) { out := new(BooleanResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/RollbackRelease", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_RollbackRelease_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -248,7 +277,7 @@ func (c *applicationServiceClient) RollbackRelease(ctx context.Context, in *Roll func (c *applicationServiceClient) TemplateChart(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*TemplateChartResponse, error) { out := new(TemplateChartResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/TemplateChart", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_TemplateChart_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -257,7 +286,7 @@ func (c *applicationServiceClient) TemplateChart(ctx context.Context, in *Instal func (c *applicationServiceClient) TemplateChartBulk(ctx context.Context, in *BulkInstallReleaseRequest, opts ...grpc.CallOption) (*BulkTemplateChartResponse, error) { out := new(BulkTemplateChartResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/TemplateChartBulk", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_TemplateChartBulk_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -266,7 +295,7 @@ func (c *applicationServiceClient) TemplateChartBulk(ctx context.Context, in *Bu func (c *applicationServiceClient) InstallReleaseWithCustomChart(ctx context.Context, in *HelmInstallCustomRequest, opts ...grpc.CallOption) (*HelmInstallCustomResponse, error) { out := new(HelmInstallCustomResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/InstallReleaseWithCustomChart", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_InstallReleaseWithCustomChart_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -275,7 +304,7 @@ func (c *applicationServiceClient) InstallReleaseWithCustomChart(ctx context.Con func (c *applicationServiceClient) GetNotes(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*ChartNotesResponse, error) { out := new(ChartNotesResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/GetNotes", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetNotes_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -284,7 +313,7 @@ func (c *applicationServiceClient) GetNotes(ctx context.Context, in *InstallRele func (c *applicationServiceClient) UpgradeReleaseWithCustomChart(ctx context.Context, in *UpgradeReleaseRequest, opts ...grpc.CallOption) (*UpgradeReleaseResponse, error) { out := new(UpgradeReleaseResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/UpgradeReleaseWithCustomChart", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_UpgradeReleaseWithCustomChart_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -293,7 +322,7 @@ func (c *applicationServiceClient) UpgradeReleaseWithCustomChart(ctx context.Con func (c *applicationServiceClient) ValidateOCIRegistry(ctx context.Context, in *RegistryCredential, opts ...grpc.CallOption) (*OCIRegistryResponse, error) { out := new(OCIRegistryResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/ValidateOCIRegistry", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_ValidateOCIRegistry_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -302,7 +331,7 @@ func (c *applicationServiceClient) ValidateOCIRegistry(ctx context.Context, in * func (c *applicationServiceClient) GetResourceTreeForExternalResources(ctx context.Context, in *ExternalResourceTreeRequest, opts ...grpc.CallOption) (*ResourceTreeResponse, error) { out := new(ResourceTreeResponse) - err := c.cc.Invoke(ctx, "/ApplicationService/GetResourceTreeForExternalResources", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetResourceTreeForExternalResources_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -311,7 +340,16 @@ func (c *applicationServiceClient) GetResourceTreeForExternalResources(ctx conte func (c *applicationServiceClient) GetFluxAppDetail(ctx context.Context, in *FluxAppDetailRequest, opts ...grpc.CallOption) (*FluxAppDetail, error) { out := new(FluxAppDetail) - err := c.cc.Invoke(ctx, "/ApplicationService/GetFluxAppDetail", in, out, opts...) + err := c.cc.Invoke(ctx, ApplicationService_GetFluxAppDetail_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *applicationServiceClient) GetReleaseDetails(ctx context.Context, in *ReleaseIdentifier, opts ...grpc.CallOption) (*DeployedAppDetail, error) { + out := new(DeployedAppDetail) + err := c.cc.Invoke(ctx, ApplicationService_GetReleaseDetails_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -346,6 +384,7 @@ type ApplicationServiceServer interface { ValidateOCIRegistry(context.Context, *RegistryCredential) (*OCIRegistryResponse, error) GetResourceTreeForExternalResources(context.Context, *ExternalResourceTreeRequest) (*ResourceTreeResponse, error) GetFluxAppDetail(context.Context, *FluxAppDetailRequest) (*FluxAppDetail, error) + GetReleaseDetails(context.Context, *ReleaseIdentifier) (*DeployedAppDetail, error) mustEmbedUnimplementedApplicationServiceServer() } @@ -425,6 +464,9 @@ func (UnimplementedApplicationServiceServer) GetResourceTreeForExternalResources func (UnimplementedApplicationServiceServer) GetFluxAppDetail(context.Context, *FluxAppDetailRequest) (*FluxAppDetail, error) { return nil, status.Errorf(codes.Unimplemented, "method GetFluxAppDetail not implemented") } +func (UnimplementedApplicationServiceServer) GetReleaseDetails(context.Context, *ReleaseIdentifier) (*DeployedAppDetail, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetReleaseDetails not implemented") +} func (UnimplementedApplicationServiceServer) mustEmbedUnimplementedApplicationServiceServer() {} // UnsafeApplicationServiceServer may be embedded to opt out of forward compatibility for this service. 
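// Editor's sketch (not part of the generated file): a minimal client-side use of the
// new GetReleaseDetails RPC added by this patch. The dial target and credentials are
// placeholders, and the ReleaseIdentifier field names (ReleaseName, ReleaseNamespace)
// are assumptions drawn from the existing kubelink proto; applist.proto remains the
// authoritative message definition.
package main

import (
	"context"
	"log"
	"time"

	gRPC "github.com/devtron-labs/devtron/api/helm-app/gRPC"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// fetchReleaseDetails resolves a Helm release to its DeployedAppDetail via the new
// unary RPC; the generated stub routes the call through
// ApplicationService_GetReleaseDetails_FullMethodName ("/ApplicationService/GetReleaseDetails").
func fetchReleaseDetails(addr, releaseName, namespace string) (*gRPC.DeployedAppDetail, error) {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	client := gRPC.NewApplicationServiceClient(conn)
	return client.GetReleaseDetails(ctx, &gRPC.ReleaseIdentifier{
		ReleaseName:      releaseName, // assumed field name
		ReleaseNamespace: namespace,   // assumed field name
	})
}

func main() {
	detail, err := fetchReleaseDetails("localhost:50051", "my-release", "default")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("deployed app: %+v", detail)
}

// Because server implementations embed UnimplementedApplicationServiceServer, a caller
// hitting an older kubelink build that predates this patch receives codes.Unimplemented
// ("method GetReleaseDetails not implemented") rather than a transport error.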
@@ -490,7 +532,7 @@ func _ApplicationService_GetAppDetail_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetAppDetail", + FullMethod: ApplicationService_GetAppDetail_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetAppDetail(ctx, req.(*AppDetailRequest)) @@ -508,7 +550,7 @@ func _ApplicationService_GetAppStatus_Handler(srv interface{}, ctx context.Conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetAppStatus", + FullMethod: ApplicationService_GetAppStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetAppStatus(ctx, req.(*AppDetailRequest)) @@ -526,7 +568,7 @@ func _ApplicationService_Hibernate_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/Hibernate", + FullMethod: ApplicationService_Hibernate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).Hibernate(ctx, req.(*HibernateRequest)) @@ -544,7 +586,7 @@ func _ApplicationService_UnHibernate_Handler(srv interface{}, ctx context.Contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/UnHibernate", + FullMethod: ApplicationService_UnHibernate_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).UnHibernate(ctx, req.(*HibernateRequest)) @@ -562,7 +604,7 @@ func _ApplicationService_GetDeploymentHistory_Handler(srv interface{}, ctx conte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetDeploymentHistory", + FullMethod: ApplicationService_GetDeploymentHistory_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetDeploymentHistory(ctx, req.(*AppDetailRequest)) @@ -580,7 +622,7 @@ func _ApplicationService_GetValuesYaml_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetValuesYaml", + FullMethod: ApplicationService_GetValuesYaml_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetValuesYaml(ctx, req.(*AppDetailRequest)) @@ -598,7 +640,7 @@ func _ApplicationService_GetDesiredManifest_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetDesiredManifest", + FullMethod: ApplicationService_GetDesiredManifest_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetDesiredManifest(ctx, req.(*ObjectRequest)) @@ -616,7 +658,7 @@ func _ApplicationService_UninstallRelease_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/UninstallRelease", + FullMethod: ApplicationService_UninstallRelease_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).UninstallRelease(ctx, req.(*ReleaseIdentifier)) @@ -634,7 +676,7 @@ func _ApplicationService_UpgradeRelease_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - 
FullMethod: "/ApplicationService/UpgradeRelease", + FullMethod: ApplicationService_UpgradeRelease_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).UpgradeRelease(ctx, req.(*UpgradeReleaseRequest)) @@ -652,7 +694,7 @@ func _ApplicationService_GetDeploymentDetail_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetDeploymentDetail", + FullMethod: ApplicationService_GetDeploymentDetail_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetDeploymentDetail(ctx, req.(*DeploymentDetailRequest)) @@ -670,7 +712,7 @@ func _ApplicationService_InstallRelease_Handler(srv interface{}, ctx context.Con } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/InstallRelease", + FullMethod: ApplicationService_InstallRelease_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).InstallRelease(ctx, req.(*InstallReleaseRequest)) @@ -688,7 +730,7 @@ func _ApplicationService_UpgradeReleaseWithChartInfo_Handler(srv interface{}, ct } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/UpgradeReleaseWithChartInfo", + FullMethod: ApplicationService_UpgradeReleaseWithChartInfo_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).UpgradeReleaseWithChartInfo(ctx, req.(*InstallReleaseRequest)) @@ -706,7 +748,7 @@ func _ApplicationService_IsReleaseInstalled_Handler(srv interface{}, ctx context } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/IsReleaseInstalled", + FullMethod: ApplicationService_IsReleaseInstalled_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).IsReleaseInstalled(ctx, req.(*ReleaseIdentifier)) @@ -724,7 +766,7 @@ func _ApplicationService_RollbackRelease_Handler(srv interface{}, ctx context.Co } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/RollbackRelease", + FullMethod: ApplicationService_RollbackRelease_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).RollbackRelease(ctx, req.(*RollbackReleaseRequest)) @@ -742,7 +784,7 @@ func _ApplicationService_TemplateChart_Handler(srv interface{}, ctx context.Cont } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/TemplateChart", + FullMethod: ApplicationService_TemplateChart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).TemplateChart(ctx, req.(*InstallReleaseRequest)) @@ -760,7 +802,7 @@ func _ApplicationService_TemplateChartBulk_Handler(srv interface{}, ctx context. 
} info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/TemplateChartBulk", + FullMethod: ApplicationService_TemplateChartBulk_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).TemplateChartBulk(ctx, req.(*BulkInstallReleaseRequest)) @@ -778,7 +820,7 @@ func _ApplicationService_InstallReleaseWithCustomChart_Handler(srv interface{}, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/InstallReleaseWithCustomChart", + FullMethod: ApplicationService_InstallReleaseWithCustomChart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).InstallReleaseWithCustomChart(ctx, req.(*HelmInstallCustomRequest)) @@ -796,7 +838,7 @@ func _ApplicationService_GetNotes_Handler(srv interface{}, ctx context.Context, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetNotes", + FullMethod: ApplicationService_GetNotes_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetNotes(ctx, req.(*InstallReleaseRequest)) @@ -814,7 +856,7 @@ func _ApplicationService_UpgradeReleaseWithCustomChart_Handler(srv interface{}, } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/UpgradeReleaseWithCustomChart", + FullMethod: ApplicationService_UpgradeReleaseWithCustomChart_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).UpgradeReleaseWithCustomChart(ctx, req.(*UpgradeReleaseRequest)) @@ -832,7 +874,7 @@ func _ApplicationService_ValidateOCIRegistry_Handler(srv interface{}, ctx contex } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/ValidateOCIRegistry", + FullMethod: ApplicationService_ValidateOCIRegistry_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).ValidateOCIRegistry(ctx, req.(*RegistryCredential)) @@ -850,7 +892,7 @@ func _ApplicationService_GetResourceTreeForExternalResources_Handler(srv interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetResourceTreeForExternalResources", + FullMethod: ApplicationService_GetResourceTreeForExternalResources_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetResourceTreeForExternalResources(ctx, req.(*ExternalResourceTreeRequest)) @@ -868,7 +910,7 @@ func _ApplicationService_GetFluxAppDetail_Handler(srv interface{}, ctx context.C } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ApplicationService/GetFluxAppDetail", + FullMethod: ApplicationService_GetFluxAppDetail_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ApplicationServiceServer).GetFluxAppDetail(ctx, req.(*FluxAppDetailRequest)) @@ -876,6 +918,24 @@ func _ApplicationService_GetFluxAppDetail_Handler(srv interface{}, ctx context.C return interceptor(ctx, in, info, handler) } +func _ApplicationService_GetReleaseDetails_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseIdentifier) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ApplicationServiceServer).GetReleaseDetails(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ApplicationService_GetReleaseDetails_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApplicationServiceServer).GetReleaseDetails(ctx, req.(*ReleaseIdentifier)) + } + return interceptor(ctx, in, info, handler) +} + // ApplicationService_ServiceDesc is the grpc.ServiceDesc for ApplicationService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -971,6 +1031,10 @@ var ApplicationService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetFluxAppDetail", Handler: _ApplicationService_GetFluxAppDetail_Handler, }, + { + MethodName: "GetReleaseDetails", + Handler: _ApplicationService_GetReleaseDetails_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/api/helm-app/service/HelmAppService.go b/api/helm-app/service/HelmAppService.go index b9a7afdf664..f0ea1411e86 100644 --- a/api/helm-app/service/HelmAppService.go +++ b/api/helm-app/service/HelmAppService.go @@ -1136,7 +1136,7 @@ func (impl *HelmAppServiceImpl) appListRespProtoTransformer(deployedApps *gRPC.D // do not add app in the list which are created using cd_pipelines (check combination of clusterId, namespace, releaseName) var toExcludeFromList bool for _, helmCdPipeline := range helmCdPipelines { - helmAppReleaseName := util2.BuildDeployedAppName(helmCdPipeline.App.AppName, helmCdPipeline.Environment.Name) + helmAppReleaseName := helmCdPipeline.DeploymentAppName if deployedapp.AppName == helmAppReleaseName && int(deployedapp.EnvironmentDetail.ClusterId) == helmCdPipeline.Environment.ClusterId && deployedapp.EnvironmentDetail.Namespace == helmCdPipeline.Environment.Namespace { toExcludeFromList = true break diff --git a/api/restHandler/DeploymentConfigurationRestHandler.go b/api/restHandler/DeploymentConfigurationRestHandler.go new file mode 100644 index 00000000000..a29776a6b65 --- /dev/null +++ b/api/restHandler/DeploymentConfigurationRestHandler.go @@ -0,0 +1,135 @@ +package restHandler + +import ( + "fmt" + "github.com/devtron-labs/devtron/api/restHandler/common" + "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" + "github.com/devtron-labs/devtron/pkg/auth/user" + "github.com/devtron-labs/devtron/pkg/configDiff" + "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/util/rbac" + "github.com/gorilla/schema" + "go.uber.org/zap" + "gopkg.in/go-playground/validator.v9" + "net/http" +) + +type DeploymentConfigurationRestHandler interface { + ConfigAutoComplete(w http.ResponseWriter, r *http.Request) + GetConfigData(w http.ResponseWriter, r *http.Request) +} +type DeploymentConfigurationRestHandlerImpl struct { + logger *zap.SugaredLogger + userAuthService user.UserService + validator *validator.Validate + enforcerUtil rbac.EnforcerUtil + deploymentConfigurationService configDiff.DeploymentConfigurationService + enforcer casbin.Enforcer +} + +func NewDeploymentConfigurationRestHandlerImpl(logger *zap.SugaredLogger, + userAuthService user.UserService, + enforcerUtil rbac.EnforcerUtil, + deploymentConfigurationService configDiff.DeploymentConfigurationService, + enforcer casbin.Enforcer, +) *DeploymentConfigurationRestHandlerImpl { + return &DeploymentConfigurationRestHandlerImpl{ + logger: logger, + userAuthService: userAuthService, + enforcerUtil: enforcerUtil, + deploymentConfigurationService: 
deploymentConfigurationService, + enforcer: enforcer, + } +} + +func (handler *DeploymentConfigurationRestHandlerImpl) ConfigAutoComplete(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userAuthService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) + if err != nil { + return + } + envId, err := common.ExtractIntQueryParam(w, r, "envId", 0) + if err != nil { + return + } + + //RBAC START + token := r.Header.Get(common.TokenHeaderKey) + object := handler.enforcerUtil.GetAppRBACNameByAppId(appId) + ok := handler.enforcerUtil.CheckAppRbacForAppOrJob(token, object, casbin.ActionGet) + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), nil, http.StatusForbidden) + return + } + //RBAC END + + res, err := handler.deploymentConfigurationService.ConfigAutoComplete(appId, envId) + if err != nil { + handler.logger.Errorw("service err, ConfigAutoComplete ", "appId", appId, "envId", envId, "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, err, res, http.StatusOK) +} + +func (handler *DeploymentConfigurationRestHandlerImpl) GetConfigData(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userAuthService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + configDataQueryParams, err := getConfigDataQueryParams(r) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + + //RBAC START + token := r.Header.Get(common.TokenHeaderKey) + object := handler.enforcerUtil.GetAppRBACName(configDataQueryParams.AppName) + ok := handler.enforcerUtil.CheckAppRbacForAppOrJob(token, object, casbin.ActionGet) + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), nil, http.StatusForbidden) + return + } + //RBAC END + + res, err := handler.deploymentConfigurationService.GetAllConfigData(r.Context(), configDataQueryParams) + if err != nil { + handler.logger.Errorw("service err, GetAllConfigData ", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + res.IsAppAdmin = handler.enforceForAppAndEnv(configDataQueryParams.AppName, configDataQueryParams.EnvName, token, casbin.ActionUpdate) + + common.WriteJsonResp(w, nil, res, http.StatusOK) +} + +func (handler *DeploymentConfigurationRestHandlerImpl) enforceForAppAndEnv(appName, envName string, token string, action string) bool { + object := handler.enforcerUtil.GetAppRBACNameByAppName(appName) + if ok := handler.enforcer.Enforce(token, casbin.ResourceApplications, action, object); !ok { + return false + } + + object = handler.enforcerUtil.GetEnvRBACNameByAppAndEnvName(appName, envName) + if ok := handler.enforcer.Enforce(token, casbin.ResourceEnvironment, action, object); !ok { + return false + } + return true +} +func getConfigDataQueryParams(r *http.Request) (*bean.ConfigDataQueryParams, error) { + v := r.URL.Query() + var decoder = schema.NewDecoder() + decoder.IgnoreUnknownKeys(true) + queryParams := bean.ConfigDataQueryParams{} + err := decoder.Decode(&queryParams, v) + if err != nil { + return nil, err + } + + return &queryParams, nil +} diff --git a/api/restHandler/ImageScanRestHandler.go b/api/restHandler/ImageScanRestHandler.go index 90b68a88981..eded4a3de4d 100644 --- a/api/restHandler/ImageScanRestHandler.go +++ 
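The new handler file above binds its query string through gorilla/schema: getConfigDataQueryParams builds a decoder, sets IgnoreUnknownKeys(true) so unrelated parameters cannot fail the request, and decodes straight into bean.ConfigDataQueryParams, whose AppName and EnvName fields the RBAC block then reads. Reduced to a self-contained sketch, with an illustrative stand-in struct since the real field set and tags live in pkg/configDiff/bean:

package main

import (
    "fmt"
    "net/url"

    "github.com/gorilla/schema"
)

// Stand-in for bean.ConfigDataQueryParams; the schema tags are assumptions.
type configDataQueryParams struct {
    AppName string `schema:"appName"`
    EnvName string `schema:"envName"`
}

func main() {
    v, _ := url.ParseQuery("appName=my-app&envName=staging&unknown=ignored")
    decoder := schema.NewDecoder()
    decoder.IgnoreUnknownKeys(true) // same setting the handler uses
    params := configDataQueryParams{}
    if err := decoder.Decode(&params, v); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", params) // {AppName:my-app EnvName:staging}
}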
b/api/restHandler/ImageScanRestHandler.go @@ -19,6 +19,7 @@ package restHandler import ( "encoding/json" "fmt" + securityBean "github.com/devtron-labs/devtron/pkg/security/bean" "net/http" "strconv" @@ -70,7 +71,7 @@ func (impl ImageScanRestHandlerImpl) ScanExecutionList(w http.ResponseWriter, r } decoder := json.NewDecoder(r.Body) - var request *security.ImageScanRequest + var request *securityBean.ImageScanRequest err = decoder.Decode(&request) if err != nil { impl.logger.Errorw("request err, ScanExecutionList", "err", err, "payload", request) @@ -82,8 +83,8 @@ func (impl ImageScanRestHandlerImpl) ScanExecutionList(w http.ResponseWriter, r if err != nil { impl.logger.Errorw("service err, ScanExecutionList", "err", err, "payload", request) if util.IsErrNoRows(err) { - responseList := make([]*security.ImageScanHistoryResponse, 0) - common.WriteJsonResp(w, nil, &security.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) + responseList := make([]*securityBean.ImageScanHistoryResponse, 0) + common.WriteJsonResp(w, nil, &securityBean.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) } else { common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) } @@ -126,8 +127,8 @@ func (impl ImageScanRestHandlerImpl) ScanExecutionList(w http.ResponseWriter, r if err != nil { impl.logger.Errorw("service err, ScanExecutionList", "err", err, "payload", request) if util.IsErrNoRows(err) { - responseList := make([]*security.ImageScanHistoryResponse, 0) - common.WriteJsonResp(w, nil, &security.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) + responseList := make([]*securityBean.ImageScanHistoryResponse, 0) + common.WriteJsonResp(w, nil, &securityBean.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) } else { common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) } @@ -177,7 +178,7 @@ func (impl ImageScanRestHandlerImpl) FetchExecutionDetail(w http.ResponseWriter, } } image := v.Get("image") - request := &security.ImageScanRequest{ + request := &securityBean.ImageScanRequest{ ImageScanDeployInfoId: imageScanDeployInfoId, Image: image, ArtifactId: artifactId, @@ -189,7 +190,7 @@ func (impl ImageScanRestHandlerImpl) FetchExecutionDetail(w http.ResponseWriter, if err != nil { impl.logger.Errorw("service err, FetchExecutionDetail", "err", err, "payload", request) if util.IsErrNoRows(err) { - common.WriteJsonResp(w, nil, &security.ImageScanExecutionDetail{}, http.StatusOK) + common.WriteJsonResp(w, nil, &securityBean.ImageScanExecutionDetail{}, http.StatusOK) } else { common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) } @@ -221,7 +222,7 @@ func (impl ImageScanRestHandlerImpl) FetchExecutionDetail(w http.ResponseWriter, } //RBAC } else { - common.WriteJsonResp(w, err, &security.ImageScanExecutionDetail{}, http.StatusOK) + common.WriteJsonResp(w, err, &securityBean.ImageScanExecutionDetail{}, http.StatusOK) } common.WriteJsonResp(w, err, executionDetail, http.StatusOK) @@ -230,7 +231,7 @@ func (impl ImageScanRestHandlerImpl) FetchExecutionDetail(w http.ResponseWriter, func (impl ImageScanRestHandlerImpl) FetchMinScanResultByAppIdAndEnvId(w http.ResponseWriter, r *http.Request) { v := r.URL.Query() var appId, envId int - request := &security.ImageScanRequest{} + request := &securityBean.ImageScanRequest{} appIds := v.Get("appId") if len(appIds) > 0 { appId, err := strconv.Atoi(appIds) @@ -299,8 +300,8 @@ func (impl 
ImageScanRestHandlerImpl) VulnerabilityExposure(w http.ResponseWriter if err != nil { impl.logger.Errorw("service err, VulnerabilityExposure", "err", err, "payload", request) if util.IsErrNoRows(err) { - responseList := make([]*security.ImageScanHistoryResponse, 0) - common.WriteJsonResp(w, nil, &security.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) + responseList := make([]*securityBean.ImageScanHistoryResponse, 0) + common.WriteJsonResp(w, nil, &securityBean.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, http.StatusOK) } else { common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) } diff --git a/api/restHandler/PolicyRestHandler.go b/api/restHandler/PolicyRestHandler.go index fdbc7e9982a..6dce4c30371 100644 --- a/api/restHandler/PolicyRestHandler.go +++ b/api/restHandler/PolicyRestHandler.go @@ -20,12 +20,12 @@ import ( "encoding/json" "errors" "fmt" + securityBean "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" "net/http" "strconv" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/api/restHandler/common" - security2 "github.com/devtron-labs/devtron/internal/sql/repository/security" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" user2 "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/cluster" @@ -221,18 +221,18 @@ func (impl PolicyRestHandlerImpl) GetPolicy(w http.ResponseWriter, r *http.Reque req.Id = ids } var clusterId, environmentId, appId int - var policyLevel security2.PolicyLevel - if level == security2.Global.String() { - policyLevel = security2.Global - } else if level == security2.Cluster.String() { + var policyLevel securityBean.PolicyLevel + if level == securityBean.Global.String() { + policyLevel = securityBean.Global + } else if level == securityBean.Cluster.String() { clusterId = req.Id - policyLevel = security2.Cluster - } else if level == security2.Environment.String() { + policyLevel = securityBean.Cluster + } else if level == securityBean.Environment.String() { environmentId = req.Id - policyLevel = security2.Environment - } else if level == security2.Application.String() { + policyLevel = securityBean.Environment + } else if level == securityBean.Application.String() { appId = req.Id - policyLevel = security2.Application + policyLevel = securityBean.Application } token := r.Header.Get("token") diff --git a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go index 646e5ade7a3..2b6608822d0 100644 --- a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go @@ -207,6 +207,11 @@ func (handler *PipelineConfigRestHandlerImpl) CreateCdPipeline(w http.ResponseWr handler.Logger.Infow("request payload, CreateCdPipeline", "payload", cdPipeline) userUploaded, err := handler.chartService.CheckIfChartRefUserUploadedByAppId(cdPipeline.AppId) if !userUploaded { + for i, p := range cdPipeline.Pipelines { + if len(p.ReleaseMode) == 0 { + cdPipeline.Pipelines[i].ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE + } + } err = handler.validator.Struct(cdPipeline) if err != nil { handler.Logger.Errorw("validation err, CreateCdPipeline", "err", err, "payload", cdPipeline) diff --git a/api/router/DeploymentConfigRouter.go b/api/router/DeploymentConfigRouter.go new file mode 100644 index 00000000000..a8a568d6046 --- /dev/null +++ 
b/api/router/DeploymentConfigRouter.go @@ -0,0 +1,31 @@ +package router + +import ( + "github.com/devtron-labs/devtron/api/restHandler" + "github.com/gorilla/mux" +) + +type DeploymentConfigurationRouter interface { + initDeploymentConfigurationRouter(configRouter *mux.Router) +} + +type DeploymentConfigurationRouterImpl struct { + deploymentGroupRestHandler restHandler.DeploymentConfigurationRestHandler +} + +func NewDeploymentConfigurationRouter(deploymentGroupRestHandler restHandler.DeploymentConfigurationRestHandler) *DeploymentConfigurationRouterImpl { + router := &DeploymentConfigurationRouterImpl{ + deploymentGroupRestHandler: deploymentGroupRestHandler, + } + return router +} + +func (router DeploymentConfigurationRouterImpl) initDeploymentConfigurationRouter(configRouter *mux.Router) { + configRouter.Path("/autocomplete"). + HandlerFunc(router.deploymentGroupRestHandler.ConfigAutoComplete). + Methods("GET") + configRouter.Path("/data"). + HandlerFunc(router.deploymentGroupRestHandler.GetConfigData). + Methods("GET") + +} diff --git a/api/router/router.go b/api/router/router.go index 8902c53cafd..cbee85a928a 100644 --- a/api/router/router.go +++ b/api/router/router.go @@ -114,6 +114,7 @@ type MuxRouter struct { rbacRoleRouter user.RbacRoleRouter scopedVariableRouter ScopedVariableRouter ciTriggerCron cron.CiTriggerCron + deploymentConfigurationRouter DeploymentConfigurationRouter infraConfigRouter infraConfig.InfraConfigRouter argoApplicationRouter argoApplication.ArgoApplicationRouter fluxApplicationRouter fluxApplication2.FluxApplicationRouter @@ -146,6 +147,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, scopedVariableRouter ScopedVariableRouter, ciTriggerCron cron.CiTriggerCron, proxyRouter proxy.ProxyRouter, + deploymentConfigurationRouter DeploymentConfigurationRouter, infraConfigRouter infraConfig.InfraConfigRouter, argoApplicationRouter argoApplication.ArgoApplicationRouter, devtronResourceRouter devtronResource.DevtronResourceRouter, @@ -210,6 +212,7 @@ func NewMuxRouter(logger *zap.SugaredLogger, rbacRoleRouter: rbacRoleRouter, scopedVariableRouter: scopedVariableRouter, ciTriggerCron: ciTriggerCron, + deploymentConfigurationRouter: deploymentConfigurationRouter, infraConfigRouter: infraConfigRouter, argoApplicationRouter: argoApplicationRouter, devtronResourceRouter: devtronResourceRouter, @@ -293,8 +296,9 @@ func (r MuxRouter) Init() { chartRefRouter := r.Router.PathPrefix("/orchestrator/chartref").Subrouter() r.ChartRefRouter.initChartRefRouter(chartRefRouter) - configMapRouter := r.Router.PathPrefix("/orchestrator/config").Subrouter() - r.ConfigMapRouter.initConfigMapRouter(configMapRouter) + configRouter := r.Router.PathPrefix("/orchestrator/config").Subrouter() + r.ConfigMapRouter.initConfigMapRouter(configRouter) + r.deploymentConfigurationRouter.initDeploymentConfigurationRouter(configRouter) appStoreRouter := r.Router.PathPrefix("/orchestrator/app-store").Subrouter() r.AppStoreRouter.Init(appStoreRouter) diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 72533cab867..23dbbb83014 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. 
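With the router.go hunk above mounting the new router on the existing /orchestrator/config subrouter, the two endpoints resolve to GET /orchestrator/config/autocomplete, which takes integer appId and envId query parameters per ExtractIntQueryParam, and GET /orchestrator/config/data. A hedged client sketch follows; the base URL and token value are placeholders, and the exact /data query keys are assumed from the AppName/EnvName fields the handler reads:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func get(path string) {
    req, _ := http.NewRequest(http.MethodGet, "http://localhost:8080"+path, nil) // placeholder base URL
    req.Header.Set("token", "<api-token>") // the RBAC blocks read the token header
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(body))
}

func main() {
    get("/orchestrator/config/autocomplete?appId=1&envId=2")
    // /data binds via gorilla/schema; appName/envName keys are assumptions.
    get("/orchestrator/config/data?appName=my-app&envName=staging")
}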
-//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:generate go run github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject diff --git a/go.mod b/go.mod index 3ebbc979a94..87c9902056f 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/Masterminds/semver v1.5.0 github.com/Pallinder/go-randomdata v1.2.0 github.com/argoproj/argo-cd/v2 v2.8.19 - github.com/argoproj/argo-workflows/v3 v3.4.3 + github.com/argoproj/argo-workflows/v3 v3.5.10 github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 github.com/aws/aws-sdk-go v1.44.290 github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 @@ -19,7 +19,7 @@ require ( github.com/casbin/xorm-adapter v1.0.1-0.20190716004226-a317737a1007 github.com/casbin/xorm-adapter/v2 v2.5.1 github.com/coreos/go-oidc/v3 v3.11.0 - github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deckarep/golang-set v1.8.0 github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d @@ -39,7 +39,7 @@ require ( github.com/google/cel-go v0.17.8 github.com/google/go-cmp v0.6.0 github.com/google/go-github v17.0.0+incompatible - github.com/google/uuid v1.3.1 + github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/schema v1.4.1 @@ -92,7 +92,7 @@ require ( k8s.io/kubernetes v1.27.13 k8s.io/metrics v0.26.4 k8s.io/utils v0.0.0-20230726121419-3b25d923346b - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/yaml v1.4.0 ) require ( @@ -113,14 +113,11 @@ require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/agext/levenshtein v1.2.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect - github.com/antonmedv/expr v1.12.5 // indirect github.com/apparentlymart/go-textseg v1.0.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e // indirect @@ -135,11 +132,9 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect @@ -150,11 +145,11 @@ require ( github.com/go-errors/errors v1.4.2 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-jose/go-jose/v4 v4.0.2 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - 
github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.6 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-redis/cache/v9 v9.0.0 // indirect @@ -180,14 +175,11 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect - github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect github.com/igm/sockjs-go v3.0.0+incompatible // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.2.2 // indirect @@ -197,7 +189,6 @@ require ( github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect github.com/leodido/go-urn v1.2.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -218,7 +209,6 @@ require ( github.com/nats-io/nats.go v1.28.0 // indirect github.com/nats-io/nkeys v0.4.6 // indirect github.com/nats-io/nuid v1.0.1 // indirect - github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -230,10 +220,8 @@ require ( github.com/redis/go-redis/v9 v9.0.5 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.1.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/skeema/knownhosts v1.2.2 // indirect - github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cobra v1.8.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect @@ -241,8 +229,6 @@ require ( github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect - github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasttemplate v1.2.2 // indirect github.com/vmihailenco/go-tinylfu v0.2.2 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -264,20 +250,15 @@ require ( golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/api v0.126.0 // indirect - google.golang.org/appengine v1.6.7 // indirect + google.golang.org/appengine 
v1.6.8 // indirect google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect - gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect - gopkg.in/jcmturner/goidentity.v2 v2.0.0 // indirect - gopkg.in/jcmturner/gokrb5.v5 v5.3.0 // indirect - gopkg.in/jcmturner/rpc.v0 v0.0.2 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.0 // indirect @@ -294,13 +275,13 @@ require ( sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - upper.io/db.v3 v3.8.0+incompatible // indirect xorm.io/builder v0.3.7 // indirect xorm.io/core v0.7.2 // indirect xorm.io/xorm v1.0.3 // indirect ) replace ( + github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.5.7-v3refs k8s.io/api => k8s.io/api v0.26.11 diff --git a/go.sum b/go.sum index aafbebf99ab..fb7bccc7732 100644 --- a/go.sum +++ b/go.sum @@ -44,15 +44,10 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= @@ -82,8 +77,6 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuW github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= -github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E= -github.com/antonmedv/expr v1.12.5/go.mod 
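Besides the version bumps and the pruning of indirect dependencies above (sprig, expr, gokrb5, hdfs and friends drop out, apparently as transitive requirements of the argo-workflows 3.4 line), the structural change in the module graph is the new replace directive: import paths keep referring to github.com/argoproj/argo-workflows/v3, but resolution is redirected to devtron's fork at the same version. In isolation the mechanism is just this (go.mod sketch; the module name and go directive are placeholders):

module example.com/consumer

go 1.21

require github.com/argoproj/argo-workflows/v3 v3.5.10

replace github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10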
h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= @@ -92,8 +85,6 @@ github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6 github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/argoproj/argo-cd/v2 v2.8.19 h1:/oY2Hc2PjEK1nujcKnbylyL6XjeB7JrjwXlsNQuKmiE= github.com/argoproj/argo-cd/v2 v2.8.19/go.mod h1:KlJ82U5ON9ZDddDIhXbW522l2T4wyBwKsFHZYHIcl2Y= -github.com/argoproj/argo-workflows/v3 v3.4.3 h1:4pt7+Rjy9Lzq/r6dWp6wL8mr3ucPHSsGIlWwoP3fueM= -github.com/argoproj/argo-workflows/v3 v3.4.3/go.mod h1:Od1rQK5j9/WefqFaUsIwAqTialDhLlhups0RE/WYzz4= github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 h1:oTaLRbCwjnGtScIX2ZRdIEDsiDxonwh9/BbUxdXrjYc= github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814/go.mod h1:1TchqKw9XmYYZluyEHa1dTJQoZgbV6PhabB/e8Wf3KY= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e h1:kuLQvJqwwRMQTheT4MFyKVM8Txncu21CHT4yBWUl1Mk= @@ -173,8 +164,6 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31 h1:ow7T77012NSZVW0uOWoQxz3yj9fHKYeZ4QmNrMtWMbM= -github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31/go.mod h1:vSBumefK4HA5uiRSwNP+3ofgrEoScpCS2MMWcWXEuQ4= github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI= github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -186,13 +175,16 @@ github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc h1:VRRKCwnzqk8QCaRC4os14xoKDdbHqqlJtJA0oc1ZAjg= github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod 
h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/devtron-labs/argo-workflows/v3 v3.5.10 h1:6rxQOesOzDz6SgQCMDQNHaehsKFW3C7U8CZeEek5kgQ= +github.com/devtron-labs/argo-workflows/v3 v3.5.10/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 h1:2+Q7Jdhpo/uMiaQiZZzAh+ZX7wEJIFuMFG6DEiMuo64= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8/go.mod h1:702R6WIf5y9UzKGoCGxQ+x3l5Ws+l0fXg2xlCpSGFZI= github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d h1:+iWXiVOyf9E0bcTia6x2sLFTM7xJc+9Z8q+BfbYr6eM= @@ -207,8 +199,6 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cu github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 h1:7nllYTGLnq4CqBL27lV6oNfXzM2tJ2mrKF8E+aBXOV0= -github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3/go.mod h1:v/MTKot4he5oRHGirOYGN4/hEOONNnWtDBLAzllSGMw= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -245,8 +235,6 @@ github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -285,23 +273,24 @@ github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.6 h1:dnqg1XfHXL9aBxSbktBqFR5CxVyVI+7fYWhAf1JOeTw= +github.com/go-openapi/swag v0.22.6/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= github.com/go-pg/pg v6.15.1+incompatible h1:vO4P9WoCi+i4qomgcBXWlKgDk4GcHAqDAOIfkEpi7B4= github.com/go-pg/pg v6.15.1+incompatible/go.mod h1:a2oXow+aFOrvwcKs3eIA0lNFmMilrxK2sOkB5NWe0vA= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -414,8 +403,8 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= @@ -460,23 +449,16 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= -github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl2 v0.0.0-20191002203319-fb75b3253c80 h1:PFfGModn55JA0oBsvFghhj0v93me+Ctr3uHC/UmFAls= github.com/hashicorp/hcl2 v0.0.0-20191002203319-fb75b3253c80/go.mod h1:Cxv+IJLuBiEhQ7pBYGEuORa0nr4U994pE8mYLuFd7v0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/igm/sockjs-go v3.0.0+incompatible h1:4w5ztbp2brVLJYz+o3u0m7+zmuup6eZ/Fr1ehbJOsBo= github.com/igm/sockjs-go v3.0.0+incompatible/go.mod h1:Yu6pvqjNniWNJe07LPObeCG6R77Qc97C6Kss0roF8tU= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -487,9 +469,6 @@ github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80s github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= -github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= @@ -551,8 +530,6 @@ github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQs github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -613,7 +590,6 @@ github.com/minio/highwayhash v1.0.2/go.mod 
h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.58/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -622,7 +598,6 @@ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTS github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= @@ -660,7 +635,6 @@ github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 h1:Yl0tPBa8QPjGmesFh1D0rDy+q1Twx6FyU7VWHi8wZbI= github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852/go.mod h1:eqOVx5Vwu4gd2mmMZvVZsgIqNSaW3xxRThUJ0k/TPk4= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -704,7 +678,6 @@ github.com/otiai10/mint v1.3.0 h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -754,9 +727,6 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 
h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= -github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -769,9 +739,6 @@ github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= @@ -810,10 +777,6 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso github.com/tidwall/sjson v1.2.4 h1:cuiLzLnaMeBhRmEv00Lpk3tkYrcxpmbU81tAY4Dw0tc= github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= -github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/vmihailenco/go-tinylfu v0.2.2 h1:H1eiG6HM36iniK6+21n9LLpzx1G9R3DJa2UjUjbynsI= github.com/vmihailenco/go-tinylfu v0.2.2/go.mod h1:CutYi2Q9puTxfcolkliPq4npPuofg9N9t8JVrjzwa3Q= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= @@ -891,7 +854,6 @@ go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -905,7 +867,6 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto 
v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= @@ -1099,8 +1060,9 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1139,8 +1101,9 @@ google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvy google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1207,18 +1170,7 @@ gopkg.in/igm/sockjs-go.v3 v3.0.0/go.mod h1:4aNFiKYpI9DpJHyToiHfcqxGpWqmjTK9A0FkE gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= -gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= -gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v2 v2.0.0 h1:6Bmcdaxb0dD3HyHbo/MtJ2Q1wXLDuZJFwXZmuZvM+zw= -gopkg.in/jcmturner/goidentity.v2 v2.0.0/go.mod h1:vCwK9HeXksMeUmQ4SxDd1tRz4LejrKh3KRVjQWhjvZI= -gopkg.in/jcmturner/gokrb5.v5 v5.3.0 h1:RS1MYApX27Hx1Xw7NECs7XxGxxrm69/4OmaRuX9kwec= -gopkg.in/jcmturner/gokrb5.v5 v5.3.0/go.mod h1:oQz8Wc5GsctOTgCVyKad1Vw4TCWz5G6gfIQr88RPv4k= -gopkg.in/jcmturner/rpc.v0 v0.0.2 h1:wBTgrbL1qmLBUPsYVCqdJiI5aJgQhexmK+JkTHPUNJI= 
-gopkg.in/jcmturner/rpc.v0 v0.0.2/go.mod h1:NzMq6cRzR9lipgw7WxRBHNx5N8SifBuaCQsOT1kWY/E= gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1308,10 +1260,9 @@ sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ih sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -upper.io/db.v3 v3.8.0+incompatible h1:XNeEO2vQRVqq70M98ghzq6M30F5Bzo+99ess5v+eVYw= -upper.io/db.v3 v3.8.0+incompatible/go.mod h1:FgTdD24eBjJAbPKsQSiHUNgXjOR4Lub3u1UMHSIh82Y= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= xorm.io/builder v0.3.6/go.mod h1:LEFAPISnRzG+zxaxj2vPicRwz67BdhFreKg8yv8/TgU= xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go index 94193e48361..59340ada08c 100644 --- a/internal/sql/repository/AppListingRepository.go +++ b/internal/sql/repository/AppListingRepository.go @@ -27,6 +27,7 @@ import ( "github.com/devtron-labs/devtron/internal/middleware" appWorkflow2 "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" + "github.com/devtron-labs/devtron/internal/util" repository2 "github.com/devtron-labs/devtron/pkg/cluster/repository" "go.opentelemetry.io/otel" "strings" @@ -365,10 +366,10 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex " p.ci_pipeline_id," + " p.trigger_type" + " FROM pipeline p" + - " INNER JOIN pipeline_config_override pco on pco.pipeline_id=p.id" + + " LEFT JOIN pipeline_config_override pco on pco.pipeline_id=p.id" + " INNER JOIN environment env ON env.id=p.environment_id" + " INNER JOIN cluster cl on cl.id=env.cluster_id" + - " INNER JOIN ci_artifact cia on cia.id = pco.ci_artifact_id" + + " LEFT JOIN ci_artifact cia on cia.id = pco.ci_artifact_id" + " INNER JOIN app a ON a.id=p.app_id" + " WHERE a.app_type = 0 AND a.id=? AND env.id=? 
AND p.deleted = FALSE AND env.active = TRUE" + " ORDER BY pco.created_on DESC LIMIT 1;" @@ -378,13 +379,18 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex return deploymentDetail, err } deploymentDetail.EnvironmentId = envId - if len(deploymentDetail.DeploymentAppType) == 0 { - dc, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId) - if err != nil { - impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err) - return deploymentDetail, err - } + + deploymentDetail.EnvironmentId = envId + dc, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId) + if err != nil && err != pg.ErrNoRows { + impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err) + return deploymentDetail, err + } + if err == pg.ErrNoRows { + deploymentDetail.ReleaseMode = util.PIPELINE_RELEASE_MODE_CREATE + } else { deploymentDetail.DeploymentAppType = dc.DeploymentAppType + deploymentDetail.ReleaseMode = dc.ReleaseMode } return deploymentDetail, nil @@ -455,6 +461,9 @@ func (impl AppListingRepositoryImpl) FetchAppDetail(ctx context.Context, appId i if err != nil { impl.Logger.Warn("unable to fetch deployment detail for app") } + if deploymentDetail.PcoId > 0 { + deploymentDetail.IsPipelineTriggered = true + } appWfMapping, _ := impl.appWorkflowRepository.FindWFCDMappingByCDPipelineId(deploymentDetail.CdPipelineId) if appWfMapping.ParentType == appWorkflow2.CDPIPELINE { parentEnvironmentName, _ := impl.getEnvironmentNameFromPipelineId(appWfMapping.ParentId) diff --git a/internal/sql/repository/app/AppRepository.go b/internal/sql/repository/app/AppRepository.go index fef6630bfd7..76d526f1744 100644 --- a/internal/sql/repository/app/AppRepository.go +++ b/internal/sql/repository/app/AppRepository.go @@ -50,6 +50,8 @@ type AppRepository interface { UpdateWithTxn(app *App, tx *pg.Tx) error SetDescription(id int, description string, userId int32) error FindActiveByName(appName string) (pipelineGroup *App, err error) + FindAppIdByName(appName string) (int, error) + FindJobByDisplayName(appName string) (pipelineGroup *App, err error) FindActiveListByName(appName string) ([]*App, error) FindById(id int) (pipelineGroup *App, err error) @@ -137,6 +139,19 @@ func (repo AppRepositoryImpl) FindActiveByName(appName string) (*App, error) { // there is only single active app will be present in db with a same name. return pipelineGroup, err } +func (repo AppRepositoryImpl) FindAppIdByName(appName string) (int, error) { + app := &App{} + err := repo.dbConnection. + Model(app). + Column("app.id"). + Where("app_name = ?", appName). + Where("active = ?", true). + Order("id DESC").Limit(1). + Select() + // there is only single active app will be present in db with a same name. + return app.Id, err +} + func (repo AppRepositoryImpl) FindJobByDisplayName(appName string) (*App, error) { pipelineGroup := &App{} err := repo.dbConnection. 
diff --git a/internal/sql/repository/chartConfig/ConfigMapRepository.go b/internal/sql/repository/chartConfig/ConfigMapRepository.go index 5cc12311991..764378765c3 100644 --- a/internal/sql/repository/chartConfig/ConfigMapRepository.go +++ b/internal/sql/repository/chartConfig/ConfigMapRepository.go @@ -17,6 +17,7 @@ package chartConfig import ( + "github.com/devtron-labs/devtron/pkg/pipeline/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" @@ -38,6 +39,7 @@ type ConfigMapRepository interface { GetByAppIdAppLevel(appId int) (*ConfigMapAppModel, error) GetByAppIdAndEnvIdEnvLevel(appId int, envId int) (*ConfigMapEnvModel, error) GetEnvLevelByAppId(appId int) ([]*ConfigMapEnvModel, error) + GetConfigNamesForAppAndEnvLevel(appId int, envId int) ([]bean.ConfigNameAndType, error) } type ConfigMapRepositoryImpl struct { @@ -49,6 +51,11 @@ func NewConfigMapRepositoryImpl(Logger *zap.SugaredLogger, dbConnection *pg.DB) return &ConfigMapRepositoryImpl{dbConnection: dbConnection, Logger: Logger} } +const ( + ConfigMapAppLevel string = "config_map_app_level" + ConfigMapEnvLevel string = "config_map_env_level" +) + type ConfigMapAppModel struct { TableName struct{} `sql:"config_map_app_level" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -57,6 +64,55 @@ type ConfigMapAppModel struct { SecretData string `sql:"secret_data"` sql.AuditLog } +type cMCSNames struct { + Id int `json:"id"` + CMName string `json:"cm_name"` + CSName string `json:"cs_name"` +} + +func (impl ConfigMapRepositoryImpl) GetConfigNamesForAppAndEnvLevel(appId int, envId int) ([]bean.ConfigNameAndType, error) { + var cMCSNames []cMCSNames + tableName := ConfigMapEnvLevel + if envId == -1 { + tableName = ConfigMapAppLevel + } + //below query iterates over the cm, cs stored as json element, and fetches cmName and csName, id for a particular appId or envId if provided + query := impl.dbConnection. + Model(). + Table(tableName). + Column("id"). + ColumnExpr("json_array_elements(CASE WHEN (config_map_data::json->'maps')::TEXT != 'null' THEN (config_map_data::json->'maps') ELSE '[]' END )->>'name' AS cm_name"). + ColumnExpr("json_array_elements(CASE WHEN (secret_data::json->'secrets')::TEXT != 'null' THEN (secret_data::json->'secrets') ELSE '[]' END )->>'name' AS cs_name"). 
+ Where("app_id = ?", appId) + + if envId > 0 { + query = query.Where("environment_id=?", envId) + } + if err := query.Select(&cMCSNames); err != nil { + if err != pg.ErrNoRows { + impl.Logger.Errorw("error occurred while fetching CM/CS names", "appId", appId, "err", err) + return nil, err + } + } + var configNames []bean.ConfigNameAndType + for _, name := range cMCSNames { + if name.CMName != "" { + configNames = append(configNames, bean.ConfigNameAndType{ + Id: name.Id, + Name: name.CMName, + Type: bean.CM, + }) + } + if name.CSName != "" { + configNames = append(configNames, bean.ConfigNameAndType{ + Id: name.Id, + Name: name.CSName, + Type: bean.CS, + }) + } + } + return configNames, nil +} func (impl ConfigMapRepositoryImpl) CreateAppLevel(model *ConfigMapAppModel) (*ConfigMapAppModel, error) { err := impl.dbConnection.Insert(model) diff --git a/internal/sql/repository/deploymentConfig/repository.go b/internal/sql/repository/deploymentConfig/repository.go index ecaf1f59bc2..0dac42b98b3 100644 --- a/internal/sql/repository/deploymentConfig/repository.go +++ b/internal/sql/repository/deploymentConfig/repository.go @@ -29,6 +29,7 @@ type DeploymentConfig struct { ConfigType string `sql:"config_type"` RepoUrl string `sql:"repo_url"` RepoName string `sql:"repo_name"` + ReleaseMode string `json:"release_mode"` Active bool `sql:"active,notnull"` sql.AuditLog } diff --git a/internal/sql/repository/security/CvePolicyControle.go b/internal/sql/repository/security/CvePolicyControle.go index 45728ca1b6c..262e863a7cf 100644 --- a/internal/sql/repository/security/CvePolicyControle.go +++ b/internal/sql/repository/security/CvePolicyControle.go @@ -18,6 +18,7 @@ package security import ( "fmt" + securityBean "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "github.com/go-pg/pg/orm" @@ -25,92 +26,29 @@ import ( ) type CvePolicy struct { - tableName struct{} `sql:"cve_policy_control" pg:",discard_unknown_columns"` - Id int `sql:"id,pk"` - Global bool `sql:"global,notnull"` - ClusterId int `sql:"cluster_id"` - EnvironmentId int `sql:"env_id"` - AppId int `sql:"app_id"` - CVEStoreId string `sql:"cve_store_id"` - Action PolicyAction `sql:"action, notnull"` - Severity *Severity `sql:"severity, notnull "` - Deleted bool `sql:"deleted, notnull"` + tableName struct{} `sql:"cve_policy_control" pg:",discard_unknown_columns"` + Id int `sql:"id,pk"` + Global bool `sql:"global,notnull"` + ClusterId int `sql:"cluster_id"` + EnvironmentId int `sql:"env_id"` + AppId int `sql:"app_id"` + CVEStoreId string `sql:"cve_store_id"` + Action securityBean.PolicyAction `sql:"action, notnull"` + Severity *securityBean.Severity `sql:"severity, notnull "` + Deleted bool `sql:"deleted, notnull"` sql.AuditLog CveStore *CveStore } -type PolicyAction int - -const ( - Inherit PolicyAction = iota - Allow - Block - Blockiffixed -) - -func (d PolicyAction) String() string { - return [...]string{"inherit", "allow", "block", "blockiffixed"}[d] -} - -// ------------------ -type Severity int - -const ( - Low Severity = iota - Medium - Critical - High - Safe -) -const ( - HIGH string = "high" - CRITICAL string = "critical" - SAFE string = "safe" - LOW string = "low" - MEDIUM string = "medium" - MODERATE string = "moderate" -) - -// Handling for future use -func (d Severity) ValuesOf(severity string) Severity { - if severity == CRITICAL || severity == HIGH { - return Critical - } else if severity == MODERATE || severity == MEDIUM { - return Medium - } else 
if severity == LOW || severity == SAFE { - return Low - } - return Low -} - -// Updating it for future use(not in use for standard severity) -func (d Severity) String() string { - return [...]string{"low", "moderate", "critical", "high", "safe"}[d] -} - -// ---------------- -type PolicyLevel int - -const ( - Global PolicyLevel = iota - Cluster - Environment - Application -) - -func (d PolicyLevel) String() string { - return [...]string{"global", "cluster", "environment", "application"}[d] -} - -func (policy *CvePolicy) PolicyLevel() PolicyLevel { +func (policy *CvePolicy) PolicyLevel() securityBean.PolicyLevel { if policy.ClusterId != 0 { - return Cluster + return securityBean.Cluster } else if policy.AppId != 0 { - return Application + return securityBean.Application } else if policy.EnvironmentId != 0 { - return Environment + return securityBean.Environment } else { - return Global + return securityBean.Global } } @@ -250,19 +188,19 @@ func (impl *CvePolicyRepositoryImpl) GetBlockedCVEList(cves []*CveStore, cluster return blockedCve, nil } -func EnforceCvePolicy(cves []*CveStore, cvePolicy map[string]*CvePolicy, severityPolicy map[Severity]*CvePolicy) (blockedCVE []*CveStore) { +func EnforceCvePolicy(cves []*CveStore, cvePolicy map[string]*CvePolicy, severityPolicy map[securityBean.Severity]*CvePolicy) (blockedCVE []*CveStore) { for _, cve := range cves { if policy, ok := cvePolicy[cve.Name]; ok { - if policy.Action == Allow { + if policy.Action == securityBean.Allow { continue - } else if (policy.Action == Block) || (policy.Action == Blockiffixed && cve.FixedVersion != "") { + } else if (policy.Action == securityBean.Block) || (policy.Action == securityBean.Blockiffixed && cve.FixedVersion != "") { blockedCVE = append(blockedCVE, cve) } } else { - if severityPolicy[cve.Severity] != nil && severityPolicy[cve.Severity].Action == Allow { + if severityPolicy[cve.GetSeverity()] != nil && severityPolicy[cve.GetSeverity()].Action == securityBean.Allow { continue - } else if severityPolicy[cve.Severity] != nil && (severityPolicy[cve.Severity].Action == Block || (severityPolicy[cve.Severity].Action == Blockiffixed && cve.FixedVersion != "")) { + } else if severityPolicy[cve.GetSeverity()] != nil && (severityPolicy[cve.GetSeverity()].Action == securityBean.Block || (severityPolicy[cve.GetSeverity()].Action == securityBean.Blockiffixed && cve.FixedVersion != "")) { blockedCVE = append(blockedCVE, cve) } } @@ -270,17 +208,17 @@ func EnforceCvePolicy(cves []*CveStore, cvePolicy map[string]*CvePolicy, severit return blockedCVE } -func (impl *CvePolicyRepositoryImpl) getApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*CvePolicy, map[Severity]*CvePolicy, error) { +func (impl *CvePolicyRepositoryImpl) getApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*CvePolicy, map[securityBean.Severity]*CvePolicy, error) { - var policyLevel PolicyLevel + var policyLevel securityBean.PolicyLevel if isAppstore && appId > 0 && envId > 0 && clusterId > 0 { - policyLevel = Environment + policyLevel = securityBean.Environment } else if appId > 0 && envId > 0 && clusterId > 0 { - policyLevel = Application + policyLevel = securityBean.Application } else if envId > 0 && clusterId > 0 { - policyLevel = Environment + policyLevel = securityBean.Environment } else if clusterId > 0 { - policyLevel = Cluster + policyLevel = securityBean.Cluster } else { //error in case of global or other policy return nil, nil, fmt.Errorf("policy not identified") @@ -290,16 +228,16 @@ func (impl 
*CvePolicyRepositoryImpl) getApplicablePolicy(clusterId, envId, appId return cvePolicy, severityPolicy, err } -func (impl *CvePolicyRepositoryImpl) getPolicies(policyLevel PolicyLevel, clusterId, environmentId, appId int) (map[string]*CvePolicy, map[Severity]*CvePolicy, error) { +func (impl *CvePolicyRepositoryImpl) getPolicies(policyLevel securityBean.PolicyLevel, clusterId, environmentId, appId int) (map[string]*CvePolicy, map[securityBean.Severity]*CvePolicy, error) { var policies []*CvePolicy var err error - if policyLevel == Global { + if policyLevel == securityBean.Global { policies, err = impl.GetGlobalPolicies() - } else if policyLevel == Cluster { + } else if policyLevel == securityBean.Cluster { policies, err = impl.GetClusterPolicies(clusterId) - } else if policyLevel == Environment { + } else if policyLevel == securityBean.Environment { policies, err = impl.GetEnvPolicies(clusterId, environmentId) - } else if policyLevel == Application { + } else if policyLevel == securityBean.Application { policies, err = impl.GetAppEnvPolicies(clusterId, environmentId, appId) } else { return nil, nil, fmt.Errorf("unsupported policy level: %s", policyLevel) @@ -314,9 +252,9 @@ func (impl *CvePolicyRepositoryImpl) getPolicies(policyLevel PolicyLevel, cluste return cvePolicy, severityPolicy, nil } -func (impl *CvePolicyRepositoryImpl) getApplicablePolicies(policies []*CvePolicy) (map[string]*CvePolicy, map[Severity]*CvePolicy) { +func (impl *CvePolicyRepositoryImpl) getApplicablePolicies(policies []*CvePolicy) (map[string]*CvePolicy, map[securityBean.Severity]*CvePolicy) { cvePolicy := make(map[string][]*CvePolicy) - severityPolicy := make(map[Severity][]*CvePolicy) + severityPolicy := make(map[securityBean.Severity][]*CvePolicy) for _, policy := range policies { if policy.CVEStoreId != "" { cvePolicy[policy.CveStore.Name] = append(cvePolicy[policy.CveStore.Name], policy) @@ -347,8 +285,8 @@ func (impl *CvePolicyRepositoryImpl) getHighestPolicy(allPolicies map[string][]* return applicablePolicies } -func (impl *CvePolicyRepositoryImpl) getHighestPolicyS(allPolicies map[Severity][]*CvePolicy) map[Severity]*CvePolicy { - applicablePolicies := make(map[Severity]*CvePolicy) +func (impl *CvePolicyRepositoryImpl) getHighestPolicyS(allPolicies map[securityBean.Severity][]*CvePolicy) map[securityBean.Severity]*CvePolicy { + applicablePolicies := make(map[securityBean.Severity]*CvePolicy) for key, policies := range allPolicies { var applicablePolicy *CvePolicy for _, policy := range policies { diff --git a/internal/sql/repository/security/CveStoreRepository.go b/internal/sql/repository/security/CveStoreRepository.go index 64342d20f54..424a7061bc1 100644 --- a/internal/sql/repository/security/CveStoreRepository.go +++ b/internal/sql/repository/security/CveStoreRepository.go @@ -19,6 +19,7 @@ package security import ( "fmt" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + securityBean "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.uber.org/zap" @@ -27,15 +28,41 @@ import ( ) type CveStore struct { - tableName struct{} `sql:"cve_store" pg:",discard_unknown_columns"` - Name string `sql:"name,pk"` - Severity Severity `sql:"severity,notnull"` - Package string `sql:"package,notnull"` // deprecated - Version string `sql:"version,notnull"` - FixedVersion string `sql:"fixed_version,notnull"` + tableName struct{} `sql:"cve_store" pg:",discard_unknown_columns"` + Name string `sql:"name,pk"` + + // 
Deprecated: Severity, use StandardSeverity for all read purposes + Severity securityBean.Severity `sql:"severity,notnull"` + // Deprecated: Package + Package string `sql:"package,notnull"` // deprecated, storing package data in image_scan_execution_result table + // Deprecated: Version + Version string `sql:"version,notnull"` + // Deprecated: FixedVersion + FixedVersion string `sql:"fixed_version,notnull"` + + // StandardSeverity is the actual severity; use the GetSeverity method to read a vulnerability's severity. + // Earlier, severity was maintained in the Severity column, with HIGH and CRITICAL merged into one value. + // Later, the StandardSeverity column was introduced to store the raw severity, but the existing Severity data was never migrated to it. + // Severity is therefore deprecated. + StandardSeverity *securityBean.Severity `sql:"standard_severity"` sql.AuditLog } +// GetSeverity returns the actual severity of the vulnerability. +func (cve *CveStore) GetSeverity() securityBean.Severity { + if cve.StandardSeverity == nil { + // fall back to Severity for rows written before StandardSeverity existed, + // since the old Severity data was never migrated. + return cve.Severity + } + return *cve.StandardSeverity +} + +func (cve *CveStore) SetStandardSeverity(severity securityBean.Severity) { + cve.Severity = severity + cve.StandardSeverity = &severity +} + type VulnerabilityRequest struct { AppName string `json:"appName"` CveName string `json:"cveName"` diff --git a/internal/sql/repository/security/ImageScanDeployInfoRepository.go b/internal/sql/repository/security/ImageScanDeployInfoRepository.go index ab4d3864726..b3146dd6d8a 100644 --- a/internal/sql/repository/security/ImageScanDeployInfoRepository.go +++ b/internal/sql/repository/security/ImageScanDeployInfoRepository.go @@ -18,6 +18,7 @@ package security import ( "fmt" + securityBean "github.com/devtron-labs/devtron/pkg/security/bean" "github.com/devtron-labs/devtron/pkg/sql" "strconv" "strings" @@ -49,25 +50,6 @@ const ( ScanObjectType_POD string = "pod" ) -type SortBy string -type SortOrder string - -const ( - Asc SortOrder = "ASC" - Desc SortOrder = "DESC" -) - -type ImageScanFilter struct { - Offset int `json:"offset"` - Size int `json:"size"` - CVEName string `json:"cveName"` - AppName string `json:"appName"` - ObjectName string `json:"objectName"` - EnvironmentIds []int `json:"envIds"` - ClusterIds []int `json:"clusterIds"` - Severity []int `json:"severity"` -} - type ImageScanListingResponse struct { Id int `json:"id"` ScanObjectMetaId int `json:"scanObjectMetaId"` @@ -76,6 +58,7 @@ type ImageScanListingResponse struct { SecurityScan string `json:"securityScan"` EnvironmentName string `json:"environmentName"` LastChecked time.Time `json:"lastChecked"` + TotalCount int `json:"totalCount"` } type ImageScanDeployInfoRepository interface { @@ -87,7 +70,7 @@ type ImageScanDeployInfoRepository interface { FetchListingGroupByObject(size int, offset int) ([]*ImageScanDeployInfo, error) FetchByAppIdAndEnvId(appId int, envId int, objectType []string) (*ImageScanDeployInfo, error) FindByTypeMetaAndTypeId(scanObjectMetaId int, objectType string) (*ImageScanDeployInfo, error) - ScanListingWithFilter(request *ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) + ScanListingWithFilter(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) } type ImageScanDeployInfoRepositoryImpl struct { @@ -162,7 +145,7 @@ func (impl 
ImageScanDeployInfoRepositoryImpl) FindByTypeMetaAndTypeId(scanObject return &model, err } -func (impl ImageScanDeployInfoRepositoryImpl) ScanListingWithFilter(request *ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) { +func (impl ImageScanDeployInfoRepositoryImpl) ScanListingWithFilter(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) { var models []*ImageScanListingResponse query := impl.scanListingQueryBuilder(request, size, offset, deployInfoIds) _, err := impl.dbConnection.Query(&models, query, size, offset) @@ -173,9 +156,9 @@ func (impl ImageScanDeployInfoRepositoryImpl) ScanListingWithFilter(request *Ima return models, err } -func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request *ImageScanFilter, size int, offset int, deployInfoIds []int) string { +func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) string { query := "" - query = query + "select info.scan_object_meta_id, info.object_type, env.environment_name, max(info.id) as id" + query = query + "select info.scan_object_meta_id,a.app_name as object_name, info.object_type, env.environment_name, max(info.id) as id, COUNT(*) OVER() AS total_count" query = query + " from image_scan_deploy_info info" if len(request.CVEName) > 0 || len(request.Severity) > 0 { query = query + " INNER JOIN image_scan_execution_history his on his.id = any (info.image_scan_execution_history_id)" @@ -184,18 +167,18 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request } query = query + " INNER JOIN environment env on env.id=info.env_id" query = query + " INNER JOIN cluster clus on clus.id=env.cluster_id" - query = query + " LEFT JOIN app ap on ap.id = info.scan_object_meta_id and info.object_type='app' WHERE ap.active=true" - query = query + " AND info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1 " + query = query + " LEFT JOIN app a on a.id = info.scan_object_meta_id and info.object_type='app' WHERE a.active=true" + query = query + " AND info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1" if len(deployInfoIds) > 0 { ids := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(deployInfoIds)), ","), "[]") query = query + " AND info.id IN (" + ids + ")" } if len(request.CVEName) > 0 { - query = query + " AND res.cve_store_name like '" + request.CVEName + "'" + query = query + " AND res.cve_store_name ILIKE '%" + request.CVEName + "%'" } if len(request.Severity) > 0 { severities := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.Severity)), ","), "[]") - query = query + " AND cs.severity IN (" + severities + ")" + query = query + fmt.Sprintf(" AND (cs.standard_severity IN (%s) OR (cs.severity IN (%s) AND cs.standard_severity IS NULL))", severities, severities) } if len(request.EnvironmentIds) > 0 { envIds := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.EnvironmentIds)), ","), "[]") @@ -205,46 +188,59 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request clusterIds := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.ClusterIds)), ","), "[]") query = query + " AND clus.id IN (" + clusterIds + ")" } - query = query + " group by info.scan_object_meta_id, info.object_type, env.environment_name" - query = query + " order by id desc" 
+ query = query + " GROUP BY info.scan_object_meta_id, a.app_name, info.object_type, env.environment_name" + //query = query + " order by id desc" + query += getOrderByQueryPart(request.SortBy, request.SortOrder) if size > 0 { - query = query + " limit " + strconv.Itoa(size) + " offset " + strconv.Itoa(offset) + "" + query = query + " LIMIT " + strconv.Itoa(size) + " OFFSET " + strconv.Itoa(offset) + "" } query = query + " ;" return query } -func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithObject(request *ImageScanFilter, size int, offset int, deployInfoIds []int) string { - query := "" - if len(request.AppName) > 0 { - query = query + " select info.scan_object_meta_id, a.app_name as object_name, info.object_type, env.environment_name, max(info.id) as id" - query = query + " from image_scan_deploy_info info" - query = query + " INNER JOIN app a on a.id=info.scan_object_meta_id" - } else if len(request.ObjectName) > 0 { - query = query + " select info.scan_object_meta_id, om.name as object_name,info.object_type, env.environment_name, max(info.id) as id" - query = query + " from image_scan_deploy_info info" - query = query + " INNER JOIN image_scan_object_meta om on om.id=info.scan_object_meta_id" +func getOrderByQueryPart(sortBy securityBean.SortBy, sortOrder securityBean.SortOrder) string { + var sort string + if sortBy == "appName" { + sort = "a.app_name" + } else if sortBy == "envName" { + sort = "environment_name" + } else { + // id is to sort by time. + // id with desc fetches latest scans + sort = "id" } + + if sortOrder != securityBean.Desc { + sortOrder = "" + } + return fmt.Sprintf(" ORDER BY %s %s ", sort, sortOrder) +} + +func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithObject(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) string { + + query := " select info.scan_object_meta_id, a.app_name as object_name, info.object_type, env.environment_name, max(info.id) as id, COUNT(*) OVER() AS total_count" + query = query + " from image_scan_deploy_info info" + query = query + " INNER JOIN app a on a.id=info.scan_object_meta_id" + if len(request.Severity) > 0 { query = query + " INNER JOIN image_scan_execution_history his on his.id = any (info.image_scan_execution_history_id)" query = query + " INNER JOIN image_scan_execution_result res on res.image_scan_execution_history_id=his.id" query = query + " INNER JOIN cve_store cs on cs.name= res.cve_store_name" } + query = query + " INNER JOIN environment env on env.id=info.env_id" query = query + " INNER JOIN cluster c on c.id=env.cluster_id" query = query + " WHERE info.scan_object_meta_id > 0 and env.active=true and info.image_scan_execution_history_id[1] != -1" + query = query + " AND a.app_name like '%" + request.AppName + "%'" + if len(deployInfoIds) > 0 { ids := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(deployInfoIds)), ","), "[]") query = query + " AND info.id IN (" + ids + ")" } - if len(request.AppName) > 0 { - query = query + " AND a.app_name like '%" + request.AppName + "%'" - } else if len(request.ObjectName) > 0 { - query = query + " AND om.name like '%" + request.ObjectName + "%'" - } + if len(request.Severity) > 0 { severities := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.Severity)), ","), "[]") - query = query + " AND cs.severity IN (" + severities + ")" + query = query + fmt.Sprintf(" AND (cs.standard_severity IN (%s) OR (cs.severity IN (%s) AND cs.standard_severity IS NULL))", severities, severities) } if len(request.EnvironmentIds) > 0 { 
envIds := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.EnvironmentIds)), ","), "[]") @@ -255,26 +251,23 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithObject(request *I query = query + " AND c.id IN (" + clusterIds + ")" } - if len(request.AppName) > 0 { - query = query + " group by info.scan_object_meta_id, a.app_name, info.object_type, env.environment_name" - } else if len(request.ObjectName) > 0 { - query = query + " group by info.scan_object_meta_id, om.name, info.object_type, env.environment_name" - } - query = query + " order by id desc" + query = query + " GROUP BY info.scan_object_meta_id, a.app_name, info.object_type, env.environment_name" + + query += getOrderByQueryPart(request.SortBy, request.SortOrder) if size > 0 { - query = query + " limit " + strconv.Itoa(size) + " offset " + strconv.Itoa(offset) + "" + query = query + " LIMIT " + strconv.Itoa(size) + " OFFSET " + strconv.Itoa(offset) + "" } query = query + " ;" return query } -func (impl ImageScanDeployInfoRepositoryImpl) scanListingQueryBuilder(request *ImageScanFilter, size int, offset int, deployInfoIds []int) string { +func (impl ImageScanDeployInfoRepositoryImpl) scanListingQueryBuilder(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) string { query := "" if request.AppName == "" && request.CVEName == "" && request.ObjectName == "" { query = impl.scanListQueryWithoutObject(request, size, offset, deployInfoIds) } else if len(request.CVEName) > 0 { query = impl.scanListQueryWithoutObject(request, size, offset, deployInfoIds) - } else if len(request.AppName) > 0 || len(request.ObjectName) > 0 { + } else if len(request.AppName) > 0 { query = impl.scanListQueryWithObject(request, size, offset, deployInfoIds) } diff --git a/internal/sql/repository/security/ImageScanResultRepository.go b/internal/sql/repository/security/ImageScanResultRepository.go index 3123a19f792..0b22493007b 100644 --- a/internal/sql/repository/security/ImageScanResultRepository.go +++ b/internal/sql/repository/security/ImageScanResultRepository.go @@ -30,6 +30,9 @@ type ImageScanExecutionResult struct { Package string `sql:"package"` Version string `sql:"version"` FixedVersion string `sql:"fixed_version"` + Target string `sql:"target"` + Type string `sql:"type"` + Class string `sql:"class"` CveStore CveStore ImageScanExecutionHistory ImageScanExecutionHistory } diff --git a/internal/sql/repository/security/bean/bean.go b/internal/sql/repository/security/bean/bean.go new file mode 100644 index 00000000000..851d7d02540 --- /dev/null +++ b/internal/sql/repository/security/bean/bean.go @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2024. Devtron Inc. 
+ */ + +package bean + +const ( + HIGH string = "high" + CRITICAL string = "critical" + SAFE string = "safe" + LOW string = "low" + MEDIUM string = "medium" + MODERATE string = "moderate" + UNKNOWN string = "unknown" +) + +type PolicyAction int + +const ( + Inherit PolicyAction = iota + Allow + Block + Blockiffixed +) + +func (d PolicyAction) String() string { + return [...]string{"inherit", "allow", "block", "blockiffixed"}[d] +} + +// ------------------ +type Severity int + +const ( + Low Severity = iota + Medium + Critical + High + Safe + Unknown +) + +//// Handling for future use +//func (d Severity) ValuesOf(severity string) Severity { +// if severity == CRITICAL || severity == HIGH { +// return Critical +// } else if severity == MODERATE || severity == MEDIUM { +// return Medium +// } else if severity == LOW || severity == SAFE { +// return Low +// } +// return Low +//} + +// Updating it for future use(not in use for standard severity) +func (d Severity) String() string { + return [...]string{LOW, MEDIUM, CRITICAL, HIGH, SAFE, UNKNOWN}[d] +} + +// ---------------- +type PolicyLevel int + +const ( + Global PolicyLevel = iota + Cluster + Environment + Application +) + +func (d PolicyLevel) String() string { + return [...]string{"global", "cluster", "environment", "application"}[d] +} diff --git a/internal/util/ChartTemplateService.go b/internal/util/ChartTemplateService.go index fca15a15934..dcf025c608a 100644 --- a/internal/util/ChartTemplateService.go +++ b/internal/util/ChartTemplateService.go @@ -46,6 +46,10 @@ const ( CHART_WORKING_DIR_PATH = "/tmp/charts/" ) +const ( + PIPELINE_RELEASE_MODE_CREATE = "create" +) + type ChartCreateRequest struct { ChartMetaData *chart.Metadata ChartPath string diff --git a/pkg/bean/app.go b/pkg/bean/app.go index bfcc00ce1b6..bbab08513a4 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -621,6 +621,7 @@ type CDPipelineConfigObject struct { ChildPipelineId int `json:"childPipelineId"` IsDigestEnforcedForPipeline bool `json:"isDigestEnforcedForPipeline"` IsDigestEnforcedForEnv bool `json:"isDigestEnforcedForEnv"` + ReleaseMode string `json:"releaseMode" validate:"oneof=create"` } type CDPipelineAddType string diff --git a/pkg/bulkAction/BulkUpdateService.go b/pkg/bulkAction/BulkUpdateService.go index eb831e2dcea..695b91efb6b 100644 --- a/pkg/bulkAction/BulkUpdateService.go +++ b/pkg/bulkAction/BulkUpdateService.go @@ -1080,7 +1080,7 @@ func (impl BulkUpdateServiceImpl) buildHibernateUnHibernateRequestForHelmPipelin return nil, nil, err } var group, kind, version, name string - name = fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) + name = pipeline.DeploymentAppName if chartInfo.Name == bean3.RolloutChartType && chartInfo.UserUploaded == false { // rollout type chart group = "argoproj.io" diff --git a/pkg/cluster/repository/EnvironmentRepository.go b/pkg/cluster/repository/EnvironmentRepository.go index c1864ca2864..048aa83ae76 100644 --- a/pkg/cluster/repository/EnvironmentRepository.go +++ b/pkg/cluster/repository/EnvironmentRepository.go @@ -61,6 +61,7 @@ type EnvironmentRepository interface { FindById(id int) (*Environment, error) Update(mappings *Environment) error FindByName(name string) (*Environment, error) + FindIdByName(name string) (int, error) FindByIdentifier(identifier string) (*Environment, error) FindByNameOrIdentifier(name string, identifier string) (*Environment, error) FindByEnvNameOrIdentifierOrNamespace(clusterId int, envName string, identifier string, namespace string) (*Environment, error) @@ -159,6 
+160,18 @@ func (repositoryImpl EnvironmentRepositoryImpl) FindByName(name string) (*Enviro return environment, err } +func (repositoryImpl EnvironmentRepositoryImpl) FindIdByName(name string) (int, error) { + environment := &Environment{} + err := repositoryImpl.dbConnection. + Model(environment). + Column("environment.id"). + Where("environment_name = ?", name). + Where("active = ?", true). + Limit(1). + Select() + return environment.Id, err +} + func (repositoryImpl EnvironmentRepositoryImpl) FindByIdentifier(identifier string) (*Environment, error) { environment := &Environment{} err := repositoryImpl.dbConnection. diff --git a/pkg/configDiff/DeploymentConfigurationService.go b/pkg/configDiff/DeploymentConfigurationService.go new file mode 100644 index 00000000000..360de7f8b35 --- /dev/null +++ b/pkg/configDiff/DeploymentConfigurationService.go @@ -0,0 +1,281 @@ +package configDiff + +import ( + "context" + "encoding/json" + repository2 "github.com/devtron-labs/devtron/internal/sql/repository" + appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/util" + chartService "github.com/devtron-labs/devtron/pkg/chart" + "github.com/devtron-labs/devtron/pkg/cluster/repository" + "github.com/devtron-labs/devtron/pkg/configDiff/adaptor" + bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/pkg/configDiff/helper" + "github.com/devtron-labs/devtron/pkg/configDiff/utils" + "github.com/devtron-labs/devtron/pkg/generateManifest" + "github.com/devtron-labs/devtron/pkg/pipeline" + "github.com/devtron-labs/devtron/pkg/pipeline/bean" + "go.uber.org/zap" + "net/http" + "strconv" +) + +type DeploymentConfigurationService interface { + ConfigAutoComplete(appId int, envId int) (*bean2.ConfigDataResponse, error) + GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfigDto, error) +} + +type DeploymentConfigurationServiceImpl struct { + logger *zap.SugaredLogger + configMapService pipeline.ConfigMapService + appRepository appRepository.AppRepository + environmentRepository repository.EnvironmentRepository + chartService chartService.ChartService + deploymentTemplateService generateManifest.DeploymentTemplateService +} + +func NewDeploymentConfigurationServiceImpl(logger *zap.SugaredLogger, + configMapService pipeline.ConfigMapService, + appRepository appRepository.AppRepository, + environmentRepository repository.EnvironmentRepository, + chartService chartService.ChartService, + deploymentTemplateService generateManifest.DeploymentTemplateService, +) (*DeploymentConfigurationServiceImpl, error) { + deploymentConfigurationService := &DeploymentConfigurationServiceImpl{ + logger: logger, + configMapService: configMapService, + appRepository: appRepository, + environmentRepository: environmentRepository, + chartService: chartService, + deploymentTemplateService: deploymentTemplateService, + } + + return deploymentConfigurationService, nil +} +func (impl *DeploymentConfigurationServiceImpl) ConfigAutoComplete(appId int, envId int) (*bean2.ConfigDataResponse, error) { + cMCSNamesAppLevel, cMCSNamesEnvLevel, err := impl.configMapService.FetchCmCsNamesAppAndEnvLevel(appId, envId) + if err != nil { + impl.logger.Errorw("error in fetching CM and CS names at app or env level", "appId", appId, "envId", envId, "err", err) + return nil, err + } + cmcsKeyPropertyAppLevelMap, cmcsKeyPropertyEnvLevelMap := 
adaptor.GetCmCsAppAndEnvLevelMap(cMCSNamesAppLevel, cMCSNamesEnvLevel) + for key, configProperty := range cmcsKeyPropertyAppLevelMap { + if _, ok := cmcsKeyPropertyEnvLevelMap[key]; !ok { + if envId > 0 { + configProperty.ConfigStage = bean2.Inheriting + } + + } + } + for key, configProperty := range cmcsKeyPropertyEnvLevelMap { + if _, ok := cmcsKeyPropertyAppLevelMap[key]; ok { + configProperty.ConfigStage = bean2.Overridden + } else { + configProperty.ConfigStage = bean2.Env + } + } + combinedProperties := helper.GetCombinedPropertiesMap(cmcsKeyPropertyAppLevelMap, cmcsKeyPropertyEnvLevelMap) + combinedProperties = append(combinedProperties, adaptor.GetConfigProperty(0, "", bean.DeploymentTemplate, bean2.PublishedConfigState)) + + configDataResp := bean2.NewConfigDataResponse().WithResourceConfig(combinedProperties) + return configDataResp, nil +} + +func (impl *DeploymentConfigurationServiceImpl) GetAllConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams) (*bean2.DeploymentAndCmCsConfigDto, error) { + if !configDataQueryParams.IsValidConfigType() { + return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: strconv.Itoa(http.StatusBadRequest), InternalMessage: bean2.InvalidConfigTypeErr, UserMessage: bean2.InvalidConfigTypeErr} + } + var err error + var envId int + var appId int + if configDataQueryParams.IsEnvNameProvided() { + envId, err = impl.environmentRepository.FindIdByName(configDataQueryParams.EnvName) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in getting environment model by envName", "envName", configDataQueryParams.EnvName, "err", err) + return nil, err + } + } + appId, err = impl.appRepository.FindAppIdByName(configDataQueryParams.AppName) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in getting app model by appName", "appName", configDataQueryParams.AppName, "err", err) + return nil, err + } + + configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + switch configDataQueryParams.ConfigType { + default: // keeping default as PublishedOnly + configDataDto, err = impl.getPublishedConfigData(ctx, configDataQueryParams, appId, envId) + if err != nil { + impl.logger.Errorw("GetAllConfigData, error in config data for PublishedOnly", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + } + return configDataDto, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getCmCsEditDataForPublishedOnly(configDataQueryParams *bean2.ConfigDataQueryParams, envId, appId int) (*bean2.DeploymentAndCmCsConfigDto, error) { + configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + + var resourceType bean.ResourceType + var fetchConfigFunc func(string, int, int, int) (*bean.ConfigDataRequest, error) + + if configDataQueryParams.IsResourceTypeSecret() { + //handles for single resource when resource type is secret and for a given resource name + resourceType = bean.CS + fetchConfigFunc = impl.getSecretConfigResponse + } else if configDataQueryParams.IsResourceTypeConfigMap() { + //handles for single resource when resource type is configMap and for a given resource name + resourceType = bean.CM + fetchConfigFunc = impl.getConfigMapResponse + } + cmcsConfigData, err := fetchConfigFunc(configDataQueryParams.ResourceName, configDataQueryParams.ResourceId, envId, appId) + if err != nil { + impl.logger.Errorw("getCmCsEditDataForPublishedOnly, error in getting config response", "resourceName", configDataQueryParams.ResourceName, "envName", configDataQueryParams.EnvName, "err", err) + return 
nil, err + } + + respJson, err := utils.ConvertToJsonRawMessage(cmcsConfigData) + if err != nil { + impl.logger.Errorw("getCmCsEditDataForPublishedOnly, error in converting to json raw message", "configDataQueryParams", configDataQueryParams, "err", err) + return nil, err + } + + cmCsConfig := bean2.NewDeploymentAndCmCsConfig().WithConfigData(respJson).WithResourceType(resourceType) + if resourceType == bean.CS { + configDataDto.WithSecretData(cmCsConfig) + } else if resourceType == bean.CM { + configDataDto.WithConfigMapData(cmCsConfig) + } + return configDataDto, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getCmCsPublishedConfigResponse(envId, appId int) (*bean2.DeploymentAndCmCsConfigDto, error) { + + configDataDto := &bean2.DeploymentAndCmCsConfigDto{} + secretData, err := impl.getSecretConfigResponse("", 0, envId, appId) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in getting secret config response by appId and envId", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + //iterate over the secret configData and set draft data from draftResourcesMap when the same resourceName is found; do the same for the configMap below + cmData, err := impl.getConfigMapResponse("", 0, envId, appId) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in getting config map by appId and envId", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + secretRespJson, err := utils.ConvertToJsonRawMessage(secretData) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting secret data to json raw message", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + cmRespJson, err := utils.ConvertToJsonRawMessage(cmData) + if err != nil { + impl.logger.Errorw("getCmCsPublishedConfigResponse, error in converting config map data to json raw message", "appId", appId, "envId", envId, "err", err) + return nil, err + } + + cmConfigData := bean2.NewDeploymentAndCmCsConfig().WithConfigData(cmRespJson).WithResourceType(bean.CM) + secretConfigData := bean2.NewDeploymentAndCmCsConfig().WithConfigData(secretRespJson).WithResourceType(bean.CS) + + configDataDto.WithConfigMapData(cmConfigData).WithSecretData(secretConfigData) + return configDataDto, nil + +} + +func (impl *DeploymentConfigurationServiceImpl) getPublishedDeploymentConfig(ctx context.Context, appId, envId int) (json.RawMessage, error) { + if envId > 0 { + return impl.getDeploymentTemplateForEnvLevel(ctx, appId, envId) + } + return impl.getBaseDeploymentTemplate(appId) +} + +func (impl *DeploymentConfigurationServiceImpl) getPublishedConfigData(ctx context.Context, configDataQueryParams *bean2.ConfigDataQueryParams, + appId, envId int) (*bean2.DeploymentAndCmCsConfigDto, error) { + + if configDataQueryParams.IsRequestMadeForOneResource() { + return impl.getCmCsEditDataForPublishedOnly(configDataQueryParams, envId, appId) + } + //ConfigMapsData and SecretsData are populated here + configData, err := impl.getCmCsPublishedConfigResponse(envId, appId) + if err != nil { + impl.logger.Errorw("getPublishedConfigData, error in getting cm cs for PublishedOnly state", "appName", configDataQueryParams.AppName, "envName", configDataQueryParams.EnvName, "err", err) + return nil, err + } + deploymentTemplateJsonData, err := impl.getPublishedDeploymentConfig(ctx, appId, envId) + if err != nil { + impl.logger.Errorw("getPublishedConfigData, error in getting publishedOnly deployment config ", "configDataQueryParams", 
configDataQueryParams, "err", err) + return nil, err + } + deploymentConfig := bean2.NewDeploymentAndCmCsConfig().WithConfigData(deploymentTemplateJsonData).WithResourceType(bean.DeploymentTemplate) + + configData.WithDeploymentTemplateData(deploymentConfig) + return configData, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getBaseDeploymentTemplate(appId int) (json.RawMessage, error) { + deploymentTemplateData, err := impl.chartService.FindLatestChartForAppByAppId(appId) + if err != nil { + impl.logger.Errorw("error in getting base deployment template for appId", "appId", appId, "err", err) + return nil, err + } + return deploymentTemplateData.DefaultAppOverride, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentTemplateForEnvLevel(ctx context.Context, appId, envId int) (json.RawMessage, error) { + deploymentTemplateRequest := generateManifest.DeploymentTemplateRequest{ + AppId: appId, + EnvId: envId, + RequestDataMode: generateManifest.Values, + Type: repository2.PublishedOnEnvironments, + } + deploymentTemplateResponse, err := impl.deploymentTemplateService.GetDeploymentTemplate(ctx, deploymentTemplateRequest) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in getting deployment template for ", "deploymentTemplateRequest", deploymentTemplateRequest, "err", err) + return nil, err + } + deploymentJson := json.RawMessage{} + err = deploymentJson.UnmarshalJSON([]byte(deploymentTemplateResponse.Data)) + if err != nil { + impl.logger.Errorw("getDeploymentTemplateForEnvLevel, error in unmarshalling string deploymentTemplateResponse data into json Raw message", "data", deploymentTemplateResponse.Data, "err", err) + return nil, err + } + return deploymentJson, nil +} + +func (impl *DeploymentConfigurationServiceImpl) getDeploymentConfig(ctx context.Context, appId, envId int) (json.RawMessage, error) { + if envId > 0 { + return impl.getDeploymentTemplateForEnvLevel(ctx, appId, envId) + } + return impl.getBaseDeploymentTemplate(appId) +} + +func (impl *DeploymentConfigurationServiceImpl) getSecretConfigResponse(resourceName string, resourceId, envId, appId int) (*bean.ConfigDataRequest, error) { + if len(resourceName) > 0 { + if envId > 0 { + return impl.configMapService.CSEnvironmentFetchForEdit(resourceName, resourceId, appId, envId) + } + return impl.configMapService.ConfigGlobalFetchEditUsingAppId(resourceName, appId, bean.CS) + } + + if envId > 0 { + return impl.configMapService.CSEnvironmentFetch(appId, envId) + } + return impl.configMapService.CSGlobalFetch(appId) +} + +func (impl *DeploymentConfigurationServiceImpl) getConfigMapResponse(resourceName string, resourceId, envId, appId int) (*bean.ConfigDataRequest, error) { + if len(resourceName) > 0 { + if envId > 0 { + return impl.configMapService.CMEnvironmentFetchForEdit(resourceName, resourceId, appId, envId) + } + return impl.configMapService.ConfigGlobalFetchEditUsingAppId(resourceName, appId, bean.CM) + } + + if envId > 0 { + return impl.configMapService.CMEnvironmentFetch(appId, envId) + } + return impl.configMapService.CMGlobalFetch(appId) +} diff --git a/pkg/configDiff/adaptor/adaptor.go b/pkg/configDiff/adaptor/adaptor.go new file mode 100644 index 00000000000..4ab81eb2d11 --- /dev/null +++ b/pkg/configDiff/adaptor/adaptor.go @@ -0,0 +1,29 @@ +package adaptor + +import ( + bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" + "github.com/devtron-labs/devtron/pkg/pipeline/bean" +) + +func GetConfigProperty(id int, name string, configType bean.ResourceType, 
State bean2.ConfigState) *bean2.ConfigProperty { + return &bean2.ConfigProperty{ + Id: id, + Name: name, + Type: configType, + ConfigState: State, + } +} + +func GetCmCsAppAndEnvLevelMap(cMCSNamesAppLevel, cMCSNamesEnvLevel []bean.ConfigNameAndType) (map[string]*bean2.ConfigProperty, map[string]*bean2.ConfigProperty) { + cMCSNamesAppLevelMap, cMCSNamesEnvLevelMap := make(map[string]*bean2.ConfigProperty, len(cMCSNamesAppLevel)), make(map[string]*bean2.ConfigProperty, len(cMCSNamesEnvLevel)) + + for _, cmcs := range cMCSNamesAppLevel { + property := GetConfigProperty(cmcs.Id, cmcs.Name, cmcs.Type, bean2.PublishedConfigState) + cMCSNamesAppLevelMap[property.GetKey()] = property + } + for _, cmcs := range cMCSNamesEnvLevel { + property := GetConfigProperty(cmcs.Id, cmcs.Name, cmcs.Type, bean2.PublishedConfigState) + cMCSNamesEnvLevelMap[property.GetKey()] = property + } + return cMCSNamesAppLevelMap, cMCSNamesEnvLevelMap +} diff --git a/pkg/configDiff/bean/bean.go b/pkg/configDiff/bean/bean.go new file mode 100644 index 00000000000..2113ea81a65 --- /dev/null +++ b/pkg/configDiff/bean/bean.go @@ -0,0 +1,151 @@ +package bean + +import ( + "encoding/json" + "fmt" + "github.com/devtron-labs/devtron/pkg/pipeline/bean" +) + +type ConfigState string + +const ( + PublishedConfigState ConfigState = "PublishedOnly" +) + +func (r ConfigState) ToString() string { + return string(r) +} + +type ConfigStage string + +const ( + Env ConfigStage = "Env" + Inheriting ConfigStage = "Inheriting" + Overridden ConfigStage = "Overridden" +) + +type ConfigProperty struct { + Id int `json:"id"` + Name string `json:"name"` + ConfigState ConfigState `json:"configState"` + Type bean.ResourceType `json:"type"` + ConfigStage ConfigStage `json:"configStage"` +} + +func NewConfigProperty() *ConfigProperty { + return &ConfigProperty{} +} + +func (r *ConfigProperty) IsConfigPropertyGlobal() bool { + return r.ConfigStage == Inheriting +} + +type ConfigDataResponse struct { + ResourceConfig []*ConfigProperty `json:"resourceConfig"` +} + +func NewConfigDataResponse() *ConfigDataResponse { + return &ConfigDataResponse{} +} + +func (r *ConfigDataResponse) WithResourceConfig(resourceConfig []*ConfigProperty) *ConfigDataResponse { + r.ResourceConfig = resourceConfig + return r +} + +func (r *ConfigProperty) GetKey() string { + return fmt.Sprintf("%s-%s", string(r.Type), r.Name) +} + +type ConfigPropertyIdentifier struct { + Name string `json:"name"` + Type bean.ResourceType `json:"type"` +} + +func (r *ConfigProperty) GetIdentifier() ConfigPropertyIdentifier { + return ConfigPropertyIdentifier{ + Name: r.Name, + Type: r.Type, + } +} + +type DeploymentAndCmCsConfig struct { + ResourceType bean.ResourceType `json:"resourceType"` + Data json.RawMessage `json:"data"` +} + +func NewDeploymentAndCmCsConfig() *DeploymentAndCmCsConfig { + return &DeploymentAndCmCsConfig{} +} + +func (r *DeploymentAndCmCsConfig) WithResourceType(resourceType bean.ResourceType) *DeploymentAndCmCsConfig { + r.ResourceType = resourceType + return r +} + +func (r *DeploymentAndCmCsConfig) WithConfigData(data json.RawMessage) *DeploymentAndCmCsConfig { + r.Data = data + return r +} + +type DeploymentAndCmCsConfigDto struct { + DeploymentTemplate *DeploymentAndCmCsConfig `json:"deploymentTemplate"` + ConfigMapsData *DeploymentAndCmCsConfig `json:"configMapData"` + SecretsData *DeploymentAndCmCsConfig `json:"secretsData"` + IsAppAdmin bool `json:"isAppAdmin"` +} + +func NewDeploymentAndCmCsConfigDto() *DeploymentAndCmCsConfigDto { + return 
&DeploymentAndCmCsConfigDto{} +} + +func (r *DeploymentAndCmCsConfigDto) WithDeploymentTemplateData(data *DeploymentAndCmCsConfig) *DeploymentAndCmCsConfigDto { + r.DeploymentTemplate = data + return r +} +func (r *DeploymentAndCmCsConfigDto) WithConfigMapData(data *DeploymentAndCmCsConfig) *DeploymentAndCmCsConfigDto { + r.ConfigMapsData = data + return r +} +func (r *DeploymentAndCmCsConfigDto) WithSecretData(data *DeploymentAndCmCsConfig) *DeploymentAndCmCsConfigDto { + r.SecretsData = data + return r +} + +type ConfigDataQueryParams struct { + AppName string `schema:"appName"` + EnvName string `schema:"envName"` + ConfigType string `schema:"configType"` + IdentifierId int `schema:"identifierId"` + PipelineId int `schema:"pipelineId"` // req for fetching previous deployments data + ResourceName string `schema:"resourceName"` + ResourceType string `schema:"resourceType"` + ResourceId int `schema:"resourceId"` + UserId int32 `schema:"-"` +} + +// FilterCriteria []string `schema:"filterCriteria"` +// OffSet int `schema:"offSet"` +// Limit int `schema:"limit"` +func (r *ConfigDataQueryParams) IsResourceTypeSecret() bool { + return r.ResourceType == bean.CS.ToString() +} + +func (r *ConfigDataQueryParams) IsResourceTypeConfigMap() bool { + return r.ResourceType == bean.CM.ToString() +} + +func (r *ConfigDataQueryParams) IsEnvNameProvided() bool { + return len(r.EnvName) > 0 +} +func (r *ConfigDataQueryParams) IsValidConfigType() bool { + return r.ConfigType == PublishedConfigState.ToString() +} + +func (r *ConfigDataQueryParams) IsRequestMadeForOneResource() bool { + return len(r.ResourceName) > 0 && len(r.ResourceType) > 0 +} + +const ( + InvalidConfigTypeErr = "invalid config type provided, please send a valid config type" +) diff --git a/pkg/configDiff/helper/helper.go b/pkg/configDiff/helper/helper.go new file mode 100644 index 00000000000..70082a7bea6 --- /dev/null +++ b/pkg/configDiff/helper/helper.go @@ -0,0 +1,20 @@ +package helper + +import ( + bean2 "github.com/devtron-labs/devtron/pkg/configDiff/bean" +) + +func GetCombinedPropertiesMap(cmcsKeyPropertyAppLevelMap, cmcsKeyPropertyEnvLevelMap map[string]*bean2.ConfigProperty) []*bean2.ConfigProperty { + combinedPropertiesMap := make(map[string]*bean2.ConfigProperty, len(cmcsKeyPropertyAppLevelMap)+len(cmcsKeyPropertyEnvLevelMap)) + for key, property := range cmcsKeyPropertyAppLevelMap { + combinedPropertiesMap[key] = property + } + for key, property := range cmcsKeyPropertyEnvLevelMap { + combinedPropertiesMap[key] = property + } + combinedProperties := make([]*bean2.ConfigProperty, 0, len(cmcsKeyPropertyAppLevelMap)+len(cmcsKeyPropertyEnvLevelMap)) + for _, property := range combinedPropertiesMap { + combinedProperties = append(combinedProperties, property) + } + return combinedProperties +} diff --git a/pkg/configDiff/utils/utils.go b/pkg/configDiff/utils/utils.go new file mode 100644 index 00000000000..8185993775f --- /dev/null +++ b/pkg/configDiff/utils/utils.go @@ -0,0 +1,16 @@ +package utils + +import "encoding/json" + +func ConvertToJsonRawMessage(request interface{}) (json.RawMessage, error) { + var r json.RawMessage + configMapByte, err := json.Marshal(request) + if err != nil { + return nil, err + } + err = r.UnmarshalJSON(configMapByte) + if err != nil { + return nil, err + } + return r, nil +} diff --git a/pkg/deployment/common/adapter.go b/pkg/deployment/common/adapter.go index a6f4618e5a0..9d58afaa20a 100644 --- a/pkg/deployment/common/adapter.go +++ b/pkg/deployment/common/adapter.go @@ -14,6 +14,7 @@ func 
ConvertDeploymentConfigDTOToDbObj(config *bean.DeploymentConfig) *deploymen ConfigType: config.ConfigType, RepoUrl: config.RepoURL, Active: config.Active, + ReleaseMode: config.ReleaseMode, } } @@ -26,5 +27,6 @@ func ConvertDeploymentConfigDbObjToDTO(dbObj *deploymentConfig.DeploymentConfig) ConfigType: dbObj.ConfigType, RepoURL: dbObj.RepoUrl, Active: dbObj.Active, + ReleaseMode: dbObj.ReleaseMode, } } diff --git a/pkg/deployment/common/bean/bean.go b/pkg/deployment/common/bean/bean.go index 8817d3cb45d..e057c249981 100644 --- a/pkg/deployment/common/bean/bean.go +++ b/pkg/deployment/common/bean/bean.go @@ -14,6 +14,7 @@ type DeploymentConfig struct { DeploymentAppType string RepoURL string RepoName string + ReleaseMode string Active bool } diff --git a/pkg/deployment/common/deploymentConfigService.go b/pkg/deployment/common/deploymentConfigService.go index 003ad389ea9..4c553b17600 100644 --- a/pkg/deployment/common/deploymentConfigService.go +++ b/pkg/deployment/common/deploymentConfigService.go @@ -6,6 +6,7 @@ import ( appRepository "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/deploymentConfig" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" + util2 "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" bean3 "github.com/devtron-labs/devtron/pkg/auth/user/bean" chartRepoRepository "github.com/devtron-labs/devtron/pkg/chartRepo/repository" @@ -101,6 +102,20 @@ func (impl *DeploymentConfigServiceImpl) GetConfigForDevtronApps(appId, envId in impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err) return nil, err } + if envId > 0 { + // populate columns introduced after the deployment-app-type and repo-url migration here + appAndEnvLevelConfig, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting deployment config db object by appId and envId", "appId", appId, "envId", envId, "err", err) + return nil, err + } + if err == pg.ErrNoRows { + // no deployment config row exists yet, so default the release mode to create + configFromOldData.ReleaseMode = util2.PIPELINE_RELEASE_MODE_CREATE + } else { + configFromOldData.ReleaseMode = appAndEnvLevelConfig.ReleaseMode + } + } return configFromOldData, nil } @@ -190,6 +205,9 @@ func (impl *DeploymentConfigServiceImpl) GetAndMigrateConfigIfAbsentForDevtronAp impl.logger.Errorw("error in parsing config from charts and pipeline repository", "appId", appId, "envId", envId, "err", err) return nil, err } + if envId > 0 { + configFromOldData.ReleaseMode = envLevelConfig.ReleaseMode + } return configFromOldData, nil } @@ -256,6 +274,7 @@ func (impl *DeploymentConfigServiceImpl) parseEnvLevelConfigForDevtronApps(appLe EnvironmentId: envId, ConfigType: appLevelConfig.ConfigType, RepoUrl: appLevelConfig.RepoUrl, + ReleaseMode: util2.PIPELINE_RELEASE_MODE_CREATE, //for migration this is always create, since migration only happens for old cd pipelines Active: true, } diff --git a/pkg/deployment/manifest/ManifestCreationService.go b/pkg/deployment/manifest/ManifestCreationService.go index 9b2cc199274..cbb37243bb7 100644 --- a/pkg/deployment/manifest/ManifestCreationService.go +++ b/pkg/deployment/manifest/ManifestCreationService.go @@ -265,7 +265,7 @@ func (impl *ManifestCreationServiceImpl) GetValuesOverrideForTrigger(overrideReq appLabelJsonByte = nil } mergedValues, err := 
impl.mergeOverrideValues(envOverride, releaseOverrideJson, configMapJson.MergedJson, appLabelJsonByte, strategy) - appName := fmt.Sprintf("%s-%s", overrideRequest.AppName, envOverride.Environment.Name) + appName := pipeline.DeploymentAppName var k8sErr error mergedValues, k8sErr = impl.updatedExternalCmCsHashForTrigger(newCtx, overrideRequest.ClusterId, envOverride.Namespace, mergedValues, configMapJson.ExternalCmList, configMapJson.ExternalCsList) diff --git a/pkg/deployment/trigger/devtronApps/TriggerService.go b/pkg/deployment/trigger/devtronApps/TriggerService.go index c5f83a8f071..dbcff74daf2 100644 --- a/pkg/deployment/trigger/devtronApps/TriggerService.go +++ b/pkg/deployment/trigger/devtronApps/TriggerService.go @@ -956,20 +956,6 @@ func (impl *TriggerServiceImpl) buildManifestPushTemplate(overrideRequest *bean3 return manifestPushTemplate, err } -// getAcdAppGitOpsRepoName returns the GitOps repository name, configured for the argoCd app -func (impl *TriggerServiceImpl) getAcdAppGitOpsRepoName(appName string, environmentName string) (string, error) { - //this method should only call in case of argo-integration and gitops configured - acdToken, err := impl.argoUserService.GetLatestDevtronArgoCdUserToken() - if err != nil { - impl.logger.Errorw("error in getting acd token", "err", err) - return "", err - } - ctx := context.Background() - ctx = context.WithValue(ctx, "token", acdToken) - acdAppName := fmt.Sprintf("%s-%s", appName, environmentName) - return impl.argoClientWrapperService.GetGitOpsRepoName(ctx, acdAppName) -} - func (impl *TriggerServiceImpl) getManifestPushService(triggerEvent bean.TriggerEvent) publish.ManifestPushService { var manifestPushService publish.ManifestPushService if triggerEvent.ManifestStorageType == bean2.ManifestStorageGit { diff --git a/pkg/generateManifest/DeploymentTemplateService.go b/pkg/generateManifest/DeploymentTemplateService.go index 446d3867bc6..d4a7a2270d0 100644 --- a/pkg/generateManifest/DeploymentTemplateService.go +++ b/pkg/generateManifest/DeploymentTemplateService.go @@ -501,7 +501,18 @@ func (impl DeploymentTemplateServiceImpl) GetRestartWorkloadData(ctx context.Con impl.Logger.Errorw("error in fetching environment", "envId", envId, "err", err) return nil, err } - installReleaseRequests, err := impl.constructInstallReleaseBulkReq(apps, environment) + + pipelineMap := make(map[string]*pipelineConfig.Pipeline) + pipelines, err := impl.pipelineRepository.FindActiveByInFilter(envId, appIds) + if err != nil { + impl.Logger.Errorw("error in getting pipelines by appIds and envId", "appIds", appIds, "envId", envId, "err", err) + return nil, err + } + for _, p := range pipelines { + pipelineMap[fmt.Sprintf("%d-%d", p.AppId, p.EnvironmentId)] = p + } + + installReleaseRequests, err := impl.constructInstallReleaseBulkReq(apps, environment, pipelineMap) if err != nil { impl.Logger.Errorw("error in fetching installReleaseRequests", "appIds", appIds, "envId", envId, "err", err) return nil, err diff --git a/pkg/generateManifest/helper.go b/pkg/generateManifest/helper.go index 912ab62c80e..c8a4fc7b4ec 100644 --- a/pkg/generateManifest/helper.go +++ b/pkg/generateManifest/helper.go @@ -23,6 +23,7 @@ import ( "github.com/devtron-labs/devtron/api/helm-app/bean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" "github.com/devtron-labs/devtron/internal/sql/repository/app" + "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/cluster/repository" "go.opentelemetry.io/otel" "golang.org/x/exp/maps" 
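The pkg/generateManifest/helper.go hunks that follow resolve the helm release name from the pipeline's stored DeploymentAppName, looked up in a map keyed by "appId-envId", instead of concatenating app and environment names. A short illustrative sketch of that lookup with hypothetical types; unlike the patch, the sketch also guards against a map miss and falls back to the legacy concatenation:

package main

import "fmt"

// pipelineRef carries only the field used here; a stand-in for pipelineConfig.Pipeline.
type pipelineRef struct {
	DeploymentAppName string
}

// releaseNameFor resolves the release name from a map keyed "appId-envId",
// falling back to the legacy "<app>-<env>" convention on a miss.
func releaseNameFor(pipelines map[string]*pipelineRef, appId, envId int, appName, envName string) string {
	if p, ok := pipelines[fmt.Sprintf("%d-%d", appId, envId)]; ok && p != nil {
		return p.DeploymentAppName // the stored name can differ from the concatenation
	}
	return fmt.Sprintf("%s-%s", appName, envName)
}

func main() {
	m := map[string]*pipelineRef{"7-3": {DeploymentAppName: "billing-prod"}}
	fmt.Println(releaseNameFor(m, 7, 3, "billing", "prod"))  // billing-prod
	fmt.Println(releaseNameFor(m, 7, 4, "billing", "stage")) // billing-stage (fallback)
}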
@@ -67,7 +68,7 @@ func (impl DeploymentTemplateServiceImpl) constructRotatePodResponse(templateCha return podResp, nil } -func (impl DeploymentTemplateServiceImpl) constructInstallReleaseBulkReq(apps []*app.App, environment *repository.Environment) ([]*gRPC.InstallReleaseRequest, error) { +func (impl DeploymentTemplateServiceImpl) constructInstallReleaseBulkReq(apps []*app.App, environment *repository.Environment, pipelineMap map[string]*pipelineConfig.Pipeline) ([]*gRPC.InstallReleaseRequest, error) { appIdToInstallReleaseRequest := make(map[int]*gRPC.InstallReleaseRequest) installReleaseRequests := make([]*gRPC.InstallReleaseRequest, 0) var applicationIds []int @@ -104,9 +105,10 @@ func (impl DeploymentTemplateServiceImpl) constructInstallReleaseBulkReq(apps [] impl.Logger.Errorw("error in fetching cluster detail", "clusterId", 1, "err", err) return nil, err } + for _, app := range apps { if _, ok := appIdToInstallReleaseRequest[app.Id]; ok { - appIdToInstallReleaseRequest[app.Id].ReleaseIdentifier = impl.getReleaseIdentifier(config, app, environment) + appIdToInstallReleaseRequest[app.Id].ReleaseIdentifier = impl.getReleaseIdentifier(config, app, environment, pipelineMap) appIdToInstallReleaseRequest[app.Id].K8SVersion = k8sServerVersion.String() } } @@ -140,10 +142,11 @@ func (impl DeploymentTemplateServiceImpl) setChartContent(ctx context.Context, i return err } -func (impl DeploymentTemplateServiceImpl) getReleaseIdentifier(config *gRPC.ClusterConfig, app *app.App, env *repository.Environment) *gRPC.ReleaseIdentifier { +func (impl DeploymentTemplateServiceImpl) getReleaseIdentifier(config *gRPC.ClusterConfig, app *app.App, env *repository.Environment, pipelineMap map[string]*pipelineConfig.Pipeline) *gRPC.ReleaseIdentifier { + pipeline := pipelineMap[fmt.Sprintf("%d-%d", app.Id, env.Id)] return &gRPC.ReleaseIdentifier{ ClusterConfig: config, - ReleaseName: fmt.Sprintf("%s-%s", app.AppName, env.Name), + ReleaseName: pipeline.DeploymentAppName, ReleaseNamespace: env.Namespace, } } diff --git a/pkg/pipeline/AppDeploymentTypeChangeManager.go b/pkg/pipeline/AppDeploymentTypeChangeManager.go index d78c59e60ac..a6130a23576 100644 --- a/pkg/pipeline/AppDeploymentTypeChangeManager.go +++ b/pkg/pipeline/AppDeploymentTypeChangeManager.go @@ -242,7 +242,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx TriggeredPipelines: make([]*bean.CdPipelineTrigger, 0), } - var deleteDeploymentType bean3.DeploymentType + var deleteDeploymentType string if request.DesiredDeploymentType == bean3.ArgoCd { deleteDeploymentType = bean3.Helm @@ -251,12 +251,12 @@ func (impl *AppDeploymentTypeChangeManagerImpl) ChangePipelineDeploymentType(ctx } pipelines, err := impl.pipelineRepository.FindActiveByEnvIdAndDeploymentType(request.EnvId, - string(deleteDeploymentType), request.ExcludeApps, request.IncludeApps) + deleteDeploymentType, request.ExcludeApps, request.IncludeApps) if err != nil { impl.logger.Errorw("Error fetching cd pipelines", "environmentId", request.EnvId, - "currentDeploymentAppType", string(deleteDeploymentType), + "currentDeploymentAppTypes", deleteDeploymentType, "err", err) return response, err } @@ -334,7 +334,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) TriggerDeploymentAfterTypeChange var err error cdPipelines, err := impl.pipelineRepository.FindActiveByEnvIdAndDeploymentType(request.EnvId, - string(request.DesiredDeploymentType), request.ExcludeApps, request.IncludeApps) + request.DesiredDeploymentType, request.ExcludeApps, request.IncludeApps) if 
err != nil { impl.logger.Errorw("Error fetching cd pipelines", @@ -475,11 +475,10 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context continue } - deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) // delete request var err error if envDeploymentConfig.DeploymentAppType == bean3.ArgoCd { - err = impl.deleteArgoCdApp(ctx, pipeline, deploymentAppName, true) + err = impl.deleteArgoCdApp(ctx, pipeline, pipeline.DeploymentAppName, true) } else { @@ -552,7 +551,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context } if err != nil { impl.logger.Errorw("error registering app on ACD with error: "+err.Error(), - "deploymentAppName", deploymentAppName, + "deploymentAppName", pipeline.DeploymentAppName, "envId", pipeline.EnvironmentId, "appId", pipeline.AppId, "err", err) @@ -568,7 +567,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentApps(ctx context if err != nil { impl.logger.Errorw("error deleting app on "+envDeploymentConfig.DeploymentAppType, - "deployment app name", deploymentAppName, + "deployment app name", pipeline.DeploymentAppName, "err", err) // deletion failed, append to the list of failed pipelines @@ -597,7 +596,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) DeleteDeploymentAppsForEnvironme // fetch active pipelines from database for the given environment id and current deployment app type pipelines, err := impl.pipelineRepository.FindActiveByEnvIdAndDeploymentType(environmentId, - string(currentDeploymentAppType), exclusionList, includeApps) + currentDeploymentAppType, exclusionList, includeApps) deploymentConfigs := make([]*bean4.DeploymentConfig, 0) for _, p := range pipelines { @@ -728,7 +727,6 @@ func (impl *AppDeploymentTypeChangeManagerImpl) fetchDeletedApp(ctx context.Cont if err != nil { impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", pipeline.AppId, "envId", pipeline.EnvironmentId, "err", err) } - deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) if envDeploymentConfig.DeploymentAppType == bean3.ArgoCd { appIdentifier := &helmBean.AppIdentifier{ ClusterId: pipeline.Environment.ClusterId, @@ -738,12 +736,12 @@ func (impl *AppDeploymentTypeChangeManagerImpl) fetchDeletedApp(ctx context.Cont _, err = impl.helmAppService.GetApplicationDetail(ctx, appIdentifier) } else { req := &application.ApplicationQuery{ - Name: &deploymentAppName, + Name: &pipeline.DeploymentAppName, } _, err = impl.application.Get(ctx, req) } if err != nil { - impl.logger.Errorw("error in getting application detail", "err", err, "deploymentAppName", deploymentAppName) + impl.logger.Errorw("error in getting application detail", "err", err, "deploymentAppName", pipeline.DeploymentAppName) } if err != nil && checkAppReleaseNotExist(err) { diff --git a/pkg/pipeline/ConfigMapService.go b/pkg/pipeline/ConfigMapService.go index 49c08772a15..906aaf31933 100644 --- a/pkg/pipeline/ConfigMapService.go +++ b/pkg/pipeline/ConfigMapService.go @@ -58,6 +58,7 @@ type ConfigMapService interface { CMEnvironmentFetch(appId int, envId int) (*bean.ConfigDataRequest, error) CMGlobalFetchForEdit(name string, id int) (*bean.ConfigDataRequest, error) CMEnvironmentFetchForEdit(name string, id int, appId int, envId int) (*bean.ConfigDataRequest, error) + ConfigGlobalFetchEditUsingAppId(name string, appId int, resourceType bean.ResourceType) (*bean.ConfigDataRequest, error) CSGlobalAddUpdate(configMapRequest 
*bean.ConfigDataRequest) (*bean.ConfigDataRequest, error) CSGlobalFetch(appId int) (*bean.ConfigDataRequest, error) @@ -83,6 +84,8 @@ type ConfigMapService interface { ConfigSecretEnvironmentDelete(createJobEnvOverrideRequest *bean.CreateJobEnvOverridePayload) (*bean.CreateJobEnvOverridePayload, error) ConfigSecretEnvironmentGet(appId int) ([]bean.JobEnvOverrideResponse, error) ConfigSecretEnvironmentClone(appId int, cloneAppId int, userId int32) ([]chartConfig.ConfigMapEnvModel, error) + + FetchCmCsNamesAppAndEnvLevel(appId int, envId int) ([]bean.ConfigNameAndType, []bean.ConfigNameAndType, error) } type ConfigMapServiceImpl struct { @@ -126,6 +129,17 @@ func NewConfigMapServiceImpl(chartRepository chartRepoRepository.ChartRepository } } +func (impl ConfigMapServiceImpl) checkIfConfigDataAlreadyExist(appId int) (int, error) { + config, err := impl.configMapRepository.GetByAppIdAppLevel(appId) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error while checking if config data exist from db by appId", "appId", appId, "error", err) + return 0, err + } else if util.IsErrNoRows(err) { + return 0, nil + } + return config.Id, nil +} + func (impl ConfigMapServiceImpl) CMGlobalAddUpdate(configMapRequest *bean.ConfigDataRequest) (*bean.ConfigDataRequest, error) { if len(configMapRequest.ConfigData) != 1 { return nil, fmt.Errorf("invalid request multiple config found for add or update") @@ -137,6 +151,14 @@ func (impl ConfigMapServiceImpl) CMGlobalAddUpdate(configMapRequest *bean.Config return configMapRequest, err } var model *chartConfig.ConfigMapAppModel + requestId, err := impl.checkIfConfigDataAlreadyExist(configMapRequest.AppId) + if err != nil { + impl.logger.Errorw("error in checking if config map data already exists or not for appId", "appId", configMapRequest.AppId, "error", err) + return configMapRequest, err + } + if requestId > 0 { + configMapRequest.Id = requestId + } if configMapRequest.Id > 0 { model, err = impl.configMapRepository.GetByIdAppLevel(configMapRequest.Id) if err != nil { @@ -487,8 +509,6 @@ func (impl ConfigMapServiceImpl) CMEnvironmentFetch(appId int, envId int) (*bean if configDataRequest.ConfigData == nil { list := []*bean.ConfigData{} configDataRequest.ConfigData = list - } else { - //configDataRequest.ConfigData = configMapGlobalList.ConfigData } return configDataRequest, nil @@ -524,6 +544,14 @@ func (impl ConfigMapServiceImpl) CSGlobalAddUpdate(configMapRequest *bean.Config impl.logger.Errorw("error in validating", "error", err) return configMapRequest, err } + requestId, err := impl.checkIfConfigDataAlreadyExist(configMapRequest.AppId) + if err != nil { + impl.logger.Errorw("error in checking if config secret data already exists or not for appId", "appId", configMapRequest.AppId, "error", err) + return configMapRequest, err + } + if requestId > 0 { + configMapRequest.Id = requestId + } var model *chartConfig.ConfigMapAppModel if configMapRequest.Id > 0 { model, err = impl.configMapRepository.GetByIdAppLevel(configMapRequest.Id) @@ -640,68 +668,15 @@ func (impl ConfigMapServiceImpl) CSGlobalFetch(appId int) (*bean.ConfigDataReque configDataRequest := &bean.ConfigDataRequest{} configDataRequest.Id = configMapGlobal.Id configDataRequest.AppId = appId - //configDataRequest.ConfigData = configMapGlobalList.ConfigData for _, item := range configMapGlobalList.ConfigData { item.Global = true configDataRequest.ConfigData = append(configDataRequest.ConfigData, item) } - //removing actual values - var configs []*bean.ConfigData - for _, item := range 
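checkIfConfigDataAlreadyExist above turns a not-found row into id 0 so that CMGlobalAddUpdate and CSGlobalAddUpdate can transparently switch between insert and update. A minimal sketch of that resolve-or-zero shape, with a local errNoRows sentinel standing in for pg.ErrNoRows and a toy store replacing configMapRepository:

package main

import (
	"errors"
	"fmt"
)

var errNoRows = errors.New("no rows") // stand-in for pg.ErrNoRows

type store struct{ byApp map[int]int } // appId -> existing config row id

func (s store) getByAppId(appId int) (int, error) {
	if id, ok := s.byApp[appId]; ok {
		return id, nil
	}
	return 0, errNoRows
}

// resolveId reuses an existing app-level row id when present and reports 0
// (meaning "insert a new row") when none exists; real errors propagate.
func resolveId(s store, appId int) (int, error) {
	id, err := s.getByAppId(appId)
	if err != nil && !errors.Is(err, errNoRows) {
		return 0, err
	}
	if errors.Is(err, errNoRows) {
		return 0, nil
	}
	return id, nil
}

func main() {
	s := store{byApp: map[int]int{42: 7}}
	fmt.Println(resolveId(s, 42)) // 7 <nil>
	fmt.Println(resolveId(s, 99)) // 0 <nil>
}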
configDataRequest.ConfigData { - resultMap := make(map[string]string) - resultMapFinal := make(map[string]string) - - if item.Data != nil { - err = json.Unmarshal(item.Data, &resultMap) - if err != nil { - impl.logger.Warnw("unmarshal failed: ", "error", err) - configs = append(configs, item) - continue - } - for k := range resultMap { - resultMapFinal[k] = "" - } - resultByte, err := json.Marshal(resultMapFinal) - if err != nil { - impl.logger.Errorw("error while marshaling request ", "err", err) - return nil, err - } - item.Data = resultByte - } - - var externalSecret []bean.ExternalSecret - if item.ExternalSecret != nil && len(item.ExternalSecret) > 0 { - for _, es := range item.ExternalSecret { - externalSecret = append(externalSecret, bean.ExternalSecret{Key: es.Key, Name: es.Name, Property: es.Property, IsBinary: es.IsBinary}) - } - } - item.ExternalSecret = externalSecret - - var esoData []bean.ESOData - if len(item.ESOSecretData.EsoData) > 0 { - for _, data := range item.ESOSecretData.EsoData { - esoData = append(esoData, bean.ESOData{Key: data.Key, SecretKey: data.SecretKey, Property: data.Property}) - } - } - - esoSecretData := bean.ESOSecretData{ - SecretStore: item.ESOSecretData.SecretStore, - SecretStoreRef: item.ESOSecretData.SecretStoreRef, - EsoData: esoData, - RefreshInterval: item.ESOSecretData.RefreshInterval, - } - item.ESOSecretData = esoSecretData - configs = append(configs, item) - } - configDataRequest.ConfigData = configs - if configDataRequest.ConfigData == nil { list := []*bean.ConfigData{} configDataRequest.ConfigData = list - } else { - //configDataRequest.ConfigData = configMapGlobalList.ConfigData } return configDataRequest, nil @@ -927,93 +902,6 @@ func (impl ConfigMapServiceImpl) CSEnvironmentFetch(appId int, envId int) (*bean } } - //removing actual values - var configs []*bean.ConfigData - for _, item := range configDataRequest.ConfigData { - - if item.Data != nil { - resultMap := make(map[string]string) - resultMapFinal := make(map[string]string) - err = json.Unmarshal(item.Data, &resultMap) - if err != nil { - impl.logger.Warnw("unmarshal failed: ", "error", err) - //item.Data = []byte("[]") - configs = append(configs, item) - continue - //return nil, err - } - for k := range resultMap { - resultMapFinal[k] = "" - } - var resultByte []byte - if resultMapFinal != nil && len(resultMapFinal) > 0 { - resultByte, err = json.Marshal(resultMapFinal) - if err != nil { - impl.logger.Errorw("error while marshaling request ", "err", err) - return nil, err - } - } - item.Data = resultByte - } - - var externalSecret []bean.ExternalSecret - if item.ExternalSecret != nil && len(item.ExternalSecret) > 0 { - for _, es := range item.ExternalSecret { - externalSecret = append(externalSecret, bean.ExternalSecret{Key: es.Key, Name: es.Name, Property: es.Property, IsBinary: es.IsBinary}) - } - } - item.ExternalSecret = externalSecret - - var esoData []bean.ESOData - if len(item.ESOSecretData.EsoData) > 0 { - for _, data := range item.ESOSecretData.EsoData { - esoData = append(esoData, bean.ESOData{Key: data.Key, SecretKey: data.SecretKey, Property: data.Property}) - } - } - - esoSecretData := bean.ESOSecretData{ - SecretStore: item.ESOSecretData.SecretStore, - SecretStoreRef: item.ESOSecretData.SecretStoreRef, - EsoData: esoData, - RefreshInterval: item.ESOSecretData.RefreshInterval, - } - item.ESOSecretData = esoSecretData - - if item.DefaultData != nil { - resultMap := make(map[string]string) - resultMapFinal := make(map[string]string) - err = 
json.Unmarshal(item.DefaultData, &resultMap) - if err != nil { - impl.logger.Warnw("unmarshal failed: ", "error", err) - //item.Data = []byte("[]") - configs = append(configs, item) - continue - //return nil, err - } - for k := range resultMap { - resultMapFinal[k] = "" - } - resultByte, err := json.Marshal(resultMapFinal) - if err != nil { - impl.logger.Errorw("error while marshaling request ", "err", err) - return nil, err - } - item.DefaultData = resultByte - } - - if item.DefaultExternalSecret != nil { - var externalSecret []bean.ExternalSecret - if item.DefaultExternalSecret != nil && len(item.DefaultExternalSecret) > 0 { - for _, es := range item.DefaultExternalSecret { - externalSecret = append(externalSecret, bean.ExternalSecret{Key: es.Key, Name: es.Name, Property: es.Property, IsBinary: es.IsBinary}) - } - } - item.DefaultExternalSecret = externalSecret - } - configs = append(configs, item) - } - configDataRequest.ConfigData = configs - if configDataRequest.ConfigData == nil { list := []*bean.ConfigData{} configDataRequest.ConfigData = list @@ -2019,3 +1907,42 @@ func (impl ConfigMapServiceImpl) ConfigSecretEnvironmentClone(appId int, cloneAp return jobEnvOverrideResponse, nil } +func (impl ConfigMapServiceImpl) FetchCmCsNamesAppAndEnvLevel(appId int, envId int) ([]bean.ConfigNameAndType, []bean.ConfigNameAndType, error) { + var cMCSNamesEnvLevel []bean.ConfigNameAndType + + cMCSNamesAppLevel, err := impl.configMapRepository.GetConfigNamesForAppAndEnvLevel(appId, -1) + if err != nil { + impl.logger.Errorw("error in fetching CM/CS names at app level ", "appId", appId, "err", err) + return nil, nil, err + } + if envId > 0 { + cMCSNamesEnvLevel, err = impl.configMapRepository.GetConfigNamesForAppAndEnvLevel(appId, envId) + if err != nil { + impl.logger.Errorw("error in fetching CM/CS names at env level ", "appId", appId, "envId", envId, "err", err) + return nil, nil, err + } + } + return cMCSNamesAppLevel, cMCSNamesEnvLevel, nil +} + +func (impl ConfigMapServiceImpl) ConfigGlobalFetchEditUsingAppId(name string, appId int, resourceType bean.ResourceType) (*bean.ConfigDataRequest, error) { + var fetchGlobalConfigFunc func(int) (*bean.ConfigDataRequest, error) + if resourceType == bean.CS { + fetchGlobalConfigFunc = impl.CSGlobalFetch + } else if resourceType == bean.CM { + fetchGlobalConfigFunc = impl.CMGlobalFetch + } + configDataRequest, err := fetchGlobalConfigFunc(appId) + if err != nil { + impl.logger.Errorw("error in fetching global cm using app id ", "cmName", name, "appId", appId, "err", err) + return nil, err + } + configs := make([]*bean.ConfigData, 0, len(configDataRequest.ConfigData)) + for _, configData := range configDataRequest.ConfigData { + if configData.Name == name { + configs = append(configs, configData) + } + } + configDataRequest.ConfigData = configs + return configDataRequest, nil +} diff --git a/pkg/pipeline/DeploymentPipelineConfigService.go b/pkg/pipeline/DeploymentPipelineConfigService.go index c622597c15d..c6aebef5cbb 100644 --- a/pkg/pipeline/DeploymentPipelineConfigService.go +++ b/pkg/pipeline/DeploymentPipelineConfigService.go @@ -465,6 +465,7 @@ func (impl *CdPipelineConfigServiceImpl) CreateCdPipelines(pipelineCreateRequest DeploymentAppType: pipeline.DeploymentAppType, RepoURL: AppDeploymentConfig.RepoURL, RepoName: AppDeploymentConfig.RepoName, + ReleaseMode: pipeline.ReleaseMode, Active: true, } envDeploymentConfig, err := impl.deploymentConfigService.CreateOrUpdateConfig(nil, envDeploymentConfig, pipelineCreateRequest.UserId) @@ -870,7 +871,7 @@ 
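ConfigGlobalFetchEditUsingAppId above selects a fetcher by resource type and then filters the result by name; note that a resource type other than CM or CS would leave the function value nil and panic on call. A sketch of the same dispatch with an explicit default branch; the []string payload is a simplification of *bean.ConfigDataRequest:

package main

import (
	"errors"
	"fmt"
)

type ResourceType string

const (
	CM ResourceType = "ConfigMap"
	CS ResourceType = "Secret"
)

// fetchByType picks a fetcher per resource type; unlike the patch, it returns
// an explicit error instead of leaving the function value nil.
func fetchByType(rt ResourceType, cm, cs func(appId int) ([]string, error)) (func(int) ([]string, error), error) {
	switch rt {
	case CM:
		return cm, nil
	case CS:
		return cs, nil
	default:
		return nil, errors.New("unsupported resource type: " + string(rt))
	}
}

func main() {
	cm := func(appId int) ([]string, error) { return []string{"cm-1", "cm-2"}, nil }
	cs := func(appId int) ([]string, error) { return []string{"secret-1"}, nil }
	f, err := fetchByType(CM, cm, cs)
	if err != nil {
		panic(err)
	}
	names, _ := f(10)
	fmt.Println(names)
}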
func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf } //delete app from argo cd, if created if pipeline.DeploymentAppCreated == true { - deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) + deploymentAppName := pipeline.DeploymentAppName if util.IsAcdApp(envDeploymentConfig.DeploymentAppType) { if !forceDelete && !deleteResponse.ClusterReachable { impl.logger.Errorw("cluster connection error", "err", clusterBean.ErrorInConnecting) @@ -927,7 +928,7 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipeline(pipeline *pipelineConf } func (impl *CdPipelineConfigServiceImpl) DeleteHelmTypePipelineDeploymentApp(ctx context.Context, forceDelete bool, pipeline *pipelineConfig.Pipeline) error { - deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) + deploymentAppName := pipeline.DeploymentAppName appIdentifier := &helmBean.AppIdentifier{ ClusterId: pipeline.Environment.ClusterId, ReleaseName: deploymentAppName, @@ -964,8 +965,7 @@ func (impl *CdPipelineConfigServiceImpl) DeleteACDAppCdPipelineWithNonCascade(pi } //delete app from argo cd with non-cascade, if created if pipeline.DeploymentAppCreated && util.IsAcdApp(envDeploymentConfig.DeploymentAppType) { - appDetails, err := impl.appRepo.FindById(pipeline.AppId) - deploymentAppName := fmt.Sprintf("%s-%s", appDetails.AppName, pipeline.Environment.Name) + deploymentAppName := pipeline.DeploymentAppName impl.logger.Debugw("acd app is already deleted for this pipeline", "pipeline", pipeline) cascadeDelete := false req := &application2.ApplicationDeleteRequest{ @@ -2061,7 +2061,7 @@ func (impl *CdPipelineConfigServiceImpl) DeleteCdPipelinePartial(pipeline *pipel impl.logger.Errorw("error in fetching environment deployment config by appId and envId", "appId", pipeline.AppId, "envId", pipeline.EnvironmentId, "err", err) return deleteResponse, err } - deploymentAppName := fmt.Sprintf("%s-%s", pipeline.App.AppName, pipeline.Environment.Name) + deploymentAppName := pipeline.DeploymentAppName if util.IsAcdApp(envDeploymentConfig.DeploymentAppType) { if !forceDelete && !deleteResponse.ClusterReachable { impl.logger.Errorw("cluster connection error", "err", clusterBean.ErrorInConnecting) diff --git a/pkg/pipeline/bean/ConfigMapBean.go b/pkg/pipeline/bean/ConfigMapBean.go index cae2f33f4a8..2f572bd6058 100644 --- a/pkg/pipeline/bean/ConfigMapBean.go +++ b/pkg/pipeline/bean/ConfigMapBean.go @@ -107,3 +107,21 @@ type CreateJobEnvOverridePayload struct { type SecretsList struct { ConfigData []*ConfigData `json:"secrets"` } + +type ConfigNameAndType struct { + Id int + Name string + Type ResourceType +} + +type ResourceType string + +const ( + CM ResourceType = "ConfigMap" + CS ResourceType = "Secret" + DeploymentTemplate ResourceType = "Deployment Template" +) + +func (r ResourceType) ToString() string { + return string(r) +} diff --git a/pkg/plugin/bean/bean.go b/pkg/plugin/bean/bean.go index 9e152272e37..55424f3caac 100644 --- a/pkg/plugin/bean/bean.go +++ b/pkg/plugin/bean/bean.go @@ -171,6 +171,7 @@ func (r *PluginsVersionDetail) SetMinimalPluginsVersionDetail(pluginVersionMetad r.Description = pluginVersionMetadata.Description r.Version = pluginVersionMetadata.PluginVersion r.IsLatest = pluginVersionMetadata.IsLatest + r.DocLink = pluginVersionMetadata.DocLink return r } diff --git a/pkg/security/ImageScanService.go b/pkg/security/ImageScanService.go index 99ec7267798..31211a07013 100644 --- a/pkg/security/ImageScanService.go +++ 
b/pkg/security/ImageScanService.go @@ -18,8 +18,10 @@ package security import ( "context" + securityBean "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" "github.com/devtron-labs/devtron/pkg/cluster/repository/bean" bean2 "github.com/devtron-labs/devtron/pkg/deployment/trigger/devtronApps/bean" + bean3 "github.com/devtron-labs/devtron/pkg/security/bean" "go.opentelemetry.io/otel" "time" @@ -37,10 +39,10 @@ import ( ) type ImageScanService interface { - FetchAllDeployInfo(request *ImageScanRequest) ([]*security.ImageScanDeployInfo, error) - FetchScanExecutionListing(request *ImageScanRequest, ids []int) (*ImageScanHistoryListingResponse, error) - FetchExecutionDetailResult(request *ImageScanRequest) (*ImageScanExecutionDetail, error) - FetchMinScanResultByAppIdAndEnvId(request *ImageScanRequest) (*ImageScanExecutionDetail, error) + FetchAllDeployInfo(request *bean3.ImageScanRequest) ([]*security.ImageScanDeployInfo, error) + FetchScanExecutionListing(request *bean3.ImageScanRequest, ids []int) (*bean3.ImageScanHistoryListingResponse, error) + FetchExecutionDetailResult(request *bean3.ImageScanRequest) (*bean3.ImageScanExecutionDetail, error) + FetchMinScanResultByAppIdAndEnvId(request *bean3.ImageScanRequest) (*bean3.ImageScanExecutionDetail, error) VulnerabilityExposure(request *security.VulnerabilityRequest) (*security.VulnerabilityExposureListingResponse, error) GetArtifactVulnerabilityStatus(ctx context.Context, request *bean2.VulnerabilityCheckRequest) (bool, error) } @@ -65,70 +67,6 @@ type ImageScanServiceImpl struct { cvePolicyRepository security.CvePolicyRepository } -type ImageScanRequest struct { - ScanExecutionId int `json:"ScanExecutionId"` - ImageScanDeployInfoId int `json:"imageScanDeployInfo"` - AppId int `json:"appId"` - EnvId int `json:"envId"` - ObjectId int `json:"objectId"` - ArtifactId int `json:"artifactId"` - Image string `json:"image"` - security.ImageScanFilter -} - -type ImageScanHistoryListingResponse struct { - Offset int `json:"offset"` - Size int `json:"size"` - Total int `json:"total"` - ImageScanHistoryResponse []*ImageScanHistoryResponse `json:"scanList"` -} - -type ImageScanHistoryResponse struct { - ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` - AppId int `json:"appId"` - EnvId int `json:"envId"` - Name string `json:"name"` - Type string `json:"type"` - Environment string `json:"environment"` - LastChecked *time.Time `json:"lastChecked"` - Image string `json:"image,omitempty"` - SeverityCount *SeverityCount `json:"severityCount,omitempty"` -} - -type ImageScanExecutionDetail struct { - ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` - AppId int `json:"appId,omitempty"` - EnvId int `json:"envId,omitempty"` - AppName string `json:"appName,omitempty"` - EnvName string `json:"envName,omitempty"` - ArtifactId int `json:"artifactId,omitempty"` - Image string `json:"image,omitempty"` - PodName string `json:"podName,omitempty"` - ReplicaSet string `json:"replicaSet,omitempty"` - Vulnerabilities []*Vulnerabilities `json:"vulnerabilities,omitempty"` - SeverityCount *SeverityCount `json:"severityCount,omitempty"` - ExecutionTime time.Time `json:"executionTime,omitempty"` - ScanEnabled bool `json:"scanEnabled,notnull"` - Scanned bool `json:"scanned,notnull"` - ObjectType string `json:"objectType,notnull"` - ScanToolId int `json:"scanToolId,omitempty""` -} - -type Vulnerabilities struct { - CVEName string `json:"cveName"` - Severity string `json:"severity"` - Package string `json:"package,omitempty"` - CVersion string 
`json:"currentVersion"` - FVersion string `json:"fixedVersion"` - Permission string `json:"permission"` -} - -type SeverityCount struct { - High int `json:"high"` - Moderate int `json:"moderate"` - Low int `json:"low"` -} - func NewImageScanServiceImpl(Logger *zap.SugaredLogger, scanHistoryRepository security.ImageScanHistoryRepository, scanResultRepository security.ImageScanResultRepository, scanObjectMetaRepository security.ImageScanObjectMetaRepository, cveStoreRepository security.CveStoreRepository, imageScanDeployInfoRepository security.ImageScanDeployInfoRepository, @@ -154,7 +92,7 @@ func NewImageScanServiceImpl(Logger *zap.SugaredLogger, scanHistoryRepository se } } -func (impl ImageScanServiceImpl) FetchAllDeployInfo(request *ImageScanRequest) ([]*security.ImageScanDeployInfo, error) { +func (impl ImageScanServiceImpl) FetchAllDeployInfo(request *bean3.ImageScanRequest) ([]*security.ImageScanDeployInfo, error) { deployedList, err := impl.imageScanDeployInfoRepository.FindAll() if err != nil { impl.Logger.Errorw("error while fetching scan execution result", "err", err) @@ -163,28 +101,22 @@ func (impl ImageScanServiceImpl) FetchAllDeployInfo(request *ImageScanRequest) ( return deployedList, nil } -func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *ImageScanRequest, deployInfoIds []int) (*ImageScanHistoryListingResponse, error) { - size := request.Size - request.Size = 0 - groupByListCount, err := impl.imageScanDeployInfoRepository.ScanListingWithFilter(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) - if err != nil { - impl.Logger.Errorw("error while fetching scan execution result", "err", err) - return nil, err - } - request.Size = size +func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *bean3.ImageScanRequest, deployInfoIds []int) (*bean3.ImageScanHistoryListingResponse, error) { groupByList, err := impl.imageScanDeployInfoRepository.ScanListingWithFilter(&request.ImageScanFilter, request.Size, request.Offset, deployInfoIds) if err != nil { impl.Logger.Errorw("error while fetching scan execution result", "err", err) return nil, err } var ids []int + totalCount := 0 for _, item := range groupByList { + totalCount = item.TotalCount ids = append(ids, item.Id) } if len(ids) == 0 { impl.Logger.Debugw("no image scan deploy info exists", "err", err) - responseList := make([]*ImageScanHistoryResponse, 0) - return &ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, nil + responseList := make([]*bean3.ImageScanHistoryResponse, 0) + return &bean3.ImageScanHistoryListingResponse{ImageScanHistoryResponse: responseList}, nil } deployedList, err := impl.imageScanDeployInfoRepository.FindByIds(ids) if err != nil { @@ -206,14 +138,15 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *ImageScanReq impl.Logger.Errorw("error encountered in FetchScanExecutionListing", "err", err) } - var finalResponseList []*ImageScanHistoryResponse + var finalResponseList []*bean3.ImageScanHistoryResponse for _, item := range groupByList { - imageScanHistoryResponse := &ImageScanHistoryResponse{} + imageScanHistoryResponse := &bean3.ImageScanHistoryResponse{} var lastChecked time.Time + criticalCount := 0 highCount := 0 moderateCount := 0 - lowCount := 0 + lowCount, unkownCount := 0, 0 imageScanDeployInfo := groupByListMap[item.Id] if imageScanDeployInfo != nil { scanResultList, err := impl.scanResultRepository.FetchByScanExecutionIds(imageScanDeployInfo.ImageScanExecutionHistoryId) @@ -228,13 +161,7 @@ func (impl 
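The FetchScanExecutionListing hunk above drops the old second query (which re-ran the listing with size 0 just to count rows) and instead reads TotalCount off each grouped row, presumably populated by the repository via something like COUNT(*) OVER(); that column's origin is an assumption here, since the repository change is not shown. A sketch of consuming such rows:

package main

import "fmt"

type listingRow struct {
	Id         int
	TotalCount int // same value on every row, e.g. from COUNT(*) OVER()
}

// idsAndTotal collects the page's ids and the overall total in one pass.
func idsAndTotal(rows []listingRow) ([]int, int) {
	total := 0
	ids := make([]int, 0, len(rows))
	for _, r := range rows {
		total = r.TotalCount // constant across rows; last write wins
		ids = append(ids, r.Id)
	}
	return ids, total
}

func main() {
	ids, total := idsAndTotal([]listingRow{{Id: 3, TotalCount: 42}, {Id: 9, TotalCount: 42}})
	fmt.Println(ids, total) // [3 9] 42
}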
ImageScanServiceImpl) FetchScanExecutionListing(request *ImageScanReq for _, item := range scanResultList { lastChecked = item.ImageScanExecutionHistory.ExecutionTime - if item.CveStore.Severity == security.Critical { - highCount = highCount + 1 - } else if item.CveStore.Severity == security.Medium { - moderateCount = moderateCount + 1 - } else if item.CveStore.Severity == security.Low { - lowCount = lowCount + 1 - } + criticalCount, highCount, moderateCount, lowCount, unkownCount = impl.updateCount(item.CveStore.GetSeverity(), criticalCount, highCount, moderateCount, lowCount, unkownCount) } // updating in case when no vul are found (no results) if lastChecked.IsZero() && len(imageScanDeployInfo.ImageScanExecutionHistoryId) > 0 && mapOfExecutionHistoryIdVsLastExecTime != nil { @@ -245,10 +172,12 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *ImageScanReq } } } - severityCount := &SeverityCount{ + severityCount := &bean3.SeverityCount{ + Critical: criticalCount, High: highCount, - Moderate: moderateCount, + Medium: moderateCount, Low: lowCount, + Unknown: unkownCount, } imageScanHistoryResponse.ImageScanDeployInfoId = item.Id if imageScanDeployInfo != nil { @@ -292,11 +221,11 @@ func (impl ImageScanServiceImpl) FetchScanExecutionListing(request *ImageScanReq finalResponseList = append(finalResponseList, imageScanHistoryResponse) } - finalResponse := &ImageScanHistoryListingResponse{ + finalResponse := &bean3.ImageScanHistoryListingResponse{ Offset: request.Offset, Size: request.Size, ImageScanHistoryResponse: finalResponseList, - Total: len(groupByListCount), + Total: totalCount, } /* @@ -323,11 +252,11 @@ func (impl ImageScanServiceImpl) fetchImageExecutionHistoryMapByIds(historyIds [ return mapOfExecutionHistoryIdVsExecutionTime, nil } -func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *ImageScanRequest) (*ImageScanExecutionDetail, error) { +func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *bean3.ImageScanRequest) (*bean3.ImageScanExecutionDetail, error) { //var scanExecution *security.ImageScanExecutionHistory var scanExecutionIds []int var executionTime time.Time - imageScanResponse := &ImageScanExecutionDetail{} + imageScanResponse := &bean3.ImageScanExecutionDetail{} isRegularApp := false if request.ImageScanDeployInfoId > 0 { // scan detail for deployed images @@ -383,8 +312,8 @@ func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *ImageScanRe imageScanResponse.ObjectType = security.ScanObjectType_APP } - var vulnerabilities []*Vulnerabilities - var highCount, moderateCount, lowCount int + var vulnerabilities []*bean3.Vulnerabilities + var criticalCount, highCount, moderateCount, lowCount, unkownCount int var cveStores []*security.CveStore imageDigests := make(map[string]string) if len(scanExecutionIds) > 0 { @@ -396,12 +325,15 @@ func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *ImageScanRe } for _, item := range imageScanResult { - vulnerability := &Vulnerabilities{ + vulnerability := &bean3.Vulnerabilities{ CVEName: item.CveStore.Name, CVersion: item.CveStore.Version, FVersion: item.FixedVersion, Package: item.CveStore.Package, - Severity: item.CveStore.Severity.String(), + Severity: item.CveStore.GetSeverity().String(), + Target: item.Target, + Type: item.Type, + Class: item.Class, //Permission: "BLOCK", TODO } // data already migrated hence get package, version and fixedVersion from image_scan_execution_result @@ -412,13 +344,7 @@ func (impl ImageScanServiceImpl) 
FetchExecutionDetailResult(request *ImageScanRe if len(item.Version) > 0 { vulnerability.CVersion = item.Version } - if item.CveStore.Severity == security.Critical { - highCount = highCount + 1 - } else if item.CveStore.Severity == security.Medium { - moderateCount = moderateCount + 1 - } else if item.CveStore.Severity == security.Low { - lowCount = lowCount + 1 - } + criticalCount, highCount, moderateCount, lowCount, unkownCount = impl.updateCount(item.CveStore.GetSeverity(), criticalCount, highCount, moderateCount, lowCount, unkownCount) vulnerabilities = append(vulnerabilities, vulnerability) cveStores = append(cveStores, &item.CveStore) if _, ok := imageDigests[item.ImageScanExecutionHistory.ImageHash]; !ok { @@ -439,14 +365,15 @@ func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *ImageScanRe } } } - severityCount := &SeverityCount{ + severityCount := &bean3.SeverityCount{ + Critical: criticalCount, High: highCount, - Moderate: moderateCount, + Medium: moderateCount, Low: lowCount, } imageScanResponse.ImageScanDeployInfoId = request.ImageScanDeployInfoId if len(vulnerabilities) == 0 { - vulnerabilities = make([]*Vulnerabilities, 0) + vulnerabilities = make([]*bean3.Vulnerabilities, 0) } imageScanResponse.Vulnerabilities = vulnerabilities imageScanResponse.SeverityCount = severityCount @@ -490,31 +417,31 @@ func (impl ImageScanServiceImpl) FetchExecutionDetailResult(request *ImageScanRe if blockCveList != nil { vulnerabilityPermissionMap := make(map[string]string) for _, cve := range blockCveList { - vulnerabilityPermissionMap[cve.Name] = "BLOCK" + vulnerabilityPermissionMap[cve.Name] = bean3.BLOCK } - var updatedVulnerabilities []*Vulnerabilities + var updatedVulnerabilities []*bean3.Vulnerabilities for _, vulnerability := range imageScanResponse.Vulnerabilities { if _, ok := vulnerabilityPermissionMap[vulnerability.CVEName]; ok { - vulnerability.Permission = "BLOCK" + vulnerability.Permission = bean3.BLOCK } else { - vulnerability.Permission = "WHITELISTED" + vulnerability.Permission = bean3.WHITELISTED } updatedVulnerabilities = append(updatedVulnerabilities, vulnerability) } if len(updatedVulnerabilities) == 0 { - updatedVulnerabilities = make([]*Vulnerabilities, 0) + updatedVulnerabilities = make([]*bean3.Vulnerabilities, 0) } imageScanResponse.Vulnerabilities = updatedVulnerabilities } else { for _, vulnerability := range imageScanResponse.Vulnerabilities { - vulnerability.Permission = "WHITELISTED" + vulnerability.Permission = bean3.WHITELISTED } } } return imageScanResponse, nil } -func (impl ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *ImageScanRequest) (*ImageScanExecutionDetail, error) { +func (impl *ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *bean3.ImageScanRequest) (*bean3.ImageScanExecutionDetail, error) { //var scanExecution *security.ImageScanExecutionHistory var scanExecutionIds []int var executionTime time.Time @@ -531,7 +458,7 @@ func (impl ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *Imag } scanExecutionIds = append(scanExecutionIds, scanDeployInfo.ImageScanExecutionHistoryId...) 
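The permission pass in FetchExecutionDetailResult above replaces the string literals with the new bean3.BLOCK and bean3.WHITELISTED constants: every vulnerability found in the blocked-CVE set is marked BLOCK, everything else WHITELISTED. A compact sketch of that classification:

package main

import "fmt"

const (
	BLOCK       = "BLOCK"
	WHITELISTED = "WHITELISTED"
)

// annotate marks anything in the blocked set as BLOCK and the rest WHITELISTED,
// mirroring the vulnerabilityPermissionMap pass in the hunk.
func annotate(cveNames []string, blocked map[string]struct{}) map[string]string {
	out := make(map[string]string, len(cveNames))
	for _, name := range cveNames {
		if _, ok := blocked[name]; ok {
			out[name] = BLOCK
		} else {
			out[name] = WHITELISTED
		}
	}
	return out
}

func main() {
	blocked := map[string]struct{}{"CVE-2024-0001": {}}
	fmt.Println(annotate([]string{"CVE-2024-0001", "CVE-2024-0002"}, blocked))
}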
- var highCount, moderateCount, lowCount, scantoolId int + var criticalCount, highCount, moderateCount, lowCount, unkownCount, scantoolId int if len(scanExecutionIds) > 0 { imageScanResult, err := impl.scanResultRepository.FetchByScanExecutionIds(scanExecutionIds) if err != nil { @@ -540,13 +467,7 @@ func (impl ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *Imag } for _, item := range imageScanResult { executionTime = item.ImageScanExecutionHistory.ExecutionTime - if item.CveStore.Severity == security.Critical { - highCount = highCount + 1 - } else if item.CveStore.Severity == security.Medium { - moderateCount = moderateCount + 1 - } else if item.CveStore.Severity == security.Low { - lowCount = lowCount + 1 - } + criticalCount, highCount, moderateCount, lowCount, unkownCount = impl.updateCount(item.CveStore.GetSeverity(), criticalCount, highCount, moderateCount, lowCount, unkownCount) } if len(imageScanResult) > 0 { scantoolId = imageScanResult[0].ScanToolId @@ -559,12 +480,14 @@ func (impl ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *Imag scantoolId = toolIdFromExecutionHistory } } - severityCount := &SeverityCount{ + severityCount := &bean3.SeverityCount{ + Critical: criticalCount, High: highCount, - Moderate: moderateCount, + Medium: moderateCount, Low: lowCount, + Unknown: unkownCount, } - imageScanResponse := &ImageScanExecutionDetail{ + imageScanResponse := &bean3.ImageScanExecutionDetail{ ImageScanDeployInfoId: scanDeployInfo.Id, SeverityCount: severityCount, ExecutionTime: executionTime, @@ -575,7 +498,8 @@ func (impl ImageScanServiceImpl) FetchMinScanResultByAppIdAndEnvId(request *Imag } return imageScanResponse, nil } -func (impl ImageScanServiceImpl) getScanToolIdFromExecutionHistory(scanExecutionIds []int) (int, error) { + +func (impl *ImageScanServiceImpl) getScanToolIdFromExecutionHistory(scanExecutionIds []int) (int, error) { scanToolHistoryMappings, err := impl.scanToolExecutionHistoryMappingRepository.GetAllScanHistoriesByExecutionHistoryIds(scanExecutionIds) if err != nil { if err == pg.ErrNoRows { @@ -591,7 +515,7 @@ func (impl ImageScanServiceImpl) getScanToolIdFromExecutionHistory(scanExecution return -1, err } -func (impl ImageScanServiceImpl) VulnerabilityExposure(request *security.VulnerabilityRequest) (*security.VulnerabilityExposureListingResponse, error) { +func (impl *ImageScanServiceImpl) VulnerabilityExposure(request *security.VulnerabilityRequest) (*security.VulnerabilityExposureListingResponse, error) { vulnerabilityExposureListingResponse := &security.VulnerabilityExposureListingResponse{ Offset: request.Offset, Size: request.Size, @@ -654,7 +578,25 @@ func (impl ImageScanServiceImpl) VulnerabilityExposure(request *security.Vulnera return vulnerabilityExposureListingResponse, nil } -func (impl ImageScanServiceImpl) GetArtifactVulnerabilityStatus(ctx context.Context, request *bean2.VulnerabilityCheckRequest) (bool, error) { +func (impl *ImageScanServiceImpl) CalculateSeverityCountInfo(vulnerabilities []*bean3.Vulnerabilities) *bean3.SeverityCount { + diff := bean3.SeverityCount{} + for _, vulnerability := range vulnerabilities { + if vulnerability.IsCritical() { + diff.Critical += 1 + } else if vulnerability.IsHigh() { + diff.High += 1 + } else if vulnerability.IsMedium() { + diff.Medium += 1 + } else if vulnerability.IsLow() { + diff.Low += 1 + } else if vulnerability.IsUnknown() { + diff.Unknown += 1 + } + } + return &diff +} + +func (impl *ImageScanServiceImpl) GetArtifactVulnerabilityStatus(ctx context.Context, 
request *bean2.VulnerabilityCheckRequest) (bool, error) { isVulnerable := false if len(request.ImageDigest) > 0 { var cveStores []*security.CveStore @@ -689,3 +631,18 @@ func (impl ImageScanServiceImpl) GetArtifactVulnerabilityStatus(ctx context.Cont } return isVulnerable, nil } + +func (impl ImageScanServiceImpl) updateCount(severity securityBean.Severity, criticalCount int, highCount int, moderateCount int, lowCount int, unkownCount int) (int, int, int, int, int) { + if severity == securityBean.Critical { + criticalCount += 1 + } else if severity == securityBean.High { + highCount = highCount + 1 + } else if severity == securityBean.Medium { + moderateCount = moderateCount + 1 + } else if severity == securityBean.Low { + lowCount = lowCount + 1 + } else if severity == securityBean.Unknown { + unkownCount += 1 + } + return criticalCount, highCount, moderateCount, lowCount, unkownCount +} diff --git a/pkg/security/bean/bean.go b/pkg/security/bean/bean.go new file mode 100644 index 00000000000..0744d544cd2 --- /dev/null +++ b/pkg/security/bean/bean.go @@ -0,0 +1,122 @@ +package bean + +import ( + "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" + "time" +) + +type SortBy string +type SortOrder string + +const ( + Asc SortOrder = "ASC" + Desc SortOrder = "DESC" +) + +const ( + BLOCK string = "BLOCK" + WHITELISTED = "WHITELISTED" +) + +type Vulnerabilities struct { + CVEName string `json:"cveName"` + Severity string `json:"severity"` + Package string `json:"package,omitempty"` + CVersion string `json:"currentVersion"` + FVersion string `json:"fixedVersion"` + Permission string `json:"permission"` + Target string `json:"target"` + Class string `json:"class"` + Type string `json:"type"` +} + +func (vul *Vulnerabilities) IsCritical() bool { + return vul.Severity == bean.CRITICAL +} + +func (vul *Vulnerabilities) IsHigh() bool { + return vul.Severity == bean.HIGH +} + +func (vul *Vulnerabilities) IsMedium() bool { + return vul.Severity == bean.MODERATE || vul.Severity == bean.MEDIUM +} + +func (vul *Vulnerabilities) IsLow() bool { + return vul.Severity == bean.LOW +} + +func (vul *Vulnerabilities) IsUnknown() bool { + return vul.Severity == bean.UNKNOWN +} + +type SeverityCount struct { + Critical int `json:"critical"` + High int `json:"high"` + Medium int `json:"medium"` + Low int `json:"low"` + Unknown int `json:"unknown"` +} + +type ImageScanFilter struct { + Offset int `json:"offset"` + Size int `json:"size"` + CVEName string `json:"cveName"` + AppName string `json:"appName"` + // ObjectName deprecated + ObjectName string `json:"objectName"` + EnvironmentIds []int `json:"envIds"` + ClusterIds []int `json:"clusterIds"` + Severity []int `json:"severity"` + SortOrder SortOrder `json:"sortOrder"` + SortBy SortBy `json:"sortBy"` // sort by objectName,envName,lastChecked +} + +type ImageScanRequest struct { + ScanExecutionId int `json:"ScanExecutionId"` + ImageScanDeployInfoId int `json:"imageScanDeployInfo"` + AppId int `json:"appId"` + EnvId int `json:"envId"` + ObjectId int `json:"objectId"` + ArtifactId int `json:"artifactId"` + Image string `json:"image"` + ImageScanFilter +} + +type ImageScanHistoryListingResponse struct { + Offset int `json:"offset"` + Size int `json:"size"` + Total int `json:"total"` + ImageScanHistoryResponse []*ImageScanHistoryResponse `json:"scanList"` +} + +type ImageScanHistoryResponse struct { + ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` + AppId int `json:"appId"` + EnvId int `json:"envId"` + Name string `json:"name"` + Type string 
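updateCount above centralizes the severity bucketing that was previously duplicated across three call sites (and which silently folded Critical into the high count), but it threads five loose counters through every call. A struct tally is an equivalent, harder-to-misorder shape; the Severity constants below are local stand-ins for securityBean's, with values assumed:

package main

import "fmt"

type Severity int

const (
	Low Severity = iota
	Medium
	High
	Critical
	Unknown
)

type SeverityCount struct{ Critical, High, Medium, Low, Unknown int }

// tally folds severities into one struct instead of five loose ints;
// anything unrecognized lands in the Unknown bucket.
func tally(severities []Severity) SeverityCount {
	var c SeverityCount
	for _, s := range severities {
		switch s {
		case Critical:
			c.Critical++
		case High:
			c.High++
		case Medium:
			c.Medium++
		case Low:
			c.Low++
		default:
			c.Unknown++
		}
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", tally([]Severity{Critical, High, High, Unknown}))
}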
`json:"type"` + Environment string `json:"environment"` + LastChecked *time.Time `json:"lastChecked"` + Image string `json:"image,omitempty"` + SeverityCount *SeverityCount `json:"severityCount,omitempty"` +} + +type ImageScanExecutionDetail struct { + ImageScanDeployInfoId int `json:"imageScanDeployInfoId"` + AppId int `json:"appId,omitempty"` + EnvId int `json:"envId,omitempty"` + AppName string `json:"appName,omitempty"` + EnvName string `json:"envName,omitempty"` + ArtifactId int `json:"artifactId,omitempty"` + Image string `json:"image,omitempty"` + PodName string `json:"podName,omitempty"` + ReplicaSet string `json:"replicaSet,omitempty"` + Vulnerabilities []*Vulnerabilities `json:"vulnerabilities,omitempty"` + SeverityCount *SeverityCount `json:"severityCount,omitempty"` + ExecutionTime time.Time `json:"executionTime,omitempty"` + ScanEnabled bool `json:"scanEnabled,notnull"` + Scanned bool `json:"scanned,notnull"` + ObjectType string `json:"objectType,notnull"` + ScanToolId int `json:"scanToolId,omitempty"` +} diff --git a/pkg/security/policyService.go b/pkg/security/policyService.go index 0a99fda54db..e00fa0eacfa 100644 --- a/pkg/security/policyService.go +++ b/pkg/security/policyService.go @@ -22,6 +22,7 @@ import ( "fmt" repository1 "github.com/devtron-labs/devtron/internal/sql/repository/app" "github.com/devtron-labs/devtron/internal/sql/repository/helper" + securityBean "github.com/devtron-labs/devtron/internal/sql/repository/security/bean" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/sql" "net/http" @@ -42,12 +43,12 @@ type PolicyService interface { SavePolicy(request bean.CreateVulnerabilityPolicyRequest, userId int32) (*bean.IdVulnerabilityPolicyResult, error) UpdatePolicy(updatePolicyParams bean.UpdatePolicyParams, userId int32) (*bean.IdVulnerabilityPolicyResult, error) DeletePolicy(id int, userId int32) (*bean.IdVulnerabilityPolicyResult, error) - GetPolicies(policyLevel security.PolicyLevel, clusterId, environmentId, appId int) (*bean.GetVulnerabilityPolicyResult, error) + GetPolicies(policyLevel securityBean.PolicyLevel, clusterId, environmentId, appId int) (*bean.GetVulnerabilityPolicyResult, error) GetBlockedCVEList(cves []*security.CveStore, clusterId, envId, appId int, isAppstore bool) ([]*security.CveStore, error) VerifyImage(verifyImageRequest *VerifyImageRequest) (map[string][]*VerifyImageResponse, error) GetCvePolicy(id int, userId int32) (*security.CvePolicy, error) - GetApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*security.CvePolicy, map[security.Severity]*security.CvePolicy, error) - HasBlockedCVE(cves []*security.CveStore, cvePolicy map[string]*security.CvePolicy, severityPolicy map[security.Severity]*security.CvePolicy) bool + GetApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*security.CvePolicy, map[securityBean.Severity]*security.CvePolicy, error) + HasBlockedCVE(cves []*security.CveStore, cvePolicy map[string]*security.CvePolicy, severityPolicy map[securityBean.Severity]*security.CvePolicy) bool } type PolicyServiceImpl struct { environmentService cluster.EnvironmentService @@ -161,6 +162,7 @@ func (impl *PolicyServiceImpl) SendEventToClairUtility(event *ScanEvent) error { impl.logger.Errorw("error while UpdateJiraTransition request ", "err", err) return err } + resp.Body.Close() impl.logger.Debugw("response from test suit create api", "status code", resp.StatusCode) return err } @@ -192,7 +194,7 @@ func (impl *PolicyServiceImpl)
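The SendEventToClairUtility hunk above adds an explicit resp.Body.Close() on the success path; the more defensive Go idiom defers the close (and drains the body) as soon as the request succeeds, so every subsequent return path is covered and the connection can be reused. A sketch against net/http; the URL in main is illustrative only:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// get drains and closes the body on every path after a successful request.
func get(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if _, err := io.Copy(io.Discard, resp.Body); err != nil {
		return 0, err
	}
	return resp.StatusCode, nil
}

func main() {
	code, err := get("https://example.com")
	fmt.Println(code, err)
}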
VerifyImage(verifyImageRequest *VerifyImageReques appId = app.Id isAppStore = app.AppType == helper.ChartStoreApp } else { - //np app do nothing + // no app, do nothing } cvePolicy, severityPolicy, err := impl.GetApplicablePolicy(clusterId, envId, appId, isAppStore) @@ -215,7 +217,7 @@ func (impl *PolicyServiceImpl) VerifyImage(verifyImageRequest *VerifyImageReques scanResultsIdMap := make(map[int]int) for _, image := range verifyImageRequest.Images { - //TODO - scan only if ci scan enabled + // TODO - scan only if ci scan enabled scanHistory, err := impl.scanHistoryRepository.FindByImage(image) if err != nil && err != pg.ErrNoRows { @@ -256,7 +258,7 @@ func (impl *PolicyServiceImpl) VerifyImage(verifyImageRequest *VerifyImageReques for _, cve := range blockedCves { vr := &VerifyImageResponse{ Name: cve.Name, - Severity: cve.Severity.String(), + Severity: cve.GetSeverity().String(), Package: cve.Package, Version: cve.Version, FixedVersion: cve.FixedVersion, @@ -273,7 +275,7 @@ func (impl *PolicyServiceImpl) VerifyImage(verifyImageRequest *VerifyImageReques } if objectType == security.ScanObjectType_POD { - //TODO create entry + // TODO create entry imageScanObjectMeta := &security.ImageScanObjectMeta{ Name: verifyImageRequest.PodName, Image: strings.Join(verifyImageRequest.Images, ","), @@ -308,7 +310,7 @@ func (impl *PolicyServiceImpl) VerifyImage(verifyImageRequest *VerifyImageReques } if len(scanResultsId) > 0 { - ot, err := impl.imageScanDeployInfoRepository.FindByTypeMetaAndTypeId(typeId, objectType) //todo insure this touple unique in db + ot, err := impl.imageScanDeployInfoRepository.FindByTypeMetaAndTypeId(typeId, objectType) // TODO: ensure this tuple is unique in the db if err != nil && err != pg.ErrNoRows { return nil, err } else if err == pg.ErrNoRows { @@ -336,28 +338,28 @@ func (impl *PolicyServiceImpl) VerifyImage(verifyImageRequest *VerifyImageReques return imageBlockedCves, nil } -func (impl *PolicyServiceImpl) GetApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*security.CvePolicy, map[security.Severity]*security.CvePolicy, error) { +func (impl *PolicyServiceImpl) GetApplicablePolicy(clusterId, envId, appId int, isAppstore bool) (map[string]*security.CvePolicy, map[securityBean.Severity]*security.CvePolicy, error) { - var policyLevel security.PolicyLevel + var policyLevel securityBean.PolicyLevel if isAppstore && appId > 0 && envId > 0 && clusterId > 0 { - policyLevel = security.Environment + policyLevel = securityBean.Environment } else if appId > 0 && envId > 0 && clusterId > 0 { - policyLevel = security.Application + policyLevel = securityBean.Application } else if envId > 0 && clusterId > 0 { - policyLevel = security.Environment + policyLevel = securityBean.Environment } else if clusterId > 0 { - policyLevel = security.Cluster + policyLevel = securityBean.Cluster } else { - //error in case of global or other policy + // error in case of global or other policy return nil, nil, fmt.Errorf("policy not identified") } cvePolicy, severityPolicy, err := impl.getPolicies(policyLevel, clusterId, envId, appId) return cvePolicy, severityPolicy, err } -func (impl *PolicyServiceImpl) getApplicablePolicies(policies []*security.CvePolicy) (map[string]*security.CvePolicy, map[security.Severity]*security.CvePolicy) { +func (impl *PolicyServiceImpl) getApplicablePolicies(policies []*security.CvePolicy) (map[string]*security.CvePolicy, map[securityBean.Severity]*security.CvePolicy) { cvePolicy := make(map[string][]*security.CvePolicy) - severityPolicy :=
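GetApplicablePolicy above resolves the most specific policy level for which ids are present, now against the relocated securityBean types. A condensed sketch of that precedence, with local stand-ins for the level constants:

package main

import (
	"errors"
	"fmt"
)

type PolicyLevel string

const (
	Global      PolicyLevel = "global"
	Cluster     PolicyLevel = "cluster"
	Environment PolicyLevel = "environment"
	Application PolicyLevel = "application"
)

// resolveLevel mirrors GetApplicablePolicy: app+env+cluster beats env+cluster
// beats cluster alone, and app-store installs resolve at environment level.
func resolveLevel(clusterId, envId, appId int, isAppStore bool) (PolicyLevel, error) {
	switch {
	case isAppStore && appId > 0 && envId > 0 && clusterId > 0:
		return Environment, nil
	case appId > 0 && envId > 0 && clusterId > 0:
		return Application, nil
	case envId > 0 && clusterId > 0:
		return Environment, nil
	case clusterId > 0:
		return Cluster, nil
	default:
		return "", errors.New("policy not identified")
	}
}

func main() {
	fmt.Println(resolveLevel(1, 2, 3, false)) // application <nil>
}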
make(map[security.Severity][]*security.CvePolicy) + severityPolicy := make(map[securityBean.Severity][]*security.CvePolicy) for _, policy := range policies { if policy.CVEStoreId != "" { cvePolicy[policy.CveStore.Name] = append(cvePolicy[policy.CveStore.Name], policy) @@ -387,8 +389,8 @@ func (impl *PolicyServiceImpl) getHighestPolicy(allPolicies map[string][]*securi } return applicablePolicies } -func (impl *PolicyServiceImpl) getHighestPolicyS(allPolicies map[security.Severity][]*security.CvePolicy) map[security.Severity]*security.CvePolicy { - applicablePolicies := make(map[security.Severity]*security.CvePolicy) +func (impl *PolicyServiceImpl) getHighestPolicyS(allPolicies map[securityBean.Severity][]*security.CvePolicy) map[securityBean.Severity]*security.CvePolicy { + applicablePolicies := make(map[securityBean.Severity]*security.CvePolicy) for key, policies := range allPolicies { var applicablePolicy *security.CvePolicy for _, policy := range policies { @@ -405,7 +407,7 @@ func (impl *PolicyServiceImpl) getHighestPolicyS(allPolicies map[security.Severi return applicablePolicies } -//-----------crud api---- +// -----------crud api---- /* Severity/cveId -- @@ -420,18 +422,18 @@ action res: id, */ -func (impl *PolicyServiceImpl) parsePolicyAction(action string) (security.PolicyAction, error) { - var policyAction security.PolicyAction +func (impl *PolicyServiceImpl) parsePolicyAction(action string) (securityBean.PolicyAction, error) { + var policyAction securityBean.PolicyAction if action == "allow" { - policyAction = security.Allow + policyAction = securityBean.Allow } else if action == "block" { - policyAction = security.Block + policyAction = securityBean.Block } else if action == "inherit" { - policyAction = security.Inherit + policyAction = securityBean.Inherit } else if action == "blockiffixed" { - policyAction = security.Blockiffixed + policyAction = securityBean.Blockiffixed } else { - return security.Inherit, fmt.Errorf("unsupported action %s", action) + return securityBean.Inherit, fmt.Errorf("unsupported action %s", action) } return policyAction, nil } @@ -445,14 +447,18 @@ func (impl *PolicyServiceImpl) SavePolicy(request bean.CreateVulnerabilityPolicy if err != nil { return nil, err } - var severity security.Severity + var severity securityBean.Severity if len(request.Severity) > 0 { - if request.Severity == "critical" { - severity = security.Critical - } else if request.Severity == "moderate" { - severity = security.Medium - } else if request.Severity == "low" { - severity = security.Low + if request.Severity == securityBean.CRITICAL { + severity = securityBean.Critical + } else if request.Severity == securityBean.HIGH { + severity = securityBean.High + } else if request.Severity == securityBean.MODERATE || request.Severity == securityBean.MEDIUM { + severity = securityBean.Medium + } else if request.Severity == securityBean.LOW { + severity = securityBean.Low + } else if request.Severity == securityBean.UNKNOWN { + severity = securityBean.Unknown } else { return nil, fmt.Errorf("unsupported Severity %s", request.Severity) } @@ -461,7 +467,7 @@ func (impl *PolicyServiceImpl) SavePolicy(request bean.CreateVulnerabilityPolicy if err != nil { return nil, err } - severity = cveStore.Severity + severity = cveStore.GetSeverity() } policy := &security.CvePolicy{ Global: isGlobal, @@ -495,7 +501,7 @@ func (impl *PolicyServiceImpl) UpdatePolicy(updatePolicyParams bean.UpdatePolicy if err != nil { return nil, err } - if policyAction == security.Inherit { + if policyAction == 
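SavePolicy above widens severity parsing from three hard-coded strings to the securityBean constants, collapsing "moderate"/"medium" into Medium and newly accepting "unknown". A sketch of the same mapping; the literal string values and the case-insensitive comparison are assumptions here, since the patch only shows the constant names (securityBean.CRITICAL and friends):

package main

import (
	"fmt"
	"strings"
)

type Severity int

const (
	Low Severity = iota
	Medium
	High
	Critical
	Unknown
)

// parseSeverity collapses "moderate" and "medium" into one bucket and
// accepts "unknown", matching the widened mapping in SavePolicy.
func parseSeverity(s string) (Severity, error) {
	switch strings.ToLower(s) {
	case "critical":
		return Critical, nil
	case "high":
		return High, nil
	case "moderate", "medium":
		return Medium, nil
	case "low":
		return Low, nil
	case "unknown":
		return Unknown, nil
	default:
		return 0, fmt.Errorf("unsupported Severity %s", s)
	}
}

func main() {
	fmt.Println(parseSeverity("moderate"))
}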
securityBean.Inherit { return impl.DeletePolicy(updatePolicyParams.Id, userId) } else { policy, err := impl.cvePolicyRepository.GetById(updatePolicyParams.Id) @@ -547,23 +553,23 @@ func (impl *PolicyServiceImpl) DeletePolicy(id int, userId int32) (*bean.IdVulne res: */ -func (impl *PolicyServiceImpl) GetPolicies(policyLevel security.PolicyLevel, clusterId, environmentId, appId int) (*bean.GetVulnerabilityPolicyResult, error) { +func (impl *PolicyServiceImpl) GetPolicies(policyLevel securityBean.PolicyLevel, clusterId, environmentId, appId int) (*bean.GetVulnerabilityPolicyResult, error) { vulnerabilityPolicyResult := &bean.GetVulnerabilityPolicyResult{ Level: bean.ResourceLevel(policyLevel.String()), } - if policyLevel == security.Global { + if policyLevel == securityBean.Global { cvePolicy, severityPolicy, err := impl.getPolicies(policyLevel, clusterId, environmentId, appId) if err != nil { return nil, err } vulnerabilityPolicy := impl.vulnerabilityPolicyBuilder(policyLevel, cvePolicy, severityPolicy) vulnerabilityPolicyResult.Policies = append(vulnerabilityPolicyResult.Policies, vulnerabilityPolicy) - } else if policyLevel == security.Cluster { + } else if policyLevel == securityBean.Cluster { if clusterId == 0 { return nil, fmt.Errorf("cluster id is missing") } - //get cluster name + // get cluster name cluster, err := impl.clusterService.FindById(clusterId) if err != nil { impl.logger.Errorw("error in fetching cluster details", "id", clusterId, "err", err) @@ -578,7 +584,7 @@ func (impl *PolicyServiceImpl) GetPolicies(policyLevel security.PolicyLevel, clu vulnerabilityPolicy.Name = cluster.ClusterName vulnerabilityPolicy.ClusterId = clusterId vulnerabilityPolicyResult.Policies = append(vulnerabilityPolicyResult.Policies, vulnerabilityPolicy) - } else if policyLevel == security.Environment { + } else if policyLevel == securityBean.Environment { if environmentId == 0 { return nil, fmt.Errorf("environmentId is missing") } @@ -596,7 +602,7 @@ func (impl *PolicyServiceImpl) GetPolicies(policyLevel security.PolicyLevel, clu vulnerabilityPolicy.Name = env.Environment vulnerabilityPolicy.EnvId = env.Id vulnerabilityPolicyResult.Policies = append(vulnerabilityPolicyResult.Policies, vulnerabilityPolicy) - } else if policyLevel == security.Application { + } else if policyLevel == securityBean.Application { if appId == 0 { return nil, fmt.Errorf("appId is missing") } @@ -630,7 +636,7 @@ func (impl *PolicyServiceImpl) GetPolicies(policyLevel security.PolicyLevel, clu return vulnerabilityPolicyResult, nil } -func (impl *PolicyServiceImpl) vulnerabilityPolicyBuilder(policyLevel security.PolicyLevel, cvePolicy map[string]*security.CvePolicy, severityPolicy map[security.Severity]*security.CvePolicy) *bean.VulnerabilityPolicy { +func (impl *PolicyServiceImpl) vulnerabilityPolicyBuilder(policyLevel securityBean.PolicyLevel, cvePolicy map[string]*security.CvePolicy, severityPolicy map[securityBean.Severity]*security.CvePolicy) *bean.VulnerabilityPolicy { vulnerabilityPolicy := &bean.VulnerabilityPolicy{} for _, v := range severityPolicy { @@ -665,16 +671,16 @@ func (impl *PolicyServiceImpl) vulnerabilityPolicyBuilder(policyLevel security.P return vulnerabilityPolicy } -func (impl *PolicyServiceImpl) getPolicies(policyLevel security.PolicyLevel, clusterId, environmentId, appId int) (map[string]*security.CvePolicy, map[security.Severity]*security.CvePolicy, error) { +func (impl *PolicyServiceImpl) getPolicies(policyLevel securityBean.PolicyLevel, clusterId, environmentId, appId int) 
(map[string]*security.CvePolicy, map[securityBean.Severity]*security.CvePolicy, error) { var policies []*security.CvePolicy var err error - if policyLevel == security.Global { + if policyLevel == securityBean.Global { policies, err = impl.cvePolicyRepository.GetGlobalPolicies() - } else if policyLevel == security.Cluster { + } else if policyLevel == securityBean.Cluster { policies, err = impl.cvePolicyRepository.GetClusterPolicies(clusterId) - } else if policyLevel == security.Environment { + } else if policyLevel == securityBean.Environment { policies, err = impl.cvePolicyRepository.GetEnvPolicies(clusterId, environmentId) - } else if policyLevel == security.Application { + } else if policyLevel == securityBean.Application { policies, err = impl.cvePolicyRepository.GetAppEnvPolicies(clusterId, environmentId, appId) } else { return nil, nil, fmt.Errorf("unsupported policy level: %s", policyLevel) @@ -685,7 +691,7 @@ func (impl *PolicyServiceImpl) getPolicies(policyLevel security.PolicyLevel, clu } cvePolicy, severityPolicy := impl.getApplicablePolicies(policies) impl.logger.Debugw("policy identified ", "policyLevel", policyLevel) - //transform and return + // transform and return return cvePolicy, severityPolicy, nil } @@ -699,18 +705,18 @@ func (impl *PolicyServiceImpl) GetBlockedCVEList(cves []*security.CveStore, clus return blockedCve, nil } -func (impl *PolicyServiceImpl) HasBlockedCVE(cves []*security.CveStore, cvePolicy map[string]*security.CvePolicy, severityPolicy map[security.Severity]*security.CvePolicy) bool { +func (impl *PolicyServiceImpl) HasBlockedCVE(cves []*security.CveStore, cvePolicy map[string]*security.CvePolicy, severityPolicy map[securityBean.Severity]*security.CvePolicy) bool { for _, cve := range cves { if policy, ok := cvePolicy[cve.Name]; ok { - if policy.Action == security.Allow { + if policy.Action == securityBean.Allow { continue - } else if (policy.Action == security.Block) || (policy.Action == security.Blockiffixed && cve.FixedVersion != "") { + } else if (policy.Action == securityBean.Block) || (policy.Action == securityBean.Blockiffixed && cve.FixedVersion != "") { return true } } else { - if severityPolicy[cve.Severity] != nil && severityPolicy[cve.Severity].Action == security.Allow { + if severityPolicy[cve.GetSeverity()] != nil && severityPolicy[cve.GetSeverity()].Action == securityBean.Allow { continue - } else if severityPolicy[cve.Severity] != nil && (severityPolicy[cve.Severity].Action == security.Block || (severityPolicy[cve.Severity].Action == security.Blockiffixed && cve.FixedVersion != "")) { + } else if severityPolicy[cve.GetSeverity()] != nil && (severityPolicy[cve.GetSeverity()].Action == securityBean.Block || (severityPolicy[cve.GetSeverity()].Action == securityBean.Blockiffixed && cve.FixedVersion != "")) { return true } } diff --git a/scripts/sql/278_scan_policies.down.sql b/scripts/sql/278_scan_policies.down.sql new file mode 100644 index 00000000000..dbe1502309d --- /dev/null +++ b/scripts/sql/278_scan_policies.down.sql @@ -0,0 +1,3 @@ +UPDATE cve_policy_control +SET deleted = true, updated_on = 'now()', updated_by = '1' +WHERE severity = '3' OR severity = '5'; \ No newline at end of file diff --git a/scripts/sql/278_scan_policies.up.sql b/scripts/sql/278_scan_policies.up.sql new file mode 100644 index 00000000000..64ad44b17ab --- /dev/null +++ b/scripts/sql/278_scan_policies.up.sql @@ -0,0 +1,6 @@ + +-- severity 3 is for high and 5 is for unknown +INSERT INTO "public"."cve_policy_control" ("global", "cluster_id", "env_id", "app_id", 
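HasBlockedCVE above gives a CVE-specific policy precedence over the severity-level policy, and treats Blockiffixed as blocking only when a fixed version exists. A self-contained sketch of that decision, with simplified policy maps:

package main

import "fmt"

type Action int

const (
	Inherit Action = iota
	Allow
	Block
	Blockiffixed
)

type CVE struct {
	Name         string
	Severity     string
	FixedVersion string
}

// blocked mirrors HasBlockedCVE: a name-specific policy wins over the
// severity-level one; Blockiffixed blocks only when a fix is available.
func blocked(cve CVE, byName map[string]Action, bySeverity map[string]Action) bool {
	if a, ok := byName[cve.Name]; ok {
		return a == Block || (a == Blockiffixed && cve.FixedVersion != "")
	}
	if a, ok := bySeverity[cve.Severity]; ok {
		return a == Block || (a == Blockiffixed && cve.FixedVersion != "")
	}
	return false
}

func main() {
	byName := map[string]Action{"CVE-2024-0001": Allow}
	bySeverity := map[string]Action{"critical": Block}
	fmt.Println(blocked(CVE{Name: "CVE-2024-0002", Severity: "critical"}, byName, bySeverity)) // true
	fmt.Println(blocked(CVE{Name: "CVE-2024-0001", Severity: "critical"}, byName, bySeverity)) // false
}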
"cve_store_id", "action", "severity", "deleted", "created_on", "created_by", "updated_on", "updated_by") VALUES + ('t', NULL, NULL, NULL, NULL, '1', '3', 'f', 'now()', '1', 'now()', '1'), + ('t', NULL, NULL, NULL, NULL, '1', '5', 'f', 'now()', '1', 'now()', '1'); + diff --git a/scripts/sql/279_rbac_role_audit.down.sql b/scripts/sql/279_rbac_role_audit.down.sql new file mode 100644 index 00000000000..1f6b9eac299 --- /dev/null +++ b/scripts/sql/279_rbac_role_audit.down.sql @@ -0,0 +1,5 @@ +---- DROP TABLE +DROP TABLE IF EXISTS public.rbac_role_audit; + +---- DROP sequence +DROP SEQUENCE IF EXISTS public.id_seq_rbac_role_audit; diff --git a/scripts/sql/279_rbac_role_audit.up.sql b/scripts/sql/279_rbac_role_audit.up.sql new file mode 100644 index 00000000000..f655a326c0d --- /dev/null +++ b/scripts/sql/279_rbac_role_audit.up.sql @@ -0,0 +1,17 @@ +CREATE SEQUENCE IF NOT EXISTS id_seq_rbac_role_audit; + +CREATE TABLE IF NOT EXISTS "public"."rbac_role_audit" +( + "id" integer NOT NULL DEFAULT nextval('id_seq_rbac_role_audit'::regclass), + "entity" varchar(250) NOT NULL, + "access_type" varchar(250) , + "role" varchar(250) NOT NULL, + "policy_data" jsonb, + "role_data" jsonb, + "audit_operation" varchar(20) NOT NULL, + "created_on" timestamptz NOT NULL, + "created_by" int4 NOT NULL, + "updated_on" timestamptz NOT NULL, + "updated_by" int4 NOT NULL, + PRIMARY KEY ("id") + ); diff --git a/scripts/sql/280_link_external_release.down.sql b/scripts/sql/280_link_external_release.down.sql new file mode 100644 index 00000000000..be499632c88 --- /dev/null +++ b/scripts/sql/280_link_external_release.down.sql @@ -0,0 +1,3 @@ +DROP INDEX IF EXISTS unique_deployment_app_name; + +ALTER TABLE deployment_config DROP COLUMN IF EXISTS release_mode; diff --git a/scripts/sql/280_link_external_release.up.sql b/scripts/sql/280_link_external_release.up.sql new file mode 100644 index 00000000000..793f0b8a9de --- /dev/null +++ b/scripts/sql/280_link_external_release.up.sql @@ -0,0 +1,6 @@ +CREATE UNIQUE INDEX "unique_deployment_app_name" + ON pipeline(deployment_app_name,environment_id,deleted) where deleted=false; + +ALTER TABLE deployment_config + ADD COLUMN release_mode VARCHAR(256) DEFAULT 'create'; + diff --git a/scripts/sql/281_update_scan_tool_metadata.down.sql b/scripts/sql/281_update_scan_tool_metadata.down.sql new file mode 100644 index 00000000000..e3afba4ef14 --- /dev/null +++ b/scripts/sql/281_update_scan_tool_metadata.down.sql @@ -0,0 +1,21 @@ +UPDATE scan_tool_metadata +SET image_scan_descriptor_template = '[ + { + "pathToVulnerabilitiesArray": "Results.#.Vulnerabilities", + "name": "VulnerabilityID", + "package": "PkgName", + "packageVersion": "InstalledVersion", + "fixedInVersion": "FixedVersion", + "severity": "Severity" + } + ]', updated_on = 'now()' +WHERE name = 'TRIVY' + AND version = 'V1' + AND scan_target = 'IMAGE' + AND active = true + AND deleted = false; + +ALTER TABLE image_scan_execution_result + DROP COLUMN class, + DROP COLUMN type, + DROP COLUMN target; \ No newline at end of file diff --git a/scripts/sql/281_update_scan_tool_metadata.up.sql b/scripts/sql/281_update_scan_tool_metadata.up.sql new file mode 100644 index 00000000000..4d771950995 --- /dev/null +++ b/scripts/sql/281_update_scan_tool_metadata.up.sql @@ -0,0 +1,29 @@ +UPDATE scan_tool_metadata SET result_descriptor_template = '[ + { + "pathToResultArray": "Results", + "pathToVulnerabilitiesArray": "Vulnerabilities", + "vulnerabilityData":{ + "name": "VulnerabilityID", + "package": "PkgName", + "packageVersion": "InstalledVersion", 
+ "fixedInVersion": "FixedVersion", + "severity": "Severity" + }, + "resultData":{ + "target":"Target", + "class":"Class", + "type":"Type" + } + } +]',updated_on = 'now()' + +WHERE name = 'TRIVY' + AND version = 'V1' + AND scan_target = 'IMAGE' + AND active = true + AND deleted = false; + +ALTER TABLE image_scan_execution_result + ADD COLUMN class TEXT, + ADD COLUMN type TEXT, + ADD COLUMN target TEXT; \ No newline at end of file diff --git a/specs/configDiffView.yaml b/specs/configDiffView.yaml new file mode 100644 index 00000000000..8a24d50989c --- /dev/null +++ b/specs/configDiffView.yaml @@ -0,0 +1,73 @@ +openapi: 3.0.0 +info: + title: Orchestrator Config Autocomplete API + version: 1.0.0 +paths: + /orchestrator/config/autocomplete: + get: + summary: Retrieve autocomplete data for configuration based on the provided appId and envId. The response includes configuration definitions with names, draft states, and types. + parameters: + - name: appId + in: query + description: The application ID. + required: true + schema: + type: string + - name: envId + in: query + description: The environment ID. + required: true + schema: + type: string + responses: + '200': + description: Successful response + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/ConfigProperty" + + '500': + description: will get this response if any failure occurs at server side. + '400': + description: will get this response if invalid payload is sent in the request. + '403': + description: will get this response if user doesn't view access permission for the app or env + '404': + description: will get this when BaseDeployment Template is not configured + +components: + schemas: + ConfigDataResponse: + type: object + properties: + resourceConfig: + type: array + items: + $ref: '#/components/schemas/ConfigProperty' + + ConfigProperty: + type: object + properties: + name: + type: string + description: Name of the config + example: cm-1 + nullable: true + configState: + $ref: '#/components/schemas/ConfigStateEnum' + type: + $ref: '#/components/schemas/ResourceTypeEnum' + + ConfigStateEnum: + type: integer + enum: [ 1, 2, 3 ] + description: State of config (1 represents draft state , 2 represents approval pending state,3 represents published state) + + ResourceTypeEnum: + type: string + enum: [ "ConfigMap", "Secret", "Deployment Template" ] + description: Describe the config type (possible values are ConfigMap, Secret, Deployment Template) + diff --git a/util/rbac/EnforcerUtil.go b/util/rbac/EnforcerUtil.go index bb646e70355..77f3b952130 100644 --- a/util/rbac/EnforcerUtil.go +++ b/util/rbac/EnforcerUtil.go @@ -76,6 +76,8 @@ type EnforcerUtil interface { CheckAppRbacForAppOrJob(token, resourceName, action string) bool CheckAppRbacForAppOrJobInBulk(token, action string, rbacObjects []string, appType helper.AppType) map[string]bool GetRbacObjectsByEnvIdsAndAppIdBatch(appIdToEnvIds map[int][]int) map[int]map[int]string + GetEnvRBACNameByAppAndEnvName(appName, envName string) string + GetAppRBACNameByAppName(appName string) string } type EnforcerUtilImpl struct { @@ -764,3 +766,19 @@ func (impl EnforcerUtilImpl) GetRbacObjectsByEnvIdsAndAppIdBatch(appIdToEnvIds m } return objects } + +func (impl EnforcerUtilImpl) GetAppRBACNameByAppName(appName string) string { + application, err := impl.appRepo.FindAppAndProjectByAppName(appName) + if err != nil { + return fmt.Sprintf("%s/%s", "", "") + } + return fmt.Sprintf("%s/%s", application.Team.Name, application.AppName) +} + +func (impl 
EnforcerUtilImpl) GetEnvRBACNameByAppAndEnvName(appName, envName string) string { + env, err := impl.environmentRepository.FindByName(envName) + if err != nil { + return fmt.Sprintf("%s/%s", "", appName) + } + return fmt.Sprintf("%s/%s", env.EnvironmentIdentifier, appName) +} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml deleted file mode 100644 index 4025e01ec4a..00000000000 --- a/vendor/github.com/Masterminds/goutils/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -script: - - go test -v - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md deleted file mode 100644 index d700ec47f2b..00000000000 --- a/vendor/github.com/Masterminds/goutils/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# 1.0.1 (2017-05-31) - -## Fixed -- #21: Fix generation of alphanumeric strings (thanks @dbarranco) - -# 1.0.0 (2014-04-30) - -- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/Masterminds/goutils/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md deleted file mode 100644 index 163ffe72a82..00000000000 --- a/vendor/github.com/Masterminds/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. 
GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. - - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. - -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml deleted file mode 100644 index 657564a8474..00000000000 --- a/vendor/github.com/Masterminds/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go deleted file mode 100644 index 8dbd9248583..00000000000 --- a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go +++ /dev/null @@ -1,230 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "unicode" - ) - -/* -CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNonAlphaNumeric(count int) (string, error) { - return CryptoRandomAlphaNumericCustom(count, false, false) -} - -/* -CryptoRandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAscii(count int) (string, error) { - return CryptoRandom(count, 32, 127, false, false) -} - -/* -CryptoRandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, false, true) -} - -/* -CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphabetic(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, false) -} - -/* -CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) -*/ -func CryptoRandomAlphaNumeric(count int) (string, error) { - return CryptoRandom(count, 0, 0, true, true) -} - -/* -CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
-*/ -func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return CryptoRandom(count, 0, 0, letters, numbers) -} - -/* -CryptoRandom creates a random string based on a variety of options, using golang's crypto/rand source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters; these will be used -unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(getCryptoRandomInt(gap) + int64(start)) - } else { - ch = chars[getCryptoRandomInt(gap)+int64(start)] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + getCryptoRandomInt(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + getCryptoRandomInt(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} - -func getCryptoRandomInt(count int) int64 { - nBig, err :=
rand.Int(rand.Reader, big.NewInt(int64(count))) - if err != nil { - panic(err) - } - return nBig.Int64() -} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go deleted file mode 100644 index 272670231ab..00000000000 --- a/vendor/github.com/Masterminds/goutils/randomstringutils.go +++ /dev/null @@ -1,248 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alphabetic characters. - -Parameters: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
-*/ -func RandomAlphaNumeric(count int) (string, error) { - return Random(count, 0, 0, true, true) -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using the default source of randomness. -This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []rune, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using a supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters; these will be used -unless letters and numbers are both false, in which case start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness.
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go deleted file mode 100644 index 741bb530e8a..00000000000 --- a/vendor/github.com/Masterminds/goutils/stringutils.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." - -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." - if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
- -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} - -// Returns either the passed in string, or if the string is empty, the value of defaultStr. -func DefaultString(str string, defaultStr string) string { - if IsEmpty(str) { - return defaultStr - } - return str -} - -// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. -func DefaultIfBlank(str string, defaultStr string) string { - if IsBlank(str) { - return defaultStr - } - return str -} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go deleted file mode 100644 index 034cad8e210..00000000000 --- a/vendor/github.com/Masterminds/goutils/wordutils.go +++ /dev/null @@ -1,357 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. 
-The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. - -Example: - - package main - - import ( - "fmt" - "github.com/aokoli/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - } -*/ -package goutils - -import ( - "bytes" - "strings" - "unicode" -) - -// VERSION indicates the current version of goutils -const VERSION = "1.0.0" - -/* -Wrap wraps a single line of text, identifying words by ' '. -New lines will be separated by '\n'. Very long words, such as URLs, will not be wrapped. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - -Returns: - a line with newlines inserted -*/ -func Wrap(str string, wrapLength int) string { - return WrapCustom(str, wrapLength, "", false) -} - -/* -WrapCustom wraps a single line of text, identifying words by ' '. -Leading spaces on a new line are stripped. Trailing spaces are not stripped. - -Parameters: - str - the string to be word wrapped - wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 - newLineStr - the string to insert for a new line, "" uses '\n' - wrapLongWords - true if long words (such as URLs) should be wrapped - -Returns: - a line with newlines inserted -*/ -func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { - - if str == "" { - return "" - } - if newLineStr == "" { - newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons - } - if wrapLength < 1 { - wrapLength = 1 - } - - inputLineLength := len(str) - offset := 0 - - var wrappedLine bytes.Buffer - - for inputLineLength-offset > wrapLength { - - if rune(str[offset]) == ' ' { - offset++ - continue - } - - end := wrapLength + offset + 1 - spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset - - if spaceToWrapAt >= offset { - // normal word (not longer than wrapLength) - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - - } else { - // long word or URL - if wrapLongWords { - end := wrapLength + offset - // long words are wrapped one line at a time - wrappedLine.WriteString(str[offset:end]) - wrappedLine.WriteString(newLineStr) - offset += wrapLength - } else { - // long words aren't wrapped, just extended beyond limit - end := wrapLength + offset - index := strings.IndexRune(str[end:len(str)], ' ') - if index == -1 { - wrappedLine.WriteString(str[offset:len(str)]) - offset = inputLineLength - } else { - spaceToWrapAt = index + end - wrappedLine.WriteString(str[offset:spaceToWrapAt]) - wrappedLine.WriteString(newLineStr) - offset = spaceToWrapAt + 1 - } - } - } - } - - wrappedLine.WriteString(str[offset:len(str)]) - - return wrappedLine.String() - -} - -/* -Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
-To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). -The delimiters represent a set of characters understood to separate words. The first string character -and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func Capitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - capitalizeNext := true - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - capitalizeNext = true - } else if capitalizeNext { - buffer[i] = unicode.ToTitle(ch) - capitalizeNext = false - } - } - return string(buffer) - -} - -/* -CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a -titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood -to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. -Capitalization uses the Unicode title case, normally equivalent to upper case. - -Parameters: - str - the string to capitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - capitalized string -*/ -func CapitalizeFully(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - str = strings.ToLower(str) - return Capitalize(str, delimiters...) -} - -/* -Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. -The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter -character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to uncapitalize fully - delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter - -Returns: - uncapitalized string -*/ -func Uncapitalize(str string, delimiters ...rune) string { - - var delimLen int - - if delimiters == nil { - delimLen = -1 - } else { - delimLen = len(delimiters) - } - - if str == "" || delimLen == 0 { - return str - } - - buffer := []rune(str) - uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if isDelimiter(ch, delimiters...) { - uncapitalizeNext = true - } else if uncapitalizeNext { - buffer[i] = unicode.ToLower(ch) - uncapitalizeNext = false - } - } - return string(buffer) -} - -/* -SwapCase swaps the case of a string using a word based algorithm.
- -Conversion algorithm: - - Upper case character converts to Lower case - Title case character converts to Lower case - Lower case character after Whitespace or at start converts to Title case - Other Lower case character converts to Upper case - Whitespace is defined by unicode.IsSpace(char). - -Parameters: - str - the string to swap case - -Returns: - the changed string -*/ -func SwapCase(str string) string { - if str == "" { - return str - } - buffer := []rune(str) - - whitespace := true - - for i := 0; i < len(buffer); i++ { - ch := buffer[i] - if unicode.IsUpper(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsTitle(ch) { - buffer[i] = unicode.ToLower(ch) - whitespace = false - } else if unicode.IsLower(ch) { - if whitespace { - buffer[i] = unicode.ToTitle(ch) - whitespace = false - } else { - buffer[i] = unicode.ToUpper(ch) - } - } else { - whitespace = unicode.IsSpace(ch) - } - } - return string(buffer) -} - -/* -Initials extracts the initial letters from each word in the string. The first letter of the string and all first -letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters -parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string. - -Parameters: - str - the string to get initials from - delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter -Returns: - string of initial letters -*/ -func Initials(str string, delimiters ...rune) string { - if str == "" { - return str - } - if delimiters != nil && len(delimiters) == 0 { - return "" - } - strLen := len(str) - var buf bytes.Buffer - lastWasGap := true - for i := 0; i < strLen; i++ { - ch := rune(str[i]) - - if isDelimiter(ch, delimiters...) { - lastWasGap = true - } else if lastWasGap { - buf.WriteRune(ch) - lastWasGap = false - } - } - return buf.String() -} - -// private function (lower case func name) -func isDelimiter(ch rune, delimiters ...rune) bool { - if delimiters == nil { - return unicode.IsSpace(ch) - } - for _, delimiter := range delimiters { - if ch == delimiter { - return true - } - } - return false -} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore deleted file mode 100644 index 5e3002f88f5..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -vendor/ -/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md deleted file mode 100644 index 2ce45dd4eca..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md +++ /dev/null @@ -1,383 +0,0 @@ -# Changelog - -## Release 3.2.3 (2022-11-29) - -### Changed - -- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) -- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) -- #353: Updated masterminds/semver which included bug fixes -- #354: Updated golang.org/x/crypto which included bug fixes - -## Release 3.2.2 (2021-02-04) - -This is a re-release of 3.2.1 to satisfy something with the Go module system. - -## Release 3.2.1 (2021-02-04) - -### Changed - -- Upgraded `Masterminds/goutils` to `v1.1.1`.
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) - -## Release 3.2.0 (2020-12-14) - -### Added - -- #211: Added randInt function (thanks @kochurovro) -- #223: Added fromJson and mustFromJson functions (thanks @mholt) -- #242: Added a bcrypt function (thanks @robbiet480) -- #253: Added randBytes function (thanks @MikaelSmith) -- #254: Added dig function for dicts (thanks @nyarly) -- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) -- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) -- #268: Added and and all functions for testing conditions (thanks @phuslu) -- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf - (thanks @andrewmostello) -- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) -- #270: Extend certificate functions to handle non-RSA keys + add support for - ed25519 keys (thanks @misberner) - -### Changed - -- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer -- Using semver 3.1.1 and mergo 0.3.11 - -### Fixed - -- #249: Fix htmlDateInZone example (thanks @spawnia) - -NOTE: The dependency github.com/imdario/mergo reverted the breaking change in -0.3.9 via 0.3.10 release. - -## Release 3.1.0 (2020-04-16) - -NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 -that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. - -### Added - -- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) -- #224: Added duration filter (thanks @frebib) -- #205: Added `seq` function (thanks @thadc23) - -### Changed - -- #203: Unlambda functions with correct signature (thanks @muesli) -- #236: Updated the license formatting for GitHub display purposes -- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 - as it causes a breaking change for sprig. That issue is tracked at - https://github.com/imdario/mergo/issues/139 - -### Fixed - -- #229: Fix `seq` example in docs (thanks @kalmant) - -## Release 3.0.2 (2019-12-13) - -### Fixed - -- #220: Updating to semver v3.0.3 to fix issue with <= ranges -- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) - -## Release 3.0.1 (2019-12-08) - -### Fixed - -- #212: Updated semver fixing broken constraint checking with ^0.0 - -## Release 3.0.0 (2019-10-02) - -### Added - -- #187: Added durationRound function (thanks @yjp20) -- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) -- #193: Added toRawJson support (thanks @Dean-Coakley) -- #197: Added get support to dicts (thanks @Dean-Coakley) - -### Changed - -- #186: Moving dependency management to Go modules -- #186: Updated semver to v3. This has changes in the way ^ is handled -- #194: Updated documentation on merging and how it copies. 
Added example using deepCopy
-- #196: trunc now supports negative values (thanks @Dean-Coakley)
-
-## Release 2.22.0 (2019-10-02)
-
-### Added
-
-- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
-- #195: Added deepCopy function for use with dicts
-
-### Changed
-
-- Updated merge and mergeOverwrite documentation to explain copying and how to
-  use deepCopy with it
-
-## Release 2.21.0 (2019-09-18)
-
-### Added
-
-- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
-- #128: Added toDecimal support (thanks @Dean-Coakley)
-- #169: Added list concat (thanks @astorath)
-- #174: Added deepEqual function (thanks @bonifaido)
-- #170: Added url parse and join functions (thanks @astorath)
-
-### Changed
-
-- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
-
-### Fixed
-
-- #172: Fix semver wildcard example (thanks @piepmatz)
-- #175: Fix dateInZone doc example (thanks @s3than)
-
-## Release 2.20.0 (2019-06-18)
-
-### Added
-
-- #164: Adding function to get unix epoch for a time (@mattfarina)
-- #166: Adding tests for date_in_zone (@mattfarina)
-
-### Changed
-
-- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
-- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
-- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
-
-### Fixed
-
-## Release 2.19.0 (2019-03-02)
-
-IMPORTANT: This release reverts a change from 2.18.0
-
-In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: it alters the existing crypto functions to use secure random.
-
-We debated whether this classifies as a change worthy of a major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
-
-### Changed
-
-- Fix substr panic 35fb796 (Alexey igrychev)
-- Remove extra period 1eb7729 (Matthew Lorimor)
-- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
-- README edits/fixes/suggestions 08fe136 (Lauri Apple)
-
-
-## Release 2.18.0 (2019-02-12)
-
-### Added
-
-- Added mergeOverwrite function
-- cryptographic functions that use secure random (see fe1de12)
-
-### Changed
-
-- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
-- Handle has for nil list 9c10885 (Daniel Cohen)
-- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
-- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
-- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
-- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
-- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
-
-### Fixed
-
-- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
-- Fix substr var names and comments d581f80 (Dean Coakley)
-- Fix substr documentation 2737203 (Dean Coakley)
-
-## Release 2.17.1 (2019-01-03)
-
-### Fixed
-
-The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
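To make the 2.18/2.19 secure-random change above concrete, here is a minimal, hedged sketch of how the affected string generators are typically invoked from a template (assuming the v3 import path and current function names):

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// Since release 2.19.0, randAlphaNum and friends draw from
	// crypto/rand rather than math/rand.
	tpl := template.Must(template.New("token").
		Funcs(sprig.TxtFuncMap()).
		Parse(`{{ randAlphaNum 16 }}`))
	_ = tpl.Execute(os.Stdout, nil) // prints a 16-character random token
}
```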
-
-## Release 2.17.0 (2019-01-03)
-
-### Added
-
-- adds adler32sum function and test 6908fc2 (marshallford)
-- Added kebabcase function ca331a1 (Ilyes512)
-
-### Changed
-
-- Update goutils to 1.1.0 4e1125d (Matt Butcher)
-
-### Fixed
-
-- Fix 'has' documentation e3f2a85 (dean-coakley)
-- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
-- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
-
-## Release 2.16.0 (2018-08-13)
-
-### Added
-
-- add splitn function fccb0b0 (Helgi Þorbjörnsson)
-- Add slice func df28ca7 (gongdo)
-- Generate serial number a3bdffd (Cody Coons)
-- Extract values of dict with values function df39312 (Lawrence Jones)
-
-### Changed
-
-- Modify panic message for list.slice ae38335 (gongdo)
-- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
-- Remove duplicated documentation 1d97af1 (Matthew Fisher)
-- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
-
-### Fixed
-
-- Fix file permissions c5f40b5 (gongdo)
-- Fix example for buildCustomCert 7779e0d (Tin Lam)
-
-## Release 2.15.0 (2018-04-02)
-
-### Added
-
-- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
-- #66: Add ternary function (thanks @binoculars)
-- #67: Allow keys function to take multiple dicts (thanks @binoculars)
-- #89: Added sha1sum to crypto function (thanks @benkeil)
-- #81: Allow customizing Root CA that is used by genSignedCert (thanks @chenzhiwei)
-- #92: Add travis testing for go 1.10
-- #93: Adding appveyor config for windows testing
-
-### Changed
-
-- #90: Updating to more recent dependencies
-- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
-
-### Fixed
-
-- #76: Fixed documentation typos (thanks @Thiht)
-- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
-
-## Release 2.14.1 (2017-12-01)
-
-### Fixed
-
-- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
-- #61: Removing line with {{ due to blocking github pages generation
-- #64: Update the list functions to handle int, string, and other slices for compatibility
-
-## Release 2.14.0 (2017-10-06)
-
-This new version of Sprig adds a set of functions for generating and working with SSL certificates.
-
-- `genCA` generates an SSL Certificate Authority
-- `genSelfSignedCert` generates an SSL self-signed certificate
-- `genSignedCert` generates an SSL certificate and key based on a given CA
-
-## Release 2.13.0 (2017-09-18)
-
-This release adds new functions, including:
-
-- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
-- `floor`, `ceil`, and `round` math functions
-- `toDate` converts a string to a date
-- `nindent` is just like `indent` but also prepends a new line
-- `ago` returns the time elapsed since `time.Now`
-
-### Added
-
-- #40: Added basic regex functionality (thanks @alanquillin)
-- #41: Added ceil, floor, and round functions (thanks @alanquillin)
-- #48: Added toDate function (thanks @andreynering)
-- #50: Added nindent function (thanks @binoculars)
-- #46: Added ago function (thanks @slayer)
-
-### Changed
-
-- #51: Updated godocs to include new string functions (thanks @curtisallen)
-- #49: Added ability to merge multiple dicts (thanks @binoculars)
-
-## Release 2.12.0 (2017-05-17)
-
-- `snakecase`, `camelcase`, and `shuffle` are three new string functions
-- `fail` allows you to bail out of a template render when conditions are not met
-
-## Release 2.11.0 (2017-05-02)
-
-- Added `toJson` and `toPrettyJson`
-- Added `merge`
-- Refactored documentation
-
-## Release 2.10.0 (2017-03-15)
-
-- Added `semver` and `semverCompare` for Semantic Versions
-- `list` replaces `tuple`
-- Fixed issue with `join`
-- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
-
-## Release 2.9.0 (2017-02-23)
-
-- Added `splitList` to split a list
-- Added the crypto functions `genPrivateKey` and `derivePassword`
-
-## Release 2.8.0 (2016-12-21)
-
-- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
-- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
-
-## Release 2.7.0 (2016-12-01)
-
-- Added `sha256sum` to generate a hash of an input
-- Added functions to convert a numeric or string to `int`, `int64`, `float64`
-
-## Release 2.6.0 (2016-10-03)
-
-- Added a `uuidv4` template function for generating UUIDs inside of a template.
-
-## Release 2.5.0 (2016-08-19)
-
-- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
-- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
-- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
-
-## Release 2.4.0 (2016-08-16)
-
-- Adds two functions: `until` and `untilStep`
-
-## Release 2.3.0 (2016-06-21)
-
-- cat: Concatenate strings with whitespace separators.
-- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
-- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
-- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
-
-## Release 2.2.0 (2016-04-21)
-
-- Added a `genPrivateKey` function (Thanks @bacongobbler)
-
-## Release 2.1.0 (2016-03-30)
-
-- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
-- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
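As a quick illustration of the 2.1.0 notes above, a hedged sketch of `default` under the hermetic function map (function names as they appear in the v3 sources later in this patch):

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// HermeticTxtFuncMap exposes only the repeatable functions;
	// `default` yields "bar" because .Foo is empty.
	tpl := template.Must(template.New("demo").
		Funcs(sprig.HermeticTxtFuncMap()).
		Parse(`{{ .Foo | default "bar" }}`))
	_ = tpl.Execute(os.Stdout, map[string]interface{}{"Foo": ""}) // prints "bar"
}
```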
- -## Release 2.0.0 (2016-03-29) - -Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. - -- `min` complements `max` (formerly `biggest`) -- `empty` indicates that a value is the empty value for its type -- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` -- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` -- Date formatters have been added for HTML dates (as used in `date` input fields) -- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). - -## Release 1.2.0 (2016-02-01) - -- Added quote and squote -- Added b32enc and b32dec -- add now takes varargs -- biggest now takes varargs - -## Release 1.1.0 (2015-12-29) - -- Added #4: Added contains function. strings.Contains, but with the arguments - switched to simplify common pipelines. (thanks krancour) -- Added Travis-CI testing support - -## Release 1.0.0 (2015-12-23) - -- Initial release diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt deleted file mode 100644 index f311b1eaaaa..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2013-2020 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile deleted file mode 100644 index 78d409cde2c..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: test -test: - @echo "==> Running tests" - GO111MODULE=on go test -v - -.PHONY: test-cover -test-cover: - @echo "==> Running Tests with coverage" - GO111MODULE=on go test -cover . 
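For reference, a minimal sketch of the `tuple`, `dict`, and int64-arithmetic behavior described in the 2.0.0 notes earlier in this changelog (assuming the v3 import path):

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// dict builds a map, tuple an ad-hoc slice; add returns int64.
	const src = `{{ $d := dict "key1" "val1" }}{{ $d.key1 }} {{ index (tuple "a" "b" "c") 1 }} {{ add 1 2 }}`
	tpl := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(src))
	_ = tpl.Execute(os.Stdout, nil) // prints "val1 b 3"
}
```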
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md deleted file mode 100644 index 3e22c60e1a0..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Sprig: Template functions for Go templates - -[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3) -[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig) -[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html) -[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions) - -The Go language comes with a [built-in template -language](http://golang.org/pkg/text/template/), but not -very many template functions. Sprig is a library that provides more than 100 commonly -used template functions. - -It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and in various -JavaScript libraries, such as [underscore.js](http://underscorejs.org/). - -## IMPORTANT NOTES - -Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In -its v0.3.9 release, there was a behavior change that impacts merging template -functions in sprig. It is currently recommended to use v0.3.10 or later of that package. -Using v0.3.9 will cause sprig tests to fail. - -## Package Versions - -There are two active major versions of the `sprig` package. - -* v3 is currently stable release series on the `master` branch. The Go API should - remain compatible with v2, the current stable version. Behavior change behind - some functions is the reason for the new major version. -* v2 is the previous stable release series. It has been more than three years since - the initial release of v2. You can read the documentation and see the code - on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch. - Bug fixes to this major version will continue for some time. - -## Usage - -**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for -detailed instructions and code snippets for the >100 template functions available. - -**Go developers**: If you'd like to include Sprig as a library in your program, -our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). - -For standard usage, read on. - -### Load the Sprig library - -To load the Sprig `FuncMap`: - -```go - -import ( - "github.com/Masterminds/sprig/v3" - "html/template" -) - -// This example illustrates that the FuncMap *must* be set before the -// templates themselves are loaded. -tpl := template.Must( - template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") -) - - -``` - -### Calling the functions inside of templates - -By convention, all functions are lowercase. This seems to follow the Go -idiom for template functions (as opposed to template methods, which are -TitleCase). For example, this: - -``` -{{ "hello!" | upper | repeat 5 }} -``` - -produces this: - -``` -HELLO!HELLO!HELLO!HELLO!HELLO! -``` - -## Principles Driving Our Function Selection - -We followed these principles to decide which functions to add and how to implement them: - -- Use template functions to build layout. 
The following - types of operations are within the domain of template functions: - - Formatting - - Layout - - Simple type conversions - - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) -- Template functions should not return errors unless there is no way to print - a sensible value. For example, converting a string to an integer should not - produce an error if conversion fails. Instead, it should display a default - value. -- Simple math is necessary for grid layouts, pagers, and so on. Complex math - (anything other than arithmetic) should be done outside of templates. -- Template functions only deal with the data passed into them. They never retrieve - data from a source. -- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go deleted file mode 100644 index 13a5cd55934..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/crypto.go +++ /dev/null @@ -1,653 +0,0 @@ -package sprig - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/hmac" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "hash/adler32" - "io" - "math/big" - "net" - "time" - - "strings" - - "github.com/google/uuid" - bcrypt_lib "golang.org/x/crypto/bcrypt" - "golang.org/x/crypto/scrypt" -) - -func sha256sum(input string) string { - hash := sha256.Sum256([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func sha1sum(input string) string { - hash := sha1.Sum([]byte(input)) - return hex.EncodeToString(hash[:]) -} - -func adler32sum(input string) string { - hash := adler32.Checksum([]byte(input)) - return fmt.Sprintf("%d", hash) -} - -func bcrypt(input string) string { - hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) - if err != nil { - return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) - } - - return string(hash) -} - -func htpasswd(username string, password string) string { - if strings.Contains(username, ":") { - return fmt.Sprintf("invalid username: %s", username) - } - return fmt.Sprintf("%s:%s", username, bcrypt(password)) -} - -func randBytes(count int) (string, error) { - buf := make([]byte, count) - if _, err := rand.Read(buf); err != nil { - return "", err - } - return base64.StdEncoding.EncodeToString(buf), nil -} - -// uuidv4 provides a safe and secure UUID v4 implementation -func uuidv4() string { - return uuid.New().String() -} - -var masterPasswordSeed = "com.lyndir.masterpassword" - -var passwordTypeTemplates = map[string][][]byte{ - "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, - "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), - []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), - []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), - []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), - []byte("CvccCvcvCvccno")}, - "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, - "short": 
{[]byte("Cvcn")}, - "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, - "pin": {[]byte("nnnn")}, -} - -var templateCharacters = map[byte]string{ - 'V': "AEIOU", - 'C': "BCDFGHJKLMNPQRSTVWXYZ", - 'v': "aeiou", - 'c': "bcdfghjklmnpqrstvwxyz", - 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", - 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", - 'n': "0123456789", - 'o': "@&%?,=[]_:-+*$#!'^~;()/.", - 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", -} - -func derivePassword(counter uint32, passwordType, password, user, site string) string { - var templates = passwordTypeTemplates[passwordType] - if templates == nil { - return fmt.Sprintf("cannot find password template %s", passwordType) - } - - var buffer bytes.Buffer - buffer.WriteString(masterPasswordSeed) - binary.Write(&buffer, binary.BigEndian, uint32(len(user))) - buffer.WriteString(user) - - salt := buffer.Bytes() - key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) - if err != nil { - return fmt.Sprintf("failed to derive password: %s", err) - } - - buffer.Truncate(len(masterPasswordSeed)) - binary.Write(&buffer, binary.BigEndian, uint32(len(site))) - buffer.WriteString(site) - binary.Write(&buffer, binary.BigEndian, counter) - - var hmacv = hmac.New(sha256.New, key) - hmacv.Write(buffer.Bytes()) - var seed = hmacv.Sum(nil) - var temp = templates[int(seed[0])%len(templates)] - - buffer.Truncate(0) - for i, element := range temp { - passChars := templateCharacters[element] - passChar := passChars[int(seed[i+1])%len(passChars)] - buffer.WriteByte(passChar) - } - - return buffer.String() -} - -func generatePrivateKey(typ string) string { - var priv interface{} - var err error - switch typ { - case "", "rsa": - // good enough for government work - priv, err = rsa.GenerateKey(rand.Reader, 4096) - case "dsa": - key := new(dsa.PrivateKey) - // again, good enough for government work - if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { - return fmt.Sprintf("failed to generate dsa params: %s", err) - } - err = dsa.GenerateKey(key, rand.Reader) - priv = key - case "ecdsa": - // again, good enough for government work - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - default: - return "Unknown type " + typ - } - if err != nil { - return fmt.Sprintf("failed to generate private key: %s", err) - } - - return string(pem.EncodeToMemory(pemBlockForKey(priv))) -} - -// DSAKeyFormat stores the format for DSA keys. 
-// Used by pemBlockForKey -type DSAKeyFormat struct { - Version int - P, Q, G, Y, X *big.Int -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *dsa.PrivateKey: - val := DSAKeyFormat{ - P: k.P, Q: k.Q, G: k.G, - Y: k.Y, X: k.X, - } - bytes, _ := asn1.Marshal(val) - return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} - case *ecdsa.PrivateKey: - b, _ := x509.MarshalECPrivateKey(k) - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - // attempt PKCS#8 format for all other keys - b, err := x509.MarshalPKCS8PrivateKey(k) - if err != nil { - return nil - } - return &pem.Block{Type: "PRIVATE KEY", Bytes: b} - } -} - -func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { - block, _ := pem.Decode([]byte(pemBlock)) - if block == nil { - return nil, errors.New("no PEM data in input") - } - - if block.Type == "PRIVATE KEY" { - priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) - } - return priv, nil - } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { - return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) - } - - switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" - case "RSA": - priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) - } - return priv, nil - case "EC": - priv, err := x509.ParseECPrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) - } - return priv, nil - case "DSA": - var k DSAKeyFormat - _, err := asn1.Unmarshal(block.Bytes, &k) - if err != nil { - return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, Q: k.Q, G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - return priv, nil - default: - return nil, fmt.Errorf("invalid private key type %s", block.Type) - } -} - -func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { - switch k := priv.(type) { - case interface{ Public() crypto.PublicKey }: - return k.Public(), nil - case *dsa.PrivateKey: - return &k.PublicKey, nil - default: - return nil, fmt.Errorf("unable to get public key for type %T", priv) - } -} - -type certificate struct { - Cert string - Key string -} - -func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { - crt := certificate{} - - cert, err := base64.StdEncoding.DecodeString(b64cert) - if err != nil { - return crt, errors.New("unable to decode base64 certificate") - } - - key, err := base64.StdEncoding.DecodeString(b64key) - if err != nil { - return crt, errors.New("unable to decode base64 private key") - } - - decodedCert, _ := pem.Decode(cert) - if decodedCert == nil { - return crt, errors.New("unable to decode certificate") - } - _, err = x509.ParseCertificate(decodedCert.Bytes) - if err != nil { - return crt, fmt.Errorf( - "error parsing certificate: decodedCert.Bytes: %s", - err, - ) - } - - _, err = parsePrivateKeyPEM(string(key)) - if err != nil { - return crt, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - crt.Cert = string(cert) - crt.Key = string(key) - - return crt, nil -} - -func generateCertificateAuthority( - cn string, - daysValid int, -) (certificate, error) { - priv, err := 
rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithPEMKey( - cn string, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) -} - -func generateCertificateAuthorityWithKeyInternal( - cn string, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - ca := certificate{} - - template, err := getBaseCertTemplate(cn, nil, nil, daysValid) - if err != nil { - return ca, err - } - // Override KeyUsage and IsCA - template.KeyUsage = x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | - x509.KeyUsageCertSign - template.IsCA = true - - ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) - - return ca, err -} - -func generateSelfSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) -} - -func generateSelfSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) - - return cert, err -} - -func generateSignedCertificate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, -) (certificate, error) { - priv, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return certificate{}, fmt.Errorf("error generating rsa key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithPEMKey( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - privPEM string, -) (certificate, error) { - priv, err := parsePrivateKeyPEM(privPEM) - if err != nil { - return certificate{}, fmt.Errorf("parsing private key: %s", err) - } - return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) -} - -func generateSignedCertificateWithKeyInternal( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, - ca certificate, - priv crypto.PrivateKey, -) (certificate, error) { - cert := certificate{} - - decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) - if decodedSignerCert == nil { - return cert, errors.New("unable to decode certificate") - } - signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) - if err != 
nil { - return cert, fmt.Errorf( - "error parsing certificate: decodedSignerCert.Bytes: %s", - err, - ) - } - signerKey, err := parsePrivateKeyPEM(ca.Key) - if err != nil { - return cert, fmt.Errorf( - "error parsing private key: %s", - err, - ) - } - - template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) - if err != nil { - return cert, err - } - - cert.Cert, cert.Key, err = getCertAndKey( - template, - priv, - signerCert, - signerKey, - ) - - return cert, err -} - -func getCertAndKey( - template *x509.Certificate, - signeeKey crypto.PrivateKey, - parent *x509.Certificate, - signingKey crypto.PrivateKey, -) (string, string, error) { - signeePubKey, err := getPublicKey(signeeKey) - if err != nil { - return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) - } - derBytes, err := x509.CreateCertificate( - rand.Reader, - template, - parent, - signeePubKey, - signingKey, - ) - if err != nil { - return "", "", fmt.Errorf("error creating certificate: %s", err) - } - - certBuffer := bytes.Buffer{} - if err := pem.Encode( - &certBuffer, - &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) - } - - keyBuffer := bytes.Buffer{} - if err := pem.Encode( - &keyBuffer, - pemBlockForKey(signeeKey), - ); err != nil { - return "", "", fmt.Errorf("error pem-encoding key: %s", err) - } - - return certBuffer.String(), keyBuffer.String(), nil -} - -func getBaseCertTemplate( - cn string, - ips []interface{}, - alternateDNS []interface{}, - daysValid int, -) (*x509.Certificate, error) { - ipAddresses, err := getNetIPs(ips) - if err != nil { - return nil, err - } - dnsNames, err := getAlternateDNSStrs(alternateDNS) - if err != nil { - return nil, err - } - serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) - if err != nil { - return nil, err - } - return &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: cn, - }, - IPAddresses: ipAddresses, - DNSNames: dnsNames, - NotBefore: time.Now(), - NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - x509.ExtKeyUsageClientAuth, - }, - BasicConstraintsValid: true, - }, nil -} - -func getNetIPs(ips []interface{}) ([]net.IP, error) { - if ips == nil { - return []net.IP{}, nil - } - var ipStr string - var ok bool - var netIP net.IP - netIPs := make([]net.IP, len(ips)) - for i, ip := range ips { - ipStr, ok = ip.(string) - if !ok { - return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) - } - netIP = net.ParseIP(ipStr) - if netIP == nil { - return nil, fmt.Errorf("error parsing ip: %s", ipStr) - } - netIPs[i] = netIP - } - return netIPs, nil -} - -func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { - if alternateDNS == nil { - return []string{}, nil - } - var dnsStr string - var ok bool - alternateDNSStrs := make([]string, len(alternateDNS)) - for i, dns := range alternateDNS { - dnsStr, ok = dns.(string) - if !ok { - return nil, fmt.Errorf( - "error processing alternate dns name: %v is not a string", - dns, - ) - } - alternateDNSStrs[i] = dnsStr - } - return alternateDNSStrs, nil -} - -func encryptAES(password string, plaintext string) (string, error) { - if plaintext == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, 
[]byte(password)) - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - content := []byte(plaintext) - blockSize := block.BlockSize() - padding := blockSize - len(content)%blockSize - padtext := bytes.Repeat([]byte{byte(padding)}, padding) - content = append(content, padtext...) - - ciphertext := make([]byte, aes.BlockSize+len(content)) - - iv := ciphertext[:aes.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return "", err - } - - mode := cipher.NewCBCEncrypter(block, iv) - mode.CryptBlocks(ciphertext[aes.BlockSize:], content) - - return base64.StdEncoding.EncodeToString(ciphertext), nil -} - -func decryptAES(password string, crypt64 string) (string, error) { - if crypt64 == "" { - return "", nil - } - - key := make([]byte, 32) - copy(key, []byte(password)) - - crypt, err := base64.StdEncoding.DecodeString(crypt64) - if err != nil { - return "", err - } - - block, err := aes.NewCipher(key) - if err != nil { - return "", err - } - - iv := crypt[:aes.BlockSize] - crypt = crypt[aes.BlockSize:] - decrypted := make([]byte, len(crypt)) - mode := cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(decrypted, crypt) - - return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil -} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go deleted file mode 100644 index ed022ddacac..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/date.go +++ /dev/null @@ -1,152 +0,0 @@ -package sprig - -import ( - "strconv" - "time" -) - -// Given a format and a date, format the date string. -// -// Date can be a `time.Time` or an `int, int32, int64`. -// In the later case, it is treated as seconds since UNIX -// epoch. -func date(fmt string, date interface{}) string { - return dateInZone(fmt, date, "Local") -} - -func htmlDate(date interface{}) string { - return dateInZone("2006-01-02", date, "Local") -} - -func htmlDateInZone(date interface{}, zone string) string { - return dateInZone("2006-01-02", date, zone) -} - -func dateInZone(fmt string, date interface{}, zone string) string { - var t time.Time - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case *time.Time: - t = *date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - case int32: - t = time.Unix(int64(date), 0) - } - - loc, err := time.LoadLocation(zone) - if err != nil { - loc, _ = time.LoadLocation("UTC") - } - - return t.In(loc).Format(fmt) -} - -func dateModify(fmt string, date time.Time) time.Time { - d, err := time.ParseDuration(fmt) - if err != nil { - return date - } - return date.Add(d) -} - -func mustDateModify(fmt string, date time.Time) (time.Time, error) { - d, err := time.ParseDuration(fmt) - if err != nil { - return time.Time{}, err - } - return date.Add(d), nil -} - -func dateAgo(date interface{}) string { - var t time.Time - - switch date := date.(type) { - default: - t = time.Now() - case time.Time: - t = date - case int64: - t = time.Unix(date, 0) - case int: - t = time.Unix(int64(date), 0) - } - // Drop resolution to seconds - duration := time.Since(t).Round(time.Second) - return duration.String() -} - -func duration(sec interface{}) string { - var n int64 - switch value := sec.(type) { - default: - n = 0 - case string: - n, _ = strconv.ParseInt(value, 10, 64) - case int64: - n = value - } - return (time.Duration(n) * time.Second).String() -} - -func durationRound(duration interface{}) string { - var d time.Duration - switch duration 
:= duration.(type) { - default: - d = 0 - case string: - d, _ = time.ParseDuration(duration) - case int64: - d = time.Duration(duration) - case time.Time: - d = time.Since(duration) - } - - u := uint64(d) - neg := d < 0 - if neg { - u = -u - } - - var ( - year = uint64(time.Hour) * 24 * 365 - month = uint64(time.Hour) * 24 * 30 - day = uint64(time.Hour) * 24 - hour = uint64(time.Hour) - minute = uint64(time.Minute) - second = uint64(time.Second) - ) - switch { - case u > year: - return strconv.FormatUint(u/year, 10) + "y" - case u > month: - return strconv.FormatUint(u/month, 10) + "mo" - case u > day: - return strconv.FormatUint(u/day, 10) + "d" - case u > hour: - return strconv.FormatUint(u/hour, 10) + "h" - case u > minute: - return strconv.FormatUint(u/minute, 10) + "m" - case u > second: - return strconv.FormatUint(u/second, 10) + "s" - } - return "0s" -} - -func toDate(fmt, str string) time.Time { - t, _ := time.ParseInLocation(fmt, str, time.Local) - return t -} - -func mustToDate(fmt, str string) (time.Time, error) { - return time.ParseInLocation(fmt, str, time.Local) -} - -func unixEpoch(date time.Time) string { - return strconv.FormatInt(date.Unix(), 10) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go deleted file mode 100644 index b9f979666dd..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/defaults.go +++ /dev/null @@ -1,163 +0,0 @@ -package sprig - -import ( - "bytes" - "encoding/json" - "math/rand" - "reflect" - "strings" - "time" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// dfault checks whether `given` is set, and returns default if not set. -// -// This returns `d` if `given` appears not to be set, and `given` otherwise. -// -// For numeric types 0 is unset. -// For strings, maps, arrays, and slices, len() = 0 is considered unset. -// For bool, false is unset. -// Structs are never considered unset. -// -// For everything else, including pointers, a nil value is unset. -func dfault(d interface{}, given ...interface{}) interface{} { - - if empty(given) || empty(given[0]) { - return d - } - return given[0] -} - -// empty returns true if the given value has the zero value for its type. -func empty(given interface{}) bool { - g := reflect.ValueOf(given) - if !g.IsValid() { - return true - } - - // Basically adapted from text/template.isTrue - switch g.Kind() { - default: - return g.IsNil() - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return g.Len() == 0 - case reflect.Bool: - return !g.Bool() - case reflect.Complex64, reflect.Complex128: - return g.Complex() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return g.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return g.Uint() == 0 - case reflect.Float32, reflect.Float64: - return g.Float() == 0 - case reflect.Struct: - return false - } -} - -// coalesce returns the first non-empty value. -func coalesce(v ...interface{}) interface{} { - for _, val := range v { - if !empty(val) { - return val - } - } - return nil -} - -// all returns true if empty(x) is false for all values x in the list. -// If the list is empty, return true. -func all(v ...interface{}) bool { - for _, val := range v { - if empty(val) { - return false - } - } - return true -} - -// any returns true if empty(x) is false for any x in the list. -// If the list is empty, return false. 
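// A hedged sketch of how the emptiness rules documented above behave when
// exercised from a template (these helpers are unexported and reach user
// code only through the sprig FuncMap):
//
//	{{ coalesce "" 0 "fallback" }}  -> fallback
//	{{ all "a" 1 true }}            -> true
//	{{ any "" 0 false }}            -> false
//	{{ empty (list) }}              -> true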
-func any(v ...interface{}) bool { - for _, val := range v { - if !empty(val) { - return true - } - } - return false -} - -// fromJson decodes JSON into a structured value, ignoring errors. -func fromJson(v string) interface{} { - output, _ := mustFromJson(v) - return output -} - -// mustFromJson decodes JSON into a structured value, returning errors. -func mustFromJson(v string) (interface{}, error) { - var output interface{} - err := json.Unmarshal([]byte(v), &output) - return output, err -} - -// toJson encodes an item into a JSON string -func toJson(v interface{}) string { - output, _ := json.Marshal(v) - return string(output) -} - -func mustToJson(v interface{}) (string, error) { - output, err := json.Marshal(v) - if err != nil { - return "", err - } - return string(output), nil -} - -// toPrettyJson encodes an item into a pretty (indented) JSON string -func toPrettyJson(v interface{}) string { - output, _ := json.MarshalIndent(v, "", " ") - return string(output) -} - -func mustToPrettyJson(v interface{}) (string, error) { - output, err := json.MarshalIndent(v, "", " ") - if err != nil { - return "", err - } - return string(output), nil -} - -// toRawJson encodes an item into a JSON string with no escaping of HTML characters. -func toRawJson(v interface{}) string { - output, err := mustToRawJson(v) - if err != nil { - panic(err) - } - return string(output) -} - -// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. -func mustToRawJson(v interface{}) (string, error) { - buf := new(bytes.Buffer) - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err := enc.Encode(&v) - if err != nil { - return "", err - } - return strings.TrimSuffix(buf.String(), "\n"), nil -} - -// ternary returns the first value if the last value is true, otherwise returns the second value. 
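// A hedged sketch of the JSON helpers and ternary defined here, as invoked
// through the FuncMap (expected renderings on the right):
//
//	{{ toJson (dict "a" 1) }}      -> {"a":1}
//	{{ fromJson "{\"a\":1}" }}     -> map[a:1]
//	{{ ternary "yes" "no" true }}  -> yes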
-func ternary(vt interface{}, vf interface{}, v bool) interface{} { - if v { - return vt - } - - return vf -} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go deleted file mode 100644 index ade88969840..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/dict.go +++ /dev/null @@ -1,174 +0,0 @@ -package sprig - -import ( - "github.com/imdario/mergo" - "github.com/mitchellh/copystructure" -) - -func get(d map[string]interface{}, key string) interface{} { - if val, ok := d[key]; ok { - return val - } - return "" -} - -func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { - d[key] = value - return d -} - -func unset(d map[string]interface{}, key string) map[string]interface{} { - delete(d, key) - return d -} - -func hasKey(d map[string]interface{}, key string) bool { - _, ok := d[key] - return ok -} - -func pluck(key string, d ...map[string]interface{}) []interface{} { - res := []interface{}{} - for _, dict := range d { - if val, ok := dict[key]; ok { - res = append(res, val) - } - } - return res -} - -func keys(dicts ...map[string]interface{}) []string { - k := []string{} - for _, dict := range dicts { - for key := range dict { - k = append(k, key) - } - } - return k -} - -func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - for _, k := range keys { - if v, ok := dict[k]; ok { - res[k] = v - } - } - return res -} - -func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { - res := map[string]interface{}{} - - omit := make(map[string]bool, len(keys)) - for _, k := range keys { - omit[k] = true - } - - for k, v := range dict { - if _, ok := omit[k]; !ok { - res[k] = v - } - } - return res -} - -func dict(v ...interface{}) map[string]interface{} { - dict := map[string]interface{}{} - lenv := len(v) - for i := 0; i < lenv; i += 2 { - key := strval(v[i]) - if i+1 >= lenv { - dict[key] = "" - continue - } - dict[key] = v[i+1] - } - return dict -} - -func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - // Swallow errors inside of a template. - return "" - } - } - return dst -} - -func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.Merge(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - // Swallow errors inside of a template. 
- return "" - } - } - return dst -} - -func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { - for _, src := range srcs { - if err := mergo.MergeWithOverwrite(&dst, src); err != nil { - return nil, err - } - } - return dst, nil -} - -func values(dict map[string]interface{}) []interface{} { - values := []interface{}{} - for _, value := range dict { - values = append(values, value) - } - - return values -} - -func deepCopy(i interface{}) interface{} { - c, err := mustDeepCopy(i) - if err != nil { - panic("deepCopy error: " + err.Error()) - } - - return c -} - -func mustDeepCopy(i interface{}) (interface{}, error) { - return copystructure.Copy(i) -} - -func dig(ps ...interface{}) (interface{}, error) { - if len(ps) < 3 { - panic("dig needs at least three arguments") - } - dict := ps[len(ps)-1].(map[string]interface{}) - def := ps[len(ps)-2] - ks := make([]string, len(ps)-2) - for i := 0; i < len(ks); i++ { - ks[i] = ps[i].(string) - } - - return digFromDict(dict, def, ks) -} - -func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { - k, ns := ks[0], ks[1:len(ks)] - step, has := dict[k] - if !has { - return d, nil - } - if len(ns) == 0 { - return step, nil - } - return digFromDict(step.(map[string]interface{}), d, ns) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go deleted file mode 100644 index aabb9d4489f..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Package sprig provides template functions for Go. - -This package contains a number of utility functions for working with data -inside of Go `html/template` and `text/template` files. - -To add these functions, use the `template.Funcs()` method: - - t := templates.New("foo").Funcs(sprig.FuncMap()) - -Note that you should add the function map before you parse any template files. - - In several cases, Sprig reverses the order of arguments from the way they - appear in the standard library. This is to make it easier to pipe - arguments into functions. - -See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. -*/ -package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go deleted file mode 100644 index 57fcec1d9ea..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/functions.go +++ /dev/null @@ -1,382 +0,0 @@ -package sprig - -import ( - "errors" - "html/template" - "math/rand" - "os" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - ttemplate "text/template" - "time" - - util "github.com/Masterminds/goutils" - "github.com/huandu/xstrings" - "github.com/shopspring/decimal" -) - -// FuncMap produces the function map. -// -// Use this to pass the functions into the template engine: -// -// tpl := template.New("foo").Funcs(sprig.FuncMap())) -// -func FuncMap() template.FuncMap { - return HtmlFuncMap() -} - -// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. -func HermeticTxtFuncMap() ttemplate.FuncMap { - r := TxtFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
-func HermeticHtmlFuncMap() template.FuncMap { - r := HtmlFuncMap() - for _, name := range nonhermeticFunctions { - delete(r, name) - } - return r -} - -// TxtFuncMap returns a 'text/template'.FuncMap -func TxtFuncMap() ttemplate.FuncMap { - return ttemplate.FuncMap(GenericFuncMap()) -} - -// HtmlFuncMap returns an 'html/template'.Funcmap -func HtmlFuncMap() template.FuncMap { - return template.FuncMap(GenericFuncMap()) -} - -// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. -func GenericFuncMap() map[string]interface{} { - gfm := make(map[string]interface{}, len(genericMap)) - for k, v := range genericMap { - gfm[k] = v - } - return gfm -} - -// These functions are not guaranteed to evaluate to the same result for given input, because they -// refer to the environment or global state. -var nonhermeticFunctions = []string{ - // Date functions - "date", - "date_in_zone", - "date_modify", - "now", - "htmlDate", - "htmlDateInZone", - "dateInZone", - "dateModify", - - // Strings - "randAlphaNum", - "randAlpha", - "randAscii", - "randNumeric", - "randBytes", - "uuidv4", - - // OS - "env", - "expandenv", - - // Network - "getHostByName", -} - -var genericMap = map[string]interface{}{ - "hello": func() string { return "Hello!" }, - - // Date functions - "ago": dateAgo, - "date": date, - "date_in_zone": dateInZone, - "date_modify": dateModify, - "dateInZone": dateInZone, - "dateModify": dateModify, - "duration": duration, - "durationRound": durationRound, - "htmlDate": htmlDate, - "htmlDateInZone": htmlDateInZone, - "must_date_modify": mustDateModify, - "mustDateModify": mustDateModify, - "mustToDate": mustToDate, - "now": time.Now, - "toDate": toDate, - "unixEpoch": unixEpoch, - - // Strings - "abbrev": abbrev, - "abbrevboth": abbrevboth, - "trunc": trunc, - "trim": strings.TrimSpace, - "upper": strings.ToUpper, - "lower": strings.ToLower, - "title": strings.Title, - "untitle": untitle, - "substr": substring, - // Switch order so that "foo" | repeat 5 - "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, - // Deprecated: Use trimAll. 
- "trimall": func(a, b string) string { return strings.Trim(b, a) }, - // Switch order so that "$foo" | trimall "$" - "trimAll": func(a, b string) string { return strings.Trim(b, a) }, - "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, - "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, - "nospace": util.DeleteWhiteSpace, - "initials": initials, - "randAlphaNum": randAlphaNumeric, - "randAlpha": randAlpha, - "randAscii": randAscii, - "randNumeric": randNumeric, - "swapcase": util.SwapCase, - "shuffle": xstrings.Shuffle, - "snakecase": xstrings.ToSnakeCase, - "camelcase": xstrings.ToCamelCase, - "kebabcase": xstrings.ToKebabCase, - "wrap": func(l int, s string) string { return util.Wrap(s, l) }, - "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, - // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "adler32sum": adler32sum, - "toString": strval, - - // Wrap Atoi to stop errors. - "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, - "seq": seq, - "toDecimal": toDecimal, - - //"gt": func(a, b int) bool {return a > b}, - //"gte": func(a, b int) bool {return a >= b}, - //"lt": func(a, b int) bool {return a < b}, - //"lte": func(a, b int) bool {return a <= b}, - - // split "/" foo/bar returns map[int]string{0: foo, 1: bar} - "split": split, - "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, - // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} - "splitn": splitn, - "toStrings": strslice, - - "until": until, - "untilStep": untilStep, - - // VERY basic arithmetic. 
- "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, - "add": func(i ...interface{}) int64 { - var a int64 = 0 - for _, b := range i { - a += toInt64(b) - } - return a - }, - "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, - "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, - "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, - "mul": func(a interface{}, v ...interface{}) int64 { - val := toInt64(a) - for _, b := range v { - val = val * toInt64(b) - } - return val - }, - "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, - "add1f": func(i interface{}) float64 { - return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "addf": func(i ...interface{}) float64 { - a := interface{}(float64(0)) - return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) - }, - "subf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) - }, - "divf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) - }, - "mulf": func(a interface{}, v ...interface{}) float64 { - return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) - }, - "biggest": max, - "max": max, - "min": min, - "maxf": maxf, - "minf": minf, - "ceil": ceil, - "floor": floor, - "round": round, - - // string slices. Note that we reverse the order b/c that's better - // for template processing. - "join": join, - "sortAlpha": sortAlpha, - - // Defaults - "default": dfault, - "empty": empty, - "coalesce": coalesce, - "all": all, - "any": any, - "compact": compact, - "mustCompact": mustCompact, - "fromJson": fromJson, - "toJson": toJson, - "toPrettyJson": toPrettyJson, - "toRawJson": toRawJson, - "mustFromJson": mustFromJson, - "mustToJson": mustToJson, - "mustToPrettyJson": mustToPrettyJson, - "mustToRawJson": mustToRawJson, - "ternary": ternary, - "deepCopy": deepCopy, - "mustDeepCopy": mustDeepCopy, - - // Reflection - "typeOf": typeOf, - "typeIs": typeIs, - "typeIsLike": typeIsLike, - "kindOf": kindOf, - "kindIs": kindIs, - "deepEqual": reflect.DeepEqual, - - // OS: - "env": os.Getenv, - "expandenv": os.ExpandEnv, - - // Network: - "getHostByName": getHostByName, - - // Paths: - "base": path.Base, - "dir": path.Dir, - "clean": path.Clean, - "ext": path.Ext, - "isAbs": path.IsAbs, - - // Filepaths: - "osBase": filepath.Base, - "osClean": filepath.Clean, - "osDir": filepath.Dir, - "osExt": filepath.Ext, - "osIsAbs": filepath.IsAbs, - - // Encoding: - "b64enc": base64encode, - "b64dec": base64decode, - "b32enc": base32encode, - "b32dec": base32decode, - - // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
- "list": list, - "dict": dict, - "get": get, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, - "mergeOverwrite": mergeOverwrite, - "mustMerge": mustMerge, - "mustMergeOverwrite": mustMergeOverwrite, - "values": values, - - "append": push, "push": push, - "mustAppend": mustPush, "mustPush": mustPush, - "prepend": prepend, - "mustPrepend": mustPrepend, - "first": first, - "mustFirst": mustFirst, - "rest": rest, - "mustRest": mustRest, - "last": last, - "mustLast": mustLast, - "initial": initial, - "mustInitial": mustInitial, - "reverse": reverse, - "mustReverse": mustReverse, - "uniq": uniq, - "mustUniq": mustUniq, - "without": without, - "mustWithout": mustWithout, - "has": has, - "mustHas": mustHas, - "slice": slice, - "mustSlice": mustSlice, - "concat": concat, - "dig": dig, - "chunk": chunk, - "mustChunk": mustChunk, - - // Crypto: - "bcrypt": bcrypt, - "htpasswd": htpasswd, - "genPrivateKey": generatePrivateKey, - "derivePassword": derivePassword, - "buildCustomCert": buildCustomCertificate, - "genCA": generateCertificateAuthority, - "genCAWithKey": generateCertificateAuthorityWithPEMKey, - "genSelfSignedCert": generateSelfSignedCertificate, - "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, - "genSignedCert": generateSignedCertificate, - "genSignedCertWithKey": generateSignedCertificateWithPEMKey, - "encryptAES": encryptAES, - "decryptAES": decryptAES, - "randBytes": randBytes, - - // UUIDs: - "uuidv4": uuidv4, - - // SemVer: - "semver": semver, - "semverCompare": semverCompare, - - // Flow Control: - "fail": func(msg string) (string, error) { return "", errors.New(msg) }, - - // Regex - "regexMatch": regexMatch, - "mustRegexMatch": mustRegexMatch, - "regexFindAll": regexFindAll, - "mustRegexFindAll": mustRegexFindAll, - "regexFind": regexFind, - "mustRegexFind": mustRegexFind, - "regexReplaceAll": regexReplaceAll, - "mustRegexReplaceAll": mustRegexReplaceAll, - "regexReplaceAllLiteral": regexReplaceAllLiteral, - "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, - "regexSplit": regexSplit, - "mustRegexSplit": mustRegexSplit, - "regexQuoteMeta": regexQuoteMeta, - - // URLs: - "urlParse": urlParse, - "urlJoin": urlJoin, -} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go deleted file mode 100644 index ca0fbb78932..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/list.go +++ /dev/null @@ -1,464 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// Reflection is used in these functions so that slices and arrays of strings, -// ints, and other types not implementing []interface{} can be worked with. -// For example, this is useful if you need to work on the output of regexs. 
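// As a hedged illustration of that reflection-based design: a typed slice
// such as the []string returned by splitList can be fed straight into
// these helpers from a template,
//
//	{{ first (splitList "," "x,y,z") }}  -> x
//	{{ rest (splitList "," "x,y,z") }}   -> [y z]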
- -func list(v ...interface{}) []interface{} { - return v -} - -func push(list interface{}, v interface{}) []interface{} { - l, err := mustPush(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPush(list interface{}, v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append(nl, v), nil - - default: - return nil, fmt.Errorf("Cannot push on type %s", tp) - } -} - -func prepend(list interface{}, v interface{}) []interface{} { - l, err := mustPrepend(list, v) - if err != nil { - panic(err) - } - - return l -} - -func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { - //return append([]interface{}{v}, list...) - - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[i] = l2.Index(i).Interface() - } - - return append([]interface{}{v}, nl...), nil - - default: - return nil, fmt.Errorf("Cannot prepend on type %s", tp) - } -} - -func chunk(size int, list interface{}) [][]interface{} { - l, err := mustChunk(size, list) - if err != nil { - panic(err) - } - - return l -} - -func mustChunk(size int, list interface{}) ([][]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - - cs := int(math.Floor(float64(l-1)/float64(size)) + 1) - nl := make([][]interface{}, cs) - - for i := 0; i < cs; i++ { - clen := size - if i == cs-1 { - clen = int(math.Floor(math.Mod(float64(l), float64(size)))) - if clen == 0 { - clen = size - } - } - - nl[i] = make([]interface{}, clen) - - for j := 0; j < clen; j++ { - ix := i*size + j - nl[i][j] = l2.Index(ix).Interface() - } - } - - return nl, nil - - default: - return nil, fmt.Errorf("Cannot chunk type %s", tp) - } -} - -func last(list interface{}) interface{} { - l, err := mustLast(list) - if err != nil { - panic(err) - } - - return l -} - -func mustLast(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(l - 1).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find last on type %s", tp) - } -} - -func first(list interface{}) interface{} { - l, err := mustFirst(list) - if err != nil { - panic(err) - } - - return l -} - -func mustFirst(list interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - return l2.Index(0).Interface(), nil - default: - return nil, fmt.Errorf("Cannot find first on type %s", tp) - } -} - -func rest(list interface{}) []interface{} { - l, err := mustRest(list) - if err != nil { - panic(err) - } - - return l -} - -func mustRest(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 1; i < l; i++ { - nl[i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) - } -} - -func initial(list interface{}) []interface{} { - l, err := mustInitial(list) - if err != nil { - panic(err) - } - - return l -} - -func mustInitial(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - nl := make([]interface{}, l-1) - for i := 0; i < l-1; i++ { - nl[i] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find initial on type %s", tp) - } -} - -func sortAlpha(list interface{}) []string { - k := reflect.Indirect(reflect.ValueOf(list)).Kind() - switch k { - case reflect.Slice, reflect.Array: - a := strslice(list) - s := sort.StringSlice(a) - s.Sort() - return s - } - return []string{strval(list)} -} - -func reverse(v interface{}) []interface{} { - l, err := mustReverse(v) - if err != nil { - panic(err) - } - - return l -} - -func mustReverse(v interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(v).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(v) - - l := l2.Len() - // We do not sort in place because the incoming array should not be altered. - nl := make([]interface{}, l) - for i := 0; i < l; i++ { - nl[l-i-1] = l2.Index(i).Interface() - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot find reverse on type %s", tp) - } -} - -func compact(list interface{}) []interface{} { - l, err := mustCompact(list) - if err != nil { - panic(err) - } - - return l -} - -func mustCompact(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - nl := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !empty(item) { - nl = append(nl, item) - } - } - - return nl, nil - default: - return nil, fmt.Errorf("Cannot compact on type %s", tp) - } -} - -func uniq(list interface{}) []interface{} { - l, err := mustUniq(list) - if err != nil { - panic(err) - } - - return l -} - -func mustUniq(list interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - dest := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(dest, item) { - dest = append(dest, item) - } - } - - return dest, nil - default: - return nil, fmt.Errorf("Cannot find uniq on type %s", tp) - } -} - -func inList(haystack []interface{}, needle interface{}) bool { - for _, h := range haystack { - if reflect.DeepEqual(needle, h) { - return true - } - } - return false -} - -func without(list interface{}, omit ...interface{}) []interface{} { - l, err := mustWithout(list, omit...) 
- if err != nil { - panic(err) - } - - return l -} - -func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - res := []interface{}{} - var item interface{} - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if !inList(omit, item) { - res = append(res, item) - } - } - - return res, nil - default: - return nil, fmt.Errorf("Cannot find without on type %s", tp) - } -} - -func has(needle interface{}, haystack interface{}) bool { - l, err := mustHas(needle, haystack) - if err != nil { - panic(err) - } - - return l -} - -func mustHas(needle interface{}, haystack interface{}) (bool, error) { - if haystack == nil { - return false, nil - } - tp := reflect.TypeOf(haystack).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(haystack) - var item interface{} - l := l2.Len() - for i := 0; i < l; i++ { - item = l2.Index(i).Interface() - if reflect.DeepEqual(needle, item) { - return true, nil - } - } - - return false, nil - default: - return false, fmt.Errorf("Cannot find has on type %s", tp) - } -} - -// $list := [1, 2, 3, 4, 5] -// slice $list -> list[0:5] = list[:] -// slice $list 0 3 -> list[0:3] = list[:3] -// slice $list 3 5 -> list[3:5] -// slice $list 3 -> list[3:5] = list[3:] -func slice(list interface{}, indices ...interface{}) interface{} { - l, err := mustSlice(list, indices...) - if err != nil { - panic(err) - } - - return l -} - -func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - - l := l2.Len() - if l == 0 { - return nil, nil - } - - var start, end int - if len(indices) > 0 { - start = toInt(indices[0]) - } - if len(indices) < 2 { - end = l - } else { - end = toInt(indices[1]) - } - - return l2.Slice(start, end).Interface(), nil - default: - return nil, fmt.Errorf("list should be type of slice or array but %s", tp) - } -} - -func concat(lists ...interface{}) interface{} { - var res []interface{} - for _, list := range lists { - tp := reflect.TypeOf(list).Kind() - switch tp { - case reflect.Slice, reflect.Array: - l2 := reflect.ValueOf(list) - for i := 0; i < l2.Len(); i++ { - res = append(res, l2.Index(i).Interface()) - } - default: - panic(fmt.Sprintf("Cannot concat type %s as list", tp)) - } - } - return res -} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go deleted file mode 100644 index 108d78a9462..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/network.go +++ /dev/null @@ -1,12 +0,0 @@ -package sprig - -import ( - "math/rand" - "net" -) - -func getHostByName(name string) string { - addrs, _ := net.LookupHost(name) - //TODO: add error handing when release v3 comes out - return addrs[rand.Intn(len(addrs))] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go deleted file mode 100644 index f68e4182ee6..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/numeric.go +++ /dev/null @@ -1,186 +0,0 @@ -package sprig - -import ( - "fmt" - "math" - "strconv" - "strings" - - "github.com/spf13/cast" - "github.com/shopspring/decimal" -) - -// toFloat64 converts 64-bit floats -func toFloat64(v interface{}) float64 { - return cast.ToFloat64(v) -} - -func toInt(v interface{}) int { - return cast.ToInt(v) -} - -// 
toInt64 converts integer types to 64-bit integers -func toInt64(v interface{}) int64 { - return cast.ToInt64(v) -} - -func max(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb > aa { - aa = bb - } - } - return aa -} - -func maxf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Max(aa, bb) - } - return aa -} - -func min(a interface{}, i ...interface{}) int64 { - aa := toInt64(a) - for _, b := range i { - bb := toInt64(b) - if bb < aa { - aa = bb - } - } - return aa -} - -func minf(a interface{}, i ...interface{}) float64 { - aa := toFloat64(a) - for _, b := range i { - bb := toFloat64(b) - aa = math.Min(aa, bb) - } - return aa -} - -func until(count int) []int { - step := 1 - if count < 0 { - step = -1 - } - return untilStep(0, count, step) -} - -func untilStep(start, stop, step int) []int { - v := []int{} - - if stop < start { - if step >= 0 { - return v - } - for i := start; i > stop; i += step { - v = append(v, i) - } - return v - } - - if step <= 0 { - return v - } - for i := start; i < stop; i += step { - v = append(v, i) - } - return v -} - -func floor(a interface{}) float64 { - aa := toFloat64(a) - return math.Floor(aa) -} - -func ceil(a interface{}) float64 { - aa := toFloat64(a) - return math.Ceil(aa) -} - -func round(a interface{}, p int, rOpt ...float64) float64 { - roundOn := .5 - if len(rOpt) > 0 { - roundOn = rOpt[0] - } - val := toFloat64(a) - places := toFloat64(p) - - var round float64 - pow := math.Pow(10, places) - digit := pow * val - _, div := math.Modf(digit) - if div >= roundOn { - round = math.Ceil(digit) - } else { - round = math.Floor(digit) - } - return round / pow -} - -// converts unix octal to decimal -func toDecimal(v interface{}) int64 { - result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) - if err != nil { - return 0 - } - return result -} - -func seq(params ...int) string { - increment := 1 - switch len(params) { - case 0: - return "" - case 1: - start := 1 - end := params[0] - if end < start { - increment = -1 - } - return intArrayToString(untilStep(start, end+increment, increment), " ") - case 3: - start := params[0] - end := params[2] - step := params[1] - if end < start { - increment = -1 - if step > 0 { - return "" - } - } - return intArrayToString(untilStep(start, end+increment, step), " ") - case 2: - start := params[0] - end := params[1] - step := 1 - if end < start { - step = -1 - } - return intArrayToString(untilStep(start, end+step, step), " ") - default: - return "" - } -} - -func intArrayToString(slice []int, delimeter string) string { - return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") -} - -// performs a float and subsequent decimal.Decimal conversion on inputs, -// and iterates through a and b executing the mathmetical operation f -func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { - prt := decimal.NewFromFloat(toFloat64(a)) - for _, x := range b { - dx := decimal.NewFromFloat(toFloat64(x)) - prt = f(prt, dx) - } - rslt, _ := prt.Float64() - return rslt -} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go deleted file mode 100644 index 8a65c132f08..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/reflect.go +++ /dev/null @@ -1,28 +0,0 @@ -package sprig - -import ( - "fmt" - "reflect" -) - -// typeIs returns true if the src is the type named in 
target. -func typeIs(target string, src interface{}) bool { - return target == typeOf(src) -} - -func typeIsLike(target string, src interface{}) bool { - t := typeOf(src) - return target == t || "*"+target == t -} - -func typeOf(src interface{}) string { - return fmt.Sprintf("%T", src) -} - -func kindIs(target string, src interface{}) bool { - return target == kindOf(src) -} - -func kindOf(src interface{}) string { - return reflect.ValueOf(src).Kind().String() -} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go deleted file mode 100644 index fab55101897..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/regex.go +++ /dev/null @@ -1,83 +0,0 @@ -package sprig - -import ( - "regexp" -) - -func regexMatch(regex string, s string) bool { - match, _ := regexp.MatchString(regex, s) - return match -} - -func mustRegexMatch(regex string, s string) (bool, error) { - return regexp.MatchString(regex, s) -} - -func regexFindAll(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.FindAllString(s, n) -} - -func mustRegexFindAll(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.FindAllString(s, n), nil -} - -func regexFind(regex string, s string) string { - r := regexp.MustCompile(regex) - return r.FindString(s) -} - -func mustRegexFind(regex string, s string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.FindString(s), nil -} - -func regexReplaceAll(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllString(s, repl) -} - -func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllString(s, repl), nil -} - -func regexReplaceAllLiteral(regex string, s string, repl string) string { - r := regexp.MustCompile(regex) - return r.ReplaceAllLiteralString(s, repl) -} - -func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return "", err - } - return r.ReplaceAllLiteralString(s, repl), nil -} - -func regexSplit(regex string, s string, n int) []string { - r := regexp.MustCompile(regex) - return r.Split(s, n) -} - -func mustRegexSplit(regex string, s string, n int) ([]string, error) { - r, err := regexp.Compile(regex) - if err != nil { - return []string{}, err - } - return r.Split(s, n), nil -} - -func regexQuoteMeta(s string) string { - return regexp.QuoteMeta(s) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go deleted file mode 100644 index 3fbe08aa637..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/semver.go +++ /dev/null @@ -1,23 +0,0 @@ -package sprig - -import ( - sv2 "github.com/Masterminds/semver/v3" -) - -func semverCompare(constraint, version string) (bool, error) { - c, err := sv2.NewConstraint(constraint) - if err != nil { - return false, err - } - - v, err := sv2.NewVersion(version) - if err != nil { - return false, err - } - - return c.Check(v), nil -} - -func semver(version string) (*sv2.Version, error) { - return sv2.NewVersion(version) -} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go deleted file mode 100644 index e0ae628c841..00000000000 --- 
a/vendor/github.com/Masterminds/sprig/v3/strings.go +++ /dev/null @@ -1,236 +0,0 @@ -package sprig - -import ( - "encoding/base32" - "encoding/base64" - "fmt" - "reflect" - "strconv" - "strings" - - util "github.com/Masterminds/goutils" -) - -func base64encode(v string) string { - return base64.StdEncoding.EncodeToString([]byte(v)) -} - -func base64decode(v string) string { - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func base32encode(v string) string { - return base32.StdEncoding.EncodeToString([]byte(v)) -} - -func base32decode(v string) string { - data, err := base32.StdEncoding.DecodeString(v) - if err != nil { - return err.Error() - } - return string(data) -} - -func abbrev(width int, s string) string { - if width < 4 { - return s - } - r, _ := util.Abbreviate(s, width) - return r -} - -func abbrevboth(left, right int, s string) string { - if right < 4 || left > 0 && right < 7 { - return s - } - r, _ := util.AbbreviateFull(s, left, right) - return r -} -func initials(s string) string { - // Wrap this just to eliminate the var args, which templates don't do well. - return util.Initials(s) -} - -func randAlphaNumeric(count int) string { - // It is not possible, it appears, to actually generate an error here. - r, _ := util.CryptoRandomAlphaNumeric(count) - return r -} - -func randAlpha(count int) string { - r, _ := util.CryptoRandomAlphabetic(count) - return r -} - -func randAscii(count int) string { - r, _ := util.CryptoRandomAscii(count) - return r -} - -func randNumeric(count int) string { - r, _ := util.CryptoRandomNumeric(count) - return r -} - -func untitle(str string) string { - return util.Uncapitalize(str) -} - -func quote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("%q", strval(s))) - } - } - return strings.Join(out, " ") -} - -func squote(str ...interface{}) string { - out := make([]string, 0, len(str)) - for _, s := range str { - if s != nil { - out = append(out, fmt.Sprintf("'%v'", s)) - } - } - return strings.Join(out, " ") -} - -func cat(v ...interface{}) string { - v = removeNilElements(v) - r := strings.TrimSpace(strings.Repeat("%v ", len(v))) - return fmt.Sprintf(r, v...) 
-} - -func indent(spaces int, v string) string { - pad := strings.Repeat(" ", spaces) - return pad + strings.Replace(v, "\n", "\n"+pad, -1) -} - -func nindent(spaces int, v string) string { - return "\n" + indent(spaces, v) -} - -func replace(old, new, src string) string { - return strings.Replace(src, old, new, -1) -} - -func plural(one, many string, count int) string { - if count == 1 { - return one - } - return many -} - -func strslice(v interface{}) []string { - switch v := v.(type) { - case []string: - return v - case []interface{}: - b := make([]string, 0, len(v)) - for _, s := range v { - if s != nil { - b = append(b, strval(s)) - } - } - return b - default: - val := reflect.ValueOf(v) - switch val.Kind() { - case reflect.Array, reflect.Slice: - l := val.Len() - b := make([]string, 0, l) - for i := 0; i < l; i++ { - value := val.Index(i).Interface() - if value != nil { - b = append(b, strval(value)) - } - } - return b - default: - if v == nil { - return []string{} - } - - return []string{strval(v)} - } - } -} - -func removeNilElements(v []interface{}) []interface{} { - newSlice := make([]interface{}, 0, len(v)) - for _, i := range v { - if i != nil { - newSlice = append(newSlice, i) - } - } - return newSlice -} - -func strval(v interface{}) string { - switch v := v.(type) { - case string: - return v - case []byte: - return string(v) - case error: - return v.Error() - case fmt.Stringer: - return v.String() - default: - return fmt.Sprintf("%v", v) - } -} - -func trunc(c int, s string) string { - if c < 0 && len(s)+c > 0 { - return s[len(s)+c:] - } - if c >= 0 && len(s) > c { - return s[:c] - } - return s -} - -func join(sep string, v interface{}) string { - return strings.Join(strslice(v), sep) -} - -func split(sep, orig string) map[string]string { - parts := strings.Split(orig, sep) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -func splitn(sep string, n int, orig string) map[string]string { - parts := strings.SplitN(orig, sep, n) - res := make(map[string]string, len(parts)) - for i, v := range parts { - res["_"+strconv.Itoa(i)] = v - } - return res -} - -// substring creates a substring of the given string. -// -// If start is < 0, this calls string[:end]. -// -// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] -// -// Otherwise, this calls string[start, end]. 
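One design choice worth noting in the helpers above: `split` and `splitn` return maps keyed `_0`, `_1`, … instead of slices, so template code can address the pieces as fields. A short sketch of why that matters, reimplementing the same pattern against the standard library:

```go
package main

import (
	"os"
	"strconv"
	"strings"
	"text/template"
)

// splitToMap mirrors the split helper above: each piece of the split
// string is stored under a "_<index>" key so templates can address it
// with field syntax.
func splitToMap(sep, orig string) map[string]string {
	parts := strings.Split(orig, sep)
	res := make(map[string]string, len(parts))
	for i, v := range parts {
		res["_"+strconv.Itoa(i)] = v
	}
	return res
}

func main() {
	tpl := template.Must(template.New("demo").
		Funcs(template.FuncMap{"split": splitToMap}).
		Parse(`{{ (split "," .CSV)._1 }}` + "\n"))
	if err := tpl.Execute(os.Stdout, map[string]string{"CSV": "a,b,c"}); err != nil {
		panic(err)
	}
	// Prints: b
}
```

Templates cannot index a slice with `.foo` syntax, so keying by `_<n>` lets `._1`-style access work without reaching for the `index` builtin.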
-func substring(start, end int, s string) string { - if start < 0 { - return s[:end] - } - if end < 0 || end > len(s) { - return s[start:] - } - return s[start:end] -} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go deleted file mode 100644 index b8e120e19ba..00000000000 --- a/vendor/github.com/Masterminds/sprig/v3/url.go +++ /dev/null @@ -1,66 +0,0 @@ -package sprig - -import ( - "fmt" - "net/url" - "reflect" -) - -func dictGetOrEmpty(dict map[string]interface{}, key string) string { - value, ok := dict[key] - if !ok { - return "" - } - tp := reflect.TypeOf(value).Kind() - if tp != reflect.String { - panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) - } - return reflect.ValueOf(value).String() -} - -// parses given URL to return dict object -func urlParse(v string) map[string]interface{} { - dict := map[string]interface{}{} - parsedURL, err := url.Parse(v) - if err != nil { - panic(fmt.Sprintf("unable to parse url: %s", err)) - } - dict["scheme"] = parsedURL.Scheme - dict["host"] = parsedURL.Host - dict["hostname"] = parsedURL.Hostname() - dict["path"] = parsedURL.Path - dict["query"] = parsedURL.RawQuery - dict["opaque"] = parsedURL.Opaque - dict["fragment"] = parsedURL.Fragment - if parsedURL.User != nil { - dict["userinfo"] = parsedURL.User.String() - } else { - dict["userinfo"] = "" - } - - return dict -} - -// join given dict to URL string -func urlJoin(d map[string]interface{}) string { - resURL := url.URL{ - Scheme: dictGetOrEmpty(d, "scheme"), - Host: dictGetOrEmpty(d, "host"), - Path: dictGetOrEmpty(d, "path"), - RawQuery: dictGetOrEmpty(d, "query"), - Opaque: dictGetOrEmpty(d, "opaque"), - Fragment: dictGetOrEmpty(d, "fragment"), - } - userinfo := dictGetOrEmpty(d, "userinfo") - var user *url.Userinfo - if userinfo != "" { - tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) - if err != nil { - panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) - } - user = tempURL.User - } - - resURL.User = user - return resURL.String() -} diff --git a/vendor/github.com/antonmedv/expr/.gitignore b/vendor/github.com/antonmedv/expr/.gitignore deleted file mode 100644 index b0df3eb4442..00000000000 --- a/vendor/github.com/antonmedv/expr/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.exe -*.exe~ -*.dll -*.so -*.dylib -*.test -*.out -*.html diff --git a/vendor/github.com/antonmedv/expr/LICENSE b/vendor/github.com/antonmedv/expr/LICENSE deleted file mode 100644 index 7d058f841cb..00000000000 --- a/vendor/github.com/antonmedv/expr/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Anton Medvedev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/antonmedv/expr/README.md b/vendor/github.com/antonmedv/expr/README.md deleted file mode 100644 index 242431f2ceb..00000000000 --- a/vendor/github.com/antonmedv/expr/README.md +++ /dev/null @@ -1,160 +0,0 @@ -# Expr -[![test](https://github.com/antonmedv/expr/actions/workflows/test.yml/badge.svg)](https://github.com/antonmedv/expr/actions/workflows/test.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/antonmedv/expr)](https://goreportcard.com/report/github.com/antonmedv/expr) -[![GoDoc](https://godoc.org/github.com/antonmedv/expr?status.svg)](https://godoc.org/github.com/antonmedv/expr) - -expr logo - -**Expr** package provides an engine that can compile and evaluate expressions. -An expression is a one-liner that returns a value (mostly, but not limited to, booleans). -It is designed for simplicity, speed and safety. - -The purpose of the package is to allow users to use expressions inside configuration for more complex logic. -It is a perfect candidate for the foundation of a _business rule engine_. -The idea is to let configure things in a dynamic way without recompile of a program: - -```coffeescript -# Get the special price if -user.Group in ["good_customers", "collaborator"] - -# Promote article to the homepage when -len(article.Comments) > 100 and article.Category not in ["misc"] - -# Send an alert when -product.Stock < 15 -``` - -## Features - -* Seamless integration with Go (no need to redefine types) -* Static typing ([example](https://godoc.org/github.com/antonmedv/expr#example-Env)). - ```go - out, err := expr.Compile(`name + age`) - // err: invalid operation + (mismatched types string and int) - // | name + age - // | .....^ - ``` -* User-friendly error messages. -* Reasonable set of basic operators. -* Builtins `all`, `none`, `any`, `one`, `filter`, `map`. - ```coffeescript - all(Tweets, {.Size <= 280}) - ``` -* Fast ([benchmarks](https://github.com/antonmedv/golang-expression-evaluation-comparison#readme)): uses bytecode virtual machine and optimizing compiler. - -## Install - -``` -go get github.com/antonmedv/expr -``` - -## Documentation - -* See [Getting Started](https://expr.medv.io/docs/Getting-Started) page for developer documentation. -* See [Language Definition](https://expr.medv.io/docs/Language-Definition) page to learn the syntax. - -## Expr Code Editor - - - Expr Code Editor - - -Also, I have an embeddable code editor written in JavaScript which allows editing expressions with syntax highlighting and autocomplete based on your types declaration. 
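The static-typing bullet above shows only the compiler's error output; for completeness, a runnable sketch of how that check is wired up (the struct and field names here are illustrative, not from the README):

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

// Env declares the identifiers and their Go types. expr.Env makes
// Compile type-check the expression against it, so type mismatches
// fail at compile time rather than at evaluation.
type Env struct {
	Name string
	Age  int
}

func main() {
	_, err := expr.Compile(`Name + Age`, expr.Env(Env{}))
	fmt.Println(err)
	// Roughly: invalid operation: + (mismatched types string and int)
}
```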
- -[Learn more →](https://antonmedv.github.io/expr/) - -## Examples - -[Play Online](https://play.golang.org/p/z7T8ytJ1T1d) - -```go -package main - -import ( - "fmt" - "github.com/antonmedv/expr" -) - -func main() { - env := map[string]interface{}{ - "greet": "Hello, %v!", - "names": []string{"world", "you"}, - "sprintf": fmt.Sprintf, - } - - code := `sprintf(greet, names[0])` - - program, err := expr.Compile(code, expr.Env(env)) - if err != nil { - panic(err) - } - - output, err := expr.Run(program, env) - if err != nil { - panic(err) - } - - fmt.Println(output) -} -``` - -[Play Online](https://play.golang.org/p/4S4brsIvU4i) - -```go -package main - -import ( - "fmt" - "github.com/antonmedv/expr" -) - -type Tweet struct { - Len int -} - -type Env struct { - Tweets []Tweet -} - -func main() { - code := `all(Tweets, {.Len <= 240})` - - program, err := expr.Compile(code, expr.Env(Env{})) - if err != nil { - panic(err) - } - - env := Env{ - Tweets: []Tweet{{42}, {98}, {69}}, - } - output, err := expr.Run(program, env) - if err != nil { - panic(err) - } - - fmt.Println(output) -} -``` - -## Who uses Expr? - -* [Aviasales](https://aviasales.ru) uses Expr as a business rule engine for our flight search engine. -* [Wish.com](https://www.wish.com) uses Expr for decision-making rule engine in the Wish Assistant. -* [Argo](https://argoproj.github.io) uses Expr in Argo Rollouts and Argo Workflows for Kubernetes. -* [Crowdsec](https://crowdsec.net) uses Expr in a security automation tool. -* [FACEIT](https://www.faceit.com) uses Expr to allow customization of its eSports matchmaking algorithm. -* [qiniu](https://www.qiniu.com) uses Expr in trade systems. -* [Junglee Games](https://www.jungleegames.com/) uses Expr for an in house marketing retention tool [Project Audience](https://www.linkedin.com/pulse/meet-project-audience-our-no-code-swiss-army-knife-product-bharti). -* [OpenTelemetry](https://opentelemetry.io) uses Expr in the OpenTelemetry Collector. -* [Philips Labs](https://github.com/philips-labs/tabia) uses Expr in Tabia, a tool for collecting insights on the characteristics of our code bases. -* [CoreDNS](https://coredns.io) uses Expr in CoreDNS, a DNS server. -* [Chaos Mesh](https://chaos-mesh.org) uses Expr in Chaos Mesh, a cloud-native Chaos Engineering platform. -* [Milvus](https://milvus.io) uses Expr in Milvus, an open-source vector database. -* [Visually.io](https://visually.io) uses Expr as a business rule engine for our personalization targeting algorithm. -* [Akvorado](https://github.com/akvorado/akvorado) uses Expr to classify exporters and interfaces in network flows. - -[Add your company too](https://github.com/antonmedv/expr/edit/master/README.md) - -## License - -[MIT](https://github.com/antonmedv/expr/blob/master/LICENSE) diff --git a/vendor/github.com/antonmedv/expr/ast/node.go b/vendor/github.com/antonmedv/expr/ast/node.go deleted file mode 100644 index e85f853e91f..00000000000 --- a/vendor/github.com/antonmedv/expr/ast/node.go +++ /dev/null @@ -1,169 +0,0 @@ -package ast - -import ( - "reflect" - "regexp" - - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/file" -) - -// Node represents items of abstract syntax tree. 
-type Node interface { - Location() file.Location - SetLocation(file.Location) - Type() reflect.Type - SetType(reflect.Type) -} - -func Patch(node *Node, newNode Node) { - newNode.SetType((*node).Type()) - newNode.SetLocation((*node).Location()) - *node = newNode -} - -type base struct { - loc file.Location - nodeType reflect.Type -} - -func (n *base) Location() file.Location { - return n.loc -} - -func (n *base) SetLocation(loc file.Location) { - n.loc = loc -} - -func (n *base) Type() reflect.Type { - return n.nodeType -} - -func (n *base) SetType(t reflect.Type) { - n.nodeType = t -} - -type NilNode struct { - base -} - -type IdentifierNode struct { - base - Value string - Deref bool - FieldIndex []int - Method bool // true if method, false if field - MethodIndex int // index of method, set only if Method is true -} - -type IntegerNode struct { - base - Value int -} - -type FloatNode struct { - base - Value float64 -} - -type BoolNode struct { - base - Value bool -} - -type StringNode struct { - base - Value string -} - -type ConstantNode struct { - base - Value interface{} -} - -type UnaryNode struct { - base - Operator string - Node Node -} - -type BinaryNode struct { - base - Regexp *regexp.Regexp - Operator string - Left Node - Right Node -} - -type ChainNode struct { - base - Node Node -} - -type MemberNode struct { - base - Node Node - Property Node - Name string // Name of the filed or method. Used for error reporting. - Optional bool - Deref bool - FieldIndex []int - - // TODO: Replace with a single MethodIndex field of &int type. - Method bool - MethodIndex int -} - -type SliceNode struct { - base - Node Node - From Node - To Node -} - -type CallNode struct { - base - Callee Node - Arguments []Node - Typed int - Fast bool - Func *builtin.Function -} - -type BuiltinNode struct { - base - Name string - Arguments []Node -} - -type ClosureNode struct { - base - Node Node -} - -type PointerNode struct { - base -} - -type ConditionalNode struct { - base - Cond Node - Exp1 Node - Exp2 Node -} - -type ArrayNode struct { - base - Nodes []Node -} - -type MapNode struct { - base - Pairs []Node -} - -type PairNode struct { - base - Key Node - Value Node -} diff --git a/vendor/github.com/antonmedv/expr/ast/print.go b/vendor/github.com/antonmedv/expr/ast/print.go deleted file mode 100644 index 56bc7dbe2e3..00000000000 --- a/vendor/github.com/antonmedv/expr/ast/print.go +++ /dev/null @@ -1,59 +0,0 @@ -package ast - -import ( - "fmt" - "reflect" - "regexp" -) - -func Dump(node Node) string { - return dump(reflect.ValueOf(node), "") -} - -func dump(v reflect.Value, ident string) string { - if !v.IsValid() { - return "nil" - } - t := v.Type() - switch t.Kind() { - case reflect.Struct: - out := t.Name() + "{\n" - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if isPrivate(f.Name) { - continue - } - s := v.Field(i) - out += fmt.Sprintf("%v%v: %v,\n", ident+"\t", f.Name, dump(s, ident+"\t")) - } - return out + ident + "}" - case reflect.Slice: - if v.Len() == 0 { - return t.String() + "{}" - } - out := t.String() + "{\n" - for i := 0; i < v.Len(); i++ { - s := v.Index(i) - out += fmt.Sprintf("%v%v,", ident+"\t", dump(s, ident+"\t")) - if i+1 < v.Len() { - out += "\n" - } - } - return out + "\n" + ident + "}" - case reflect.Ptr: - return dump(v.Elem(), ident) - case reflect.Interface: - return dump(reflect.ValueOf(v.Interface()), ident) - - case reflect.String: - return fmt.Sprintf("%q", v) - default: - return fmt.Sprintf("%v", v) - } -} - -var isCapital = regexp.MustCompile("^[A-Z]") - 
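Taken together, the node types above and the Dump helper from print.go make it easy to inspect what the parser builds. A small sketch, assuming the package paths of this vendored copy:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr/ast"
	"github.com/antonmedv/expr/parser"
)

func main() {
	// Parse produces an AST made of the node types above;
	// ast.Dump pretty-prints it via reflection for debugging.
	tree, err := parser.Parse(`user.Age > 18 and user.Name matches "^A"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(ast.Dump(tree.Node))
}
```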
-func isPrivate(s string) bool { - return !isCapital.Match([]byte(s)) -} diff --git a/vendor/github.com/antonmedv/expr/ast/visitor.go b/vendor/github.com/antonmedv/expr/ast/visitor.go deleted file mode 100644 index 351e5d72b23..00000000000 --- a/vendor/github.com/antonmedv/expr/ast/visitor.go +++ /dev/null @@ -1,68 +0,0 @@ -package ast - -import "fmt" - -type Visitor interface { - Visit(node *Node) -} - -func Walk(node *Node, v Visitor) { - switch n := (*node).(type) { - case *NilNode: - case *IdentifierNode: - case *IntegerNode: - case *FloatNode: - case *BoolNode: - case *StringNode: - case *ConstantNode: - case *UnaryNode: - Walk(&n.Node, v) - case *BinaryNode: - Walk(&n.Left, v) - Walk(&n.Right, v) - case *ChainNode: - Walk(&n.Node, v) - case *MemberNode: - Walk(&n.Node, v) - Walk(&n.Property, v) - case *SliceNode: - Walk(&n.Node, v) - if n.From != nil { - Walk(&n.From, v) - } - if n.To != nil { - Walk(&n.To, v) - } - case *CallNode: - Walk(&n.Callee, v) - for i := range n.Arguments { - Walk(&n.Arguments[i], v) - } - case *BuiltinNode: - for i := range n.Arguments { - Walk(&n.Arguments[i], v) - } - case *ClosureNode: - Walk(&n.Node, v) - case *PointerNode: - case *ConditionalNode: - Walk(&n.Cond, v) - Walk(&n.Exp1, v) - Walk(&n.Exp2, v) - case *ArrayNode: - for i := range n.Nodes { - Walk(&n.Nodes[i], v) - } - case *MapNode: - for i := range n.Pairs { - Walk(&n.Pairs[i], v) - } - case *PairNode: - Walk(&n.Key, v) - Walk(&n.Value, v) - default: - panic(fmt.Sprintf("undefined node type (%T)", node)) - } - - v.Visit(node) -} diff --git a/vendor/github.com/antonmedv/expr/builtin/builtin.go b/vendor/github.com/antonmedv/expr/builtin/builtin.go deleted file mode 100644 index ad9376962ee..00000000000 --- a/vendor/github.com/antonmedv/expr/builtin/builtin.go +++ /dev/null @@ -1,101 +0,0 @@ -package builtin - -import ( - "fmt" - "reflect" -) - -var ( - anyType = reflect.TypeOf(new(interface{})).Elem() - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) -) - -type Function struct { - Name string - Func func(args ...interface{}) (interface{}, error) - Opcode int - Types []reflect.Type - Validate func(args []reflect.Type) (reflect.Type, error) -} - -const ( - Len = iota + 1 - Abs - Int - Float -) - -var Builtins = map[int]*Function{ - Len: { - Name: "len", - Opcode: Len, - Validate: func(args []reflect.Type) (reflect.Type, error) { - if len(args) != 1 { - return anyType, fmt.Errorf("invalid number of arguments for len (expected 1, got %d)", len(args)) - } - switch kind(args[0]) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String, reflect.Interface: - return integerType, nil - } - return anyType, fmt.Errorf("invalid argument for len (type %s)", args[0]) - }, - }, - Abs: { - Name: "abs", - Opcode: Abs, - Validate: func(args []reflect.Type) (reflect.Type, error) { - if len(args) != 1 { - return anyType, fmt.Errorf("invalid number of arguments for abs (expected 1, got %d)", len(args)) - } - switch kind(args[0]) { - case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Interface: - return args[0], nil - } - return anyType, fmt.Errorf("invalid argument for abs (type %s)", args[0]) - }, - }, - Int: { - Name: "int", - Opcode: Int, - Validate: func(args []reflect.Type) (reflect.Type, error) { - if len(args) != 1 { - return anyType, fmt.Errorf("invalid number of arguments for int (expected 1, got %d)", len(args)) - } - switch 
kind(args[0]) { - case reflect.Interface: - return integerType, nil - case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return integerType, nil - case reflect.String: - return integerType, nil - } - return anyType, fmt.Errorf("invalid argument for int (type %s)", args[0]) - }, - }, - Float: { - Name: "float", - Opcode: Float, - Validate: func(args []reflect.Type) (reflect.Type, error) { - if len(args) != 1 { - return anyType, fmt.Errorf("invalid number of arguments for float (expected 1, got %d)", len(args)) - } - switch kind(args[0]) { - case reflect.Interface: - return floatType, nil - case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return floatType, nil - case reflect.String: - return floatType, nil - } - return anyType, fmt.Errorf("invalid argument for float (type %s)", args[0]) - }, - }, -} - -func kind(t reflect.Type) reflect.Kind { - if t == nil { - return reflect.Invalid - } - return t.Kind() -} diff --git a/vendor/github.com/antonmedv/expr/checker/checker.go b/vendor/github.com/antonmedv/expr/checker/checker.go deleted file mode 100644 index 00025a33cee..00000000000 --- a/vendor/github.com/antonmedv/expr/checker/checker.go +++ /dev/null @@ -1,856 +0,0 @@ -package checker - -import ( - "fmt" - "reflect" - "regexp" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/conf" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/parser" - "github.com/antonmedv/expr/vm" -) - -func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) { - if config == nil { - config = conf.New(nil) - } - - v := &visitor{ - config: config, - collections: make([]reflect.Type, 0), - parents: make([]ast.Node, 0), - } - - t, _ = v.visit(tree.Node) - - if v.err != nil { - return t, v.err.Bind(tree.Source) - } - - if v.config.Expect != reflect.Invalid { - switch v.config.Expect { - case reflect.Int, reflect.Int64, reflect.Float64: - if !isNumber(t) && !isAny(t) { - return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t) - } - default: - if t != nil { - if t.Kind() == v.config.Expect { - return t, nil - } - } - return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t) - } - } - - return t, nil -} - -type visitor struct { - config *conf.Config - collections []reflect.Type - parents []ast.Node - err *file.Error -} - -type info struct { - method bool - fn *builtin.Function -} - -func (v *visitor) visit(node ast.Node) (reflect.Type, info) { - var t reflect.Type - var i info - v.parents = append(v.parents, node) - switch n := node.(type) { - case *ast.NilNode: - t, i = v.NilNode(n) - case *ast.IdentifierNode: - t, i = v.IdentifierNode(n) - case *ast.IntegerNode: - t, i = v.IntegerNode(n) - case *ast.FloatNode: - t, i = v.FloatNode(n) - case *ast.BoolNode: - t, i = v.BoolNode(n) - case *ast.StringNode: - t, i = v.StringNode(n) - case *ast.ConstantNode: - t, i = v.ConstantNode(n) - case *ast.UnaryNode: - t, i = v.UnaryNode(n) - case *ast.BinaryNode: - t, i = v.BinaryNode(n) - case *ast.ChainNode: - t, i = v.ChainNode(n) - case *ast.MemberNode: - t, i = v.MemberNode(n) - case *ast.SliceNode: - t, i = v.SliceNode(n) - case *ast.CallNode: - t, i = v.CallNode(n) - case *ast.BuiltinNode: - t, i = v.BuiltinNode(n) - case *ast.ClosureNode: - t, 
i = v.ClosureNode(n) - case *ast.PointerNode: - t, i = v.PointerNode(n) - case *ast.ConditionalNode: - t, i = v.ConditionalNode(n) - case *ast.ArrayNode: - t, i = v.ArrayNode(n) - case *ast.MapNode: - t, i = v.MapNode(n) - case *ast.PairNode: - t, i = v.PairNode(n) - default: - panic(fmt.Sprintf("undefined node type (%T)", node)) - } - v.parents = v.parents[:len(v.parents)-1] - node.SetType(t) - return t, i -} - -func (v *visitor) error(node ast.Node, format string, args ...interface{}) (reflect.Type, info) { - if v.err == nil { // show first error - v.err = &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf(format, args...), - } - } - return anyType, info{} // interface represent undefined type -} - -func (v *visitor) NilNode(*ast.NilNode) (reflect.Type, info) { - return nilType, info{} -} - -func (v *visitor) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info) { - if fn, ok := v.config.Functions[node.Value]; ok { - // Return anyType instead of func type as we don't know the arguments yet. - // The func type can be one of the fn.Types. The type will be resolved - // when the arguments are known in CallNode. - return anyType, info{fn: fn} - } - if v.config.Types == nil { - node.Deref = true - } else if t, ok := v.config.Types[node.Value]; ok { - if t.Ambiguous { - return v.error(node, "ambiguous identifier %v", node.Value) - } - d, c := deref(t.Type) - node.Deref = c - node.Method = t.Method - node.MethodIndex = t.MethodIndex - node.FieldIndex = t.FieldIndex - return d, info{method: t.Method} - } - if v.config.Strict { - return v.error(node, "unknown name %v", node.Value) - } - if v.config.DefaultType != nil { - return v.config.DefaultType, info{} - } - return anyType, info{} -} - -func (v *visitor) IntegerNode(*ast.IntegerNode) (reflect.Type, info) { - return integerType, info{} -} - -func (v *visitor) FloatNode(*ast.FloatNode) (reflect.Type, info) { - return floatType, info{} -} - -func (v *visitor) BoolNode(*ast.BoolNode) (reflect.Type, info) { - return boolType, info{} -} - -func (v *visitor) StringNode(*ast.StringNode) (reflect.Type, info) { - return stringType, info{} -} - -func (v *visitor) ConstantNode(node *ast.ConstantNode) (reflect.Type, info) { - return reflect.TypeOf(node.Value), info{} -} - -func (v *visitor) UnaryNode(node *ast.UnaryNode) (reflect.Type, info) { - t, _ := v.visit(node.Node) - - switch node.Operator { - - case "!", "not": - if isBool(t) { - return boolType, info{} - } - if isAny(t) { - return boolType, info{} - } - - case "+", "-": - if isNumber(t) { - return t, info{} - } - if isAny(t) { - return anyType, info{} - } - - default: - return v.error(node, "unknown operator (%v)", node.Operator) - } - - return v.error(node, `invalid operation: %v (mismatched type %v)`, node.Operator, t) -} - -func (v *visitor) BinaryNode(node *ast.BinaryNode) (reflect.Type, info) { - l, _ := v.visit(node.Left) - r, _ := v.visit(node.Right) - - // check operator overloading - if fns, ok := v.config.Operators[node.Operator]; ok { - t, _, ok := conf.FindSuitableOperatorOverload(fns, v.config.Types, l, r) - if ok { - return t, info{} - } - } - - switch node.Operator { - case "==", "!=": - if isNumber(l) && isNumber(r) { - return boolType, info{} - } - if l == nil || r == nil { // It is possible to compare with nil. 
- return boolType, info{} - } - if l.Kind() == r.Kind() { - return boolType, info{} - } - if isAny(l) || isAny(r) { - return boolType, info{} - } - - case "or", "||", "and", "&&": - if isBool(l) && isBool(r) { - return boolType, info{} - } - if or(l, r, isBool) { - return boolType, info{} - } - - case "<", ">", ">=", "<=": - if isNumber(l) && isNumber(r) { - return boolType, info{} - } - if isString(l) && isString(r) { - return boolType, info{} - } - if isTime(l) && isTime(r) { - return boolType, info{} - } - if or(l, r, isNumber, isString, isTime) { - return boolType, info{} - } - - case "-": - if isNumber(l) && isNumber(r) { - return combined(l, r), info{} - } - if isTime(l) && isTime(r) { - return durationType, info{} - } - if or(l, r, isNumber, isTime) { - return anyType, info{} - } - - case "/", "*": - if isNumber(l) && isNumber(r) { - return combined(l, r), info{} - } - if or(l, r, isNumber) { - return anyType, info{} - } - - case "**", "^": - if isNumber(l) && isNumber(r) { - return floatType, info{} - } - if or(l, r, isNumber) { - return floatType, info{} - } - - case "%": - if isInteger(l) && isInteger(r) { - return combined(l, r), info{} - } - if or(l, r, isInteger) { - return anyType, info{} - } - - case "+": - if isNumber(l) && isNumber(r) { - return combined(l, r), info{} - } - if isString(l) && isString(r) { - return stringType, info{} - } - if isTime(l) && isDuration(r) { - return timeType, info{} - } - if isDuration(l) && isTime(r) { - return timeType, info{} - } - if or(l, r, isNumber, isString, isTime, isDuration) { - return anyType, info{} - } - - case "in": - if (isString(l) || isAny(l)) && isStruct(r) { - return boolType, info{} - } - if isMap(r) { - return boolType, info{} - } - if isArray(r) { - return boolType, info{} - } - if isAny(l) && anyOf(r, isString, isArray, isMap) { - return boolType, info{} - } - if isAny(r) { - return boolType, info{} - } - - case "matches": - if s, ok := node.Right.(*ast.StringNode); ok { - r, err := regexp.Compile(s.Value) - if err != nil { - return v.error(node, err.Error()) - } - node.Regexp = r - } - if isString(l) && isString(r) { - return boolType, info{} - } - if or(l, r, isString) { - return boolType, info{} - } - - case "contains", "startsWith", "endsWith": - if isString(l) && isString(r) { - return boolType, info{} - } - if or(l, r, isString) { - return boolType, info{} - } - - case "..": - ret := reflect.SliceOf(integerType) - if isInteger(l) && isInteger(r) { - return ret, info{} - } - if or(l, r, isInteger) { - return ret, info{} - } - - case "??": - if l == nil && r != nil { - return r, info{} - } - if l != nil && r == nil { - return l, info{} - } - if l == nil && r == nil { - return nilType, info{} - } - if r.AssignableTo(l) { - return l, info{} - } - return anyType, info{} - - default: - return v.error(node, "unknown operator (%v)", node.Operator) - - } - - return v.error(node, `invalid operation: %v (mismatched types %v and %v)`, node.Operator, l, r) -} - -func (v *visitor) ChainNode(node *ast.ChainNode) (reflect.Type, info) { - return v.visit(node.Node) -} - -func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) { - base, _ := v.visit(node.Node) - prop, _ := v.visit(node.Property) - - if name, ok := node.Property.(*ast.StringNode); ok { - if base == nil { - return v.error(node, "type %v has no field %v", base, name.Value) - } - // First, check methods defined on base type itself, - // independent of which type it is. Without dereferencing. 
- if m, ok := base.MethodByName(name.Value); ok { - if base.Kind() == reflect.Interface { - // In case of interface type method will not have a receiver, - // and to prevent checker decreasing numbers of in arguments - // return method type as not method (second argument is false). - - // Also, we can not use m.Index here, because it will be - // different indexes for different types which implement - // the same interface. - return m.Type, info{} - } else { - node.Method = true - node.MethodIndex = m.Index - node.Name = name.Value - return m.Type, info{method: true} - } - } - } - - if base.Kind() == reflect.Ptr { - base = base.Elem() - } - - switch base.Kind() { - case reflect.Interface: - node.Deref = true - return anyType, info{} - - case reflect.Map: - if prop != nil && !prop.AssignableTo(base.Key()) && !isAny(prop) { - return v.error(node.Property, "cannot use %v to get an element from %v", prop, base) - } - t, c := deref(base.Elem()) - node.Deref = c - return t, info{} - - case reflect.Array, reflect.Slice: - if !isInteger(prop) && !isAny(prop) { - return v.error(node.Property, "array elements can only be selected using an integer (got %v)", prop) - } - t, c := deref(base.Elem()) - node.Deref = c - return t, info{} - - case reflect.Struct: - if name, ok := node.Property.(*ast.StringNode); ok { - propertyName := name.Value - if field, ok := fetchField(base, propertyName); ok { - t, c := deref(field.Type) - node.Deref = c - node.FieldIndex = field.Index - node.Name = propertyName - return t, info{} - } - if len(v.parents) > 1 { - if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok { - return v.error(node, "type %v has no method %v", base, propertyName) - } - } - return v.error(node, "type %v has no field %v", base, propertyName) - } - } - - return v.error(node, "type %v[%v] is undefined", base, prop) -} - -func (v *visitor) SliceNode(node *ast.SliceNode) (reflect.Type, info) { - t, _ := v.visit(node.Node) - - switch t.Kind() { - case reflect.Interface: - // ok - case reflect.String, reflect.Array, reflect.Slice: - // ok - default: - return v.error(node, "cannot slice %v", t) - } - - if node.From != nil { - from, _ := v.visit(node.From) - if !isInteger(from) && !isAny(from) { - return v.error(node.From, "non-integer slice index %v", from) - } - } - if node.To != nil { - to, _ := v.visit(node.To) - if !isInteger(to) && !isAny(to) { - return v.error(node.To, "non-integer slice index %v", to) - } - } - return t, info{} -} - -func (v *visitor) CallNode(node *ast.CallNode) (reflect.Type, info) { - fn, fnInfo := v.visit(node.Callee) - - if fnInfo.fn != nil { - f := fnInfo.fn - node.Func = f - if f.Validate != nil { - args := make([]reflect.Type, len(node.Arguments)) - for i, arg := range node.Arguments { - args[i], _ = v.visit(arg) - } - t, err := f.Validate(args) - if err != nil { - return v.error(node, "%v", err) - } - return t, info{} - } - if len(f.Types) == 0 { - t, err := v.checkFunc(f.Name, functionType, false, node) - if err != nil { - if v.err == nil { - v.err = err - } - return anyType, info{} - } - // No type was specified, so we assume the function returns any. 
- return t, info{} - } - var lastErr *file.Error - for _, t := range f.Types { - outType, err := v.checkFunc(f.Name, t, false, node) - if err != nil { - lastErr = err - continue - } - return outType, info{} - } - if lastErr != nil { - if v.err == nil { - v.err = lastErr - } - return anyType, info{} - } - } - - fnName := "function" - if identifier, ok := node.Callee.(*ast.IdentifierNode); ok { - fnName = identifier.Value - } - if member, ok := node.Callee.(*ast.MemberNode); ok { - if name, ok := member.Property.(*ast.StringNode); ok { - fnName = name.Value - } - } - switch fn.Kind() { - case reflect.Interface: - return anyType, info{} - case reflect.Func: - inputParamsCount := 1 // for functions - if fnInfo.method { - inputParamsCount = 2 // for methods - } - // TODO: Deprecate OpCallFast and move fn(...any) any to TypedFunc list. - // To do this we need add support for variadic arguments in OpCallTyped. - if !isAny(fn) && - fn.IsVariadic() && - fn.NumIn() == inputParamsCount && - fn.NumOut() == 1 && - fn.Out(0).Kind() == reflect.Interface { - rest := fn.In(fn.NumIn() - 1) // function has only one param for functions and two for methods - if rest.Kind() == reflect.Slice && rest.Elem().Kind() == reflect.Interface { - node.Fast = true - } - } - - outType, err := v.checkFunc(fnName, fn, fnInfo.method, node) - if err != nil { - if v.err == nil { - v.err = err - } - return anyType, info{} - } - - v.findTypedFunc(node, fn, fnInfo.method) - - return outType, info{} - } - return v.error(node, "%v is not callable", fn) -} - -func (v *visitor) checkFunc(name string, fn reflect.Type, method bool, node *ast.CallNode) (reflect.Type, *file.Error) { - if isAny(fn) { - return anyType, nil - } - - if fn.NumOut() == 0 { - return anyType, &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf("func %v doesn't return value", name), - } - } - if numOut := fn.NumOut(); numOut > 2 { - return anyType, &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf("func %v returns more then two values", name), - } - } - - // If func is method on an env, first argument should be a receiver, - // and actual arguments less than fnNumIn by one. - fnNumIn := fn.NumIn() - if method { - fnNumIn-- - } - // Skip first argument in case of the receiver. - fnInOffset := 0 - if method { - fnInOffset = 1 - } - - if fn.IsVariadic() { - if len(node.Arguments) < fnNumIn-1 { - return anyType, &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf("not enough arguments to call %v", name), - } - } - } else { - if len(node.Arguments) > fnNumIn { - return anyType, &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf("too many arguments to call %v", name), - } - } - if len(node.Arguments) < fnNumIn { - return anyType, &file.Error{ - Location: node.Location(), - Message: fmt.Sprintf("not enough arguments to call %v", name), - } - } - } - - for i, arg := range node.Arguments { - t, _ := v.visit(arg) - - var in reflect.Type - if fn.IsVariadic() && i >= fnNumIn-1 { - // For variadic arguments fn(xs ...int), go replaces type of xs (int) with ([]int). - // As we compare arguments one by one, we need underling type. 
- in = fn.In(fn.NumIn() - 1).Elem() - } else { - in = fn.In(i + fnInOffset) - } - - if isIntegerOrArithmeticOperation(arg) && (isInteger(in) || isFloat(in)) { - t = in - setTypeForIntegers(arg, t) - } - - if t == nil { - continue - } - - if !t.AssignableTo(in) && t.Kind() != reflect.Interface { - return anyType, &file.Error{ - Location: arg.Location(), - Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name), - } - } - } - - return fn.Out(0), nil -} - -func (v *visitor) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) { - switch node.Name { - case "all", "none", "any", "one": - collection, _ := v.visit(node.Arguments[0]) - if !isArray(collection) && !isAny(collection) { - return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) - } - - v.collections = append(v.collections, collection) - closure, _ := v.visit(node.Arguments[1]) - v.collections = v.collections[:len(v.collections)-1] - - if isFunc(closure) && - closure.NumOut() == 1 && - closure.NumIn() == 1 && isAny(closure.In(0)) { - - if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { - return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) - } - return boolType, info{} - } - return v.error(node.Arguments[1], "closure should have one input and one output param")
- - case "filter": - collection, _ := v.visit(node.Arguments[0]) - if !isArray(collection) && !isAny(collection) { - return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) - } - - v.collections = append(v.collections, collection) - closure, _ := v.visit(node.Arguments[1]) - v.collections = v.collections[:len(v.collections)-1] - - if isFunc(closure) && - closure.NumOut() == 1 && - closure.NumIn() == 1 && isAny(closure.In(0)) { - - if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { - return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) - } - if isAny(collection) { - return arrayType, info{} - } - return reflect.SliceOf(collection.Elem()), info{} - } - return v.error(node.Arguments[1], "closure should have one input and one output param")
- - case "map": - collection, _ := v.visit(node.Arguments[0]) - if !isArray(collection) && !isAny(collection) { - return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) - } - - v.collections = append(v.collections, collection) - closure, _ := v.visit(node.Arguments[1]) - v.collections = v.collections[:len(v.collections)-1] - - if isFunc(closure) && - closure.NumOut() == 1 && - closure.NumIn() == 1 && isAny(closure.In(0)) { - - return reflect.SliceOf(closure.Out(0)), info{} - } - return v.error(node.Arguments[1], "closure should have one input and one output param")
- - case "count": - collection, _ := v.visit(node.Arguments[0]) - if !isArray(collection) && !isAny(collection) { - return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection) - } - - v.collections = append(v.collections, collection) - closure, _ := v.visit(node.Arguments[1]) - v.collections = v.collections[:len(v.collections)-1] - - if isFunc(closure) && - closure.NumOut() == 1 && - closure.NumIn() == 1 && isAny(closure.In(0)) { - if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) { - return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String()) - } - - return integerType, info{} - } - return v.error(node.Arguments[1], "closure should have one input and one output param")
- - default: - return v.error(node, "unknown builtin %v", node.Name) - } -} - -func (v *visitor) ClosureNode(node *ast.ClosureNode) (reflect.Type, info) { - t, _ := v.visit(node.Node) - return reflect.FuncOf([]reflect.Type{anyType}, []reflect.Type{t}, false), info{} -} - -func (v *visitor) PointerNode(node *ast.PointerNode) (reflect.Type, info) { - if len(v.collections) == 0 { - return v.error(node, "cannot use pointer accessor outside closure") - } - - collection := v.collections[len(v.collections)-1] - switch collection.Kind() { - case reflect.Interface: - return anyType, info{} - case reflect.Array, reflect.Slice: - return collection.Elem(), info{} - } - return v.error(node, "cannot use %v as array", collection) -}
- -func (v *visitor) ConditionalNode(node *ast.ConditionalNode) (reflect.Type, info) { - c, _ := v.visit(node.Cond) - if !isBool(c) && !isAny(c) { - return v.error(node.Cond, "non-bool expression (type %v) used as condition", c) - } - - t1, _ := v.visit(node.Exp1) - t2, _ := v.visit(node.Exp2) - - if t1 == nil && t2 != nil { - return t2, info{} - } - if t1 != nil && t2 == nil { - return t1, info{} - } - if t1 == nil && t2 == nil { - return nilType, info{} - } - if t1.AssignableTo(t2) { - return t1, info{} - } - return anyType, info{} -} - -func (v *visitor) ArrayNode(node *ast.ArrayNode) (reflect.Type, info) { - for _, node := range node.Nodes { - v.visit(node) - } - return arrayType, info{} -} - -func (v *visitor) MapNode(node *ast.MapNode) (reflect.Type, info) { - for _, pair := range node.Pairs { - v.visit(pair) - } - return mapType, info{} -} - -func (v *visitor) PairNode(node *ast.PairNode) (reflect.Type, info) { - v.visit(node.Key) - v.visit(node.Value) - return nilType, info{} -}
- -func (v *visitor) findTypedFunc(node *ast.CallNode, fn reflect.Type, method bool) { - // OnCallTyped doesn't work for functions with variadic arguments, - // and doesn't work for named function types, like `type MyFunc func() int`. - // If PkgPath() is an empty string, it's an unnamed function.
- if !fn.IsVariadic() && fn.PkgPath() == "" { - fnNumIn := fn.NumIn() - fnInOffset := 0 - if method { - fnNumIn-- - fnInOffset = 1 - } - funcTypes: - for i := range vm.FuncTypes { - if i == 0 { - continue - } - typed := reflect.ValueOf(vm.FuncTypes[i]).Elem().Type() - if typed.Kind() != reflect.Func { - continue - } - if typed.NumOut() != fn.NumOut() { - continue - } - for j := 0; j < typed.NumOut(); j++ { - if typed.Out(j) != fn.Out(j) { - continue funcTypes - } - } - if typed.NumIn() != fnNumIn { - continue - } - for j := 0; j < typed.NumIn(); j++ { - if typed.In(j) != fn.In(j+fnInOffset) { - continue funcTypes - } - } - node.Typed = i - } - } -} diff --git a/vendor/github.com/antonmedv/expr/checker/types.go b/vendor/github.com/antonmedv/expr/checker/types.go deleted file mode 100644 index 7ccd8948091..00000000000 --- a/vendor/github.com/antonmedv/expr/checker/types.go +++ /dev/null @@ -1,262 +0,0 @@ -package checker - -import ( - "reflect" - "time" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/conf" -) - -var ( - nilType = reflect.TypeOf(nil) - boolType = reflect.TypeOf(true) - integerType = reflect.TypeOf(0) - floatType = reflect.TypeOf(float64(0)) - stringType = reflect.TypeOf("") - arrayType = reflect.TypeOf([]interface{}{}) - mapType = reflect.TypeOf(map[string]interface{}{}) - anyType = reflect.TypeOf(new(interface{})).Elem() - timeType = reflect.TypeOf(time.Time{}) - durationType = reflect.TypeOf(time.Duration(0)) - functionType = reflect.TypeOf(new(func(...interface{}) (interface{}, error))).Elem() - errorType = reflect.TypeOf((*error)(nil)).Elem() -) - -func combined(a, b reflect.Type) reflect.Type { - if a.Kind() == b.Kind() { - return a - } - if isFloat(a) || isFloat(b) { - return floatType - } - return integerType -} - -func anyOf(t reflect.Type, fns ...func(reflect.Type) bool) bool { - for _, fn := range fns { - if fn(t) { - return true - } - } - return false -} - -func or(l, r reflect.Type, fns ...func(reflect.Type) bool) bool { - if isAny(l) && isAny(r) { - return true - } - if isAny(l) && anyOf(r, fns...) { - return true - } - if isAny(r) && anyOf(l, fns...) 
{ - return true - } - return false -} - -func isAny(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Interface: - return true - } - } - return false -} - -func isInteger(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - fallthrough - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - } - } - return false -} - -func isFloat(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Float32, reflect.Float64: - return true - } - } - return false -} - -func isNumber(t reflect.Type) bool { - return isInteger(t) || isFloat(t) -} - -func isTime(t reflect.Type) bool { - if t != nil { - switch t { - case timeType: - return true - } - } - return isAny(t) -} - -func isDuration(t reflect.Type) bool { - if t != nil { - switch t { - case durationType: - return true - } - } - return false -} - -func isBool(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Bool: - return true - } - } - return false -} - -func isString(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.String: - return true - } - } - return false -} - -func isArray(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Ptr: - return isArray(t.Elem()) - case reflect.Slice, reflect.Array: - return true - } - } - return false -} - -func isMap(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Ptr: - return isMap(t.Elem()) - case reflect.Map: - return true - } - } - return false -} - -func isStruct(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Ptr: - return isStruct(t.Elem()) - case reflect.Struct: - return true - } - } - return false -} - -func isFunc(t reflect.Type) bool { - if t != nil { - switch t.Kind() { - case reflect.Ptr: - return isFunc(t.Elem()) - case reflect.Func: - return true - } - } - return false -} - -func fetchField(t reflect.Type, name string) (reflect.StructField, bool) { - if t != nil { - // First check all structs fields. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - // Search all fields, even embedded structs. - if conf.FieldName(field) == name { - return field, true - } - } - - // Second check fields of embedded structs. - for i := 0; i < t.NumField(); i++ { - anon := t.Field(i) - if anon.Anonymous { - if field, ok := fetchField(anon.Type, name); ok { - field.Index = append(anon.Index, field.Index...) 
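- // Prepending the embedded struct's index yields the full index path, - // so the promoted field can be resolved directly from the outer struct.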
- return field, true - } - } - } - } - return reflect.StructField{}, false -} - -func deref(t reflect.Type) (reflect.Type, bool) { - if t == nil { - return nil, false - } - if t.Kind() == reflect.Interface { - return t, true - } - found := false - for t != nil && t.Kind() == reflect.Ptr { - e := t.Elem() - switch e.Kind() { - case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice: - return t, false - default: - found = true - t = e - } - } - return t, found -} - -func isIntegerOrArithmeticOperation(node ast.Node) bool { - switch n := node.(type) { - case *ast.IntegerNode: - return true - case *ast.UnaryNode: - switch n.Operator { - case "+", "-": - return true - } - case *ast.BinaryNode: - switch n.Operator { - case "+", "/", "-", "*": - return true - } - } - return false -} - -func setTypeForIntegers(node ast.Node, t reflect.Type) { - switch n := node.(type) { - case *ast.IntegerNode: - n.SetType(t) - case *ast.UnaryNode: - switch n.Operator { - case "+", "-": - setTypeForIntegers(n.Node, t) - } - case *ast.BinaryNode: - switch n.Operator { - case "+", "/", "-", "*": - setTypeForIntegers(n.Left, t) - setTypeForIntegers(n.Right, t) - } - } -} diff --git a/vendor/github.com/antonmedv/expr/compiler/compiler.go b/vendor/github.com/antonmedv/expr/compiler/compiler.go deleted file mode 100644 index 3cd32af0f27..00000000000 --- a/vendor/github.com/antonmedv/expr/compiler/compiler.go +++ /dev/null @@ -1,739 +0,0 @@ -package compiler - -import ( - "fmt" - "reflect" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/conf" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/parser" - . "github.com/antonmedv/expr/vm" - "github.com/antonmedv/expr/vm/runtime" -) - -const ( - placeholder = 12345 -) - -func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%v", r) - } - }() - - c := &compiler{ - locations: make([]file.Location, 0), - constantsIndex: make(map[interface{}]int), - functionsIndex: make(map[string]int), - } - - if config != nil { - c.mapEnv = config.MapEnv - c.cast = config.Expect - } - - c.compile(tree.Node) - - switch c.cast { - case reflect.Int: - c.emit(OpCast, 0) - case reflect.Int64: - c.emit(OpCast, 1) - case reflect.Float64: - c.emit(OpCast, 2) - } - - program = &Program{ - Node: tree.Node, - Source: tree.Source, - Locations: c.locations, - Constants: c.constants, - Bytecode: c.bytecode, - Arguments: c.arguments, - Functions: c.functions, - } - return -} - -type compiler struct { - locations []file.Location - bytecode []Opcode - constants []interface{} - constantsIndex map[interface{}]int - functions []Function - functionsIndex map[string]int - mapEnv bool - cast reflect.Kind - nodes []ast.Node - chains [][]int - arguments []int -} - -func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int { - c.bytecode = append(c.bytecode, op) - current := len(c.bytecode) - c.arguments = append(c.arguments, arg) - c.locations = append(c.locations, loc) - return current -} - -func (c *compiler) emit(op Opcode, args ...int) int { - arg := 0 - if len(args) > 1 { - panic("too many arguments") - } - if len(args) == 1 { - arg = args[0] - } - var loc file.Location - if len(c.nodes) > 0 { - loc = c.nodes[len(c.nodes)-1].Location() - } - return c.emitLocation(loc, op, arg) -} - -func (c *compiler) emitPush(value interface{}) int { - return c.emit(OpPush, c.addConstant(value)) -} - -func (c *compiler) addConstant(constant interface{}) int { - indexable := true 
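- // Note: scalar constants are deduplicated via constantsIndex below; - // slices, maps and structs are not hashable, so they are always appended.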
- hash := constant - switch reflect.TypeOf(constant).Kind() { - case reflect.Slice, reflect.Map, reflect.Struct: - indexable = false - } - if field, ok := constant.(*runtime.Field); ok { - indexable = true - hash = fmt.Sprintf("%v", field) - } - if method, ok := constant.(*runtime.Method); ok { - indexable = true - hash = fmt.Sprintf("%v", method) - } - if indexable { - if p, ok := c.constantsIndex[hash]; ok { - return p - } - } - c.constants = append(c.constants, constant) - p := len(c.constants) - 1 - if indexable { - c.constantsIndex[hash] = p - } - return p -} - -func (c *compiler) addFunction(node *ast.CallNode) int { - if node.Func == nil { - panic("function is nil") - } - if p, ok := c.functionsIndex[node.Func.Name]; ok { - return p - } - p := len(c.functions) - c.functions = append(c.functions, node.Func.Func) - c.functionsIndex[node.Func.Name] = p - return p -} - -func (c *compiler) patchJump(placeholder int) { - offset := len(c.bytecode) - placeholder - c.arguments[placeholder-1] = offset -} - -func (c *compiler) calcBackwardJump(to int) int { - return len(c.bytecode) + 1 - to -} - -func (c *compiler) compile(node ast.Node) { - c.nodes = append(c.nodes, node) - defer func() { - c.nodes = c.nodes[:len(c.nodes)-1] - }() - - switch n := node.(type) { - case *ast.NilNode: - c.NilNode(n) - case *ast.IdentifierNode: - c.IdentifierNode(n) - case *ast.IntegerNode: - c.IntegerNode(n) - case *ast.FloatNode: - c.FloatNode(n) - case *ast.BoolNode: - c.BoolNode(n) - case *ast.StringNode: - c.StringNode(n) - case *ast.ConstantNode: - c.ConstantNode(n) - case *ast.UnaryNode: - c.UnaryNode(n) - case *ast.BinaryNode: - c.BinaryNode(n) - case *ast.ChainNode: - c.ChainNode(n) - case *ast.MemberNode: - c.MemberNode(n) - case *ast.SliceNode: - c.SliceNode(n) - case *ast.CallNode: - c.CallNode(n) - case *ast.BuiltinNode: - c.BuiltinNode(n) - case *ast.ClosureNode: - c.ClosureNode(n) - case *ast.PointerNode: - c.PointerNode(n) - case *ast.ConditionalNode: - c.ConditionalNode(n) - case *ast.ArrayNode: - c.ArrayNode(n) - case *ast.MapNode: - c.MapNode(n) - case *ast.PairNode: - c.PairNode(n) - default: - panic(fmt.Sprintf("undefined node type (%T)", node)) - } -} - -func (c *compiler) NilNode(_ *ast.NilNode) { - c.emit(OpNil) -} - -func (c *compiler) IdentifierNode(node *ast.IdentifierNode) { - if c.mapEnv { - c.emit(OpLoadFast, c.addConstant(node.Value)) - } else if len(node.FieldIndex) > 0 { - c.emit(OpLoadField, c.addConstant(&runtime.Field{ - Index: node.FieldIndex, - Path: []string{node.Value}, - })) - } else if node.Method { - c.emit(OpLoadMethod, c.addConstant(&runtime.Method{ - Name: node.Value, - Index: node.MethodIndex, - })) - } else { - c.emit(OpLoadConst, c.addConstant(node.Value)) - } - if node.Deref { - c.emit(OpDeref) - } else if node.Type() == nil { - c.emit(OpDeref) - } -} - -func (c *compiler) IntegerNode(node *ast.IntegerNode) { - t := node.Type() - if t == nil { - c.emitPush(node.Value) - return - } - switch t.Kind() { - case reflect.Float32: - c.emitPush(float32(node.Value)) - case reflect.Float64: - c.emitPush(float64(node.Value)) - case reflect.Int: - c.emitPush(node.Value) - case reflect.Int8: - c.emitPush(int8(node.Value)) - case reflect.Int16: - c.emitPush(int16(node.Value)) - case reflect.Int32: - c.emitPush(int32(node.Value)) - case reflect.Int64: - c.emitPush(int64(node.Value)) - case reflect.Uint: - c.emitPush(uint(node.Value)) - case reflect.Uint8: - c.emitPush(uint8(node.Value)) - case reflect.Uint16: - c.emitPush(uint16(node.Value)) - case reflect.Uint32: - 
c.emitPush(uint32(node.Value)) - case reflect.Uint64: - c.emitPush(uint64(node.Value)) - default: - c.emitPush(node.Value) - } -} - -func (c *compiler) FloatNode(node *ast.FloatNode) { - c.emitPush(node.Value) -} - -func (c *compiler) BoolNode(node *ast.BoolNode) { - if node.Value { - c.emit(OpTrue) - } else { - c.emit(OpFalse) - } -} - -func (c *compiler) StringNode(node *ast.StringNode) { - c.emitPush(node.Value) -} - -func (c *compiler) ConstantNode(node *ast.ConstantNode) { - c.emitPush(node.Value) -} - -func (c *compiler) UnaryNode(node *ast.UnaryNode) { - c.compile(node.Node) - - switch node.Operator { - - case "!", "not": - c.emit(OpNot) - - case "+": - // Do nothing - - case "-": - c.emit(OpNegate) - - default: - panic(fmt.Sprintf("unknown operator (%v)", node.Operator)) - } -} - -func (c *compiler) BinaryNode(node *ast.BinaryNode) { - l := kind(node.Left) - r := kind(node.Right) - - switch node.Operator { - case "==": - c.compile(node.Left) - c.compile(node.Right) - - if l == r && l == reflect.Int { - c.emit(OpEqualInt) - } else if l == r && l == reflect.String { - c.emit(OpEqualString) - } else { - c.emit(OpEqual) - } - - case "!=": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpEqual) - c.emit(OpNot) - - case "or", "||": - c.compile(node.Left) - end := c.emit(OpJumpIfTrue, placeholder) - c.emit(OpPop) - c.compile(node.Right) - c.patchJump(end) - - case "and", "&&": - c.compile(node.Left) - end := c.emit(OpJumpIfFalse, placeholder) - c.emit(OpPop) - c.compile(node.Right) - c.patchJump(end) - - case "<": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpLess) - - case ">": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpMore) - - case "<=": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpLessOrEqual) - - case ">=": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpMoreOrEqual) - - case "+": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpAdd) - - case "-": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpSubtract) - - case "*": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpMultiply) - - case "/": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpDivide) - - case "%": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpModulo) - - case "**", "^": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpExponent) - - case "in": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpIn) - - case "matches": - if node.Regexp != nil { - c.compile(node.Left) - c.emit(OpMatchesConst, c.addConstant(node.Regexp)) - } else { - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpMatches) - } - - case "contains": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpContains) - - case "startsWith": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpStartsWith) - - case "endsWith": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpEndsWith) - - case "..": - c.compile(node.Left) - c.compile(node.Right) - c.emit(OpRange) - - case "??": - c.compile(node.Left) - end := c.emit(OpJumpIfNotNil, placeholder) - c.emit(OpPop) - c.compile(node.Right) - c.patchJump(end) - - default: - panic(fmt.Sprintf("unknown operator (%v)", node.Operator)) - - } -} - -func (c *compiler) ChainNode(node *ast.ChainNode) { - c.chains = append(c.chains, []int{}) - c.compile(node.Node) - // Chain activate (got nit somewhere) - for _, ph := range c.chains[len(c.chains)-1] { - c.patchJump(ph) - } - c.chains = c.chains[:len(c.chains)-1] -} - -func (c *compiler) MemberNode(node *ast.MemberNode) { - if node.Method 
{ - c.compile(node.Node) - c.emit(OpMethod, c.addConstant(&runtime.Method{ - Name: node.Name, - Index: node.MethodIndex, - })) - return - } - op := OpFetch - original := node - index := node.FieldIndex - path := []string{node.Name} - base := node.Node - if len(node.FieldIndex) > 0 { - op = OpFetchField - for !node.Optional { - ident, ok := base.(*ast.IdentifierNode) - if ok && len(ident.FieldIndex) > 0 { - if ident.Deref { - panic("IdentifierNode should not be dereferenced") - } - index = append(ident.FieldIndex, index...) - path = append([]string{ident.Value}, path...) - c.emitLocation(ident.Location(), OpLoadField, c.addConstant( - &runtime.Field{Index: index, Path: path}, - )) - goto deref - } - member, ok := base.(*ast.MemberNode) - if ok && len(member.FieldIndex) > 0 { - if member.Deref { - panic("MemberNode should not be dereferenced") - } - index = append(member.FieldIndex, index...) - path = append([]string{member.Name}, path...) - node = member - base = member.Node - } else { - break - } - } - } - - c.compile(base) - if node.Optional { - ph := c.emit(OpJumpIfNil, placeholder) - c.chains[len(c.chains)-1] = append(c.chains[len(c.chains)-1], ph) - } - - if op == OpFetch { - c.compile(node.Property) - c.emit(OpFetch) - } else { - c.emitLocation(node.Location(), op, c.addConstant( - &runtime.Field{Index: index, Path: path}, - )) - } - -deref: - if original.Deref { - c.emit(OpDeref) - } else if original.Type() == nil { - c.emit(OpDeref) - } -} - -func (c *compiler) SliceNode(node *ast.SliceNode) { - c.compile(node.Node) - if node.To != nil { - c.compile(node.To) - } else { - c.emit(OpLen) - } - if node.From != nil { - c.compile(node.From) - } else { - c.emitPush(0) - } - c.emit(OpSlice) -} - -func (c *compiler) CallNode(node *ast.CallNode) { - for _, arg := range node.Arguments { - c.compile(arg) - } - if node.Func != nil { - if node.Func.Opcode > 0 { - c.emit(OpBuiltin, node.Func.Opcode) - return - } - switch len(node.Arguments) { - case 0: - c.emit(OpCall0, c.addFunction(node)) - case 1: - c.emit(OpCall1, c.addFunction(node)) - case 2: - c.emit(OpCall2, c.addFunction(node)) - case 3: - c.emit(OpCall3, c.addFunction(node)) - default: - c.emit(OpLoadFunc, c.addFunction(node)) - c.emit(OpCallN, len(node.Arguments)) - } - return - } - c.compile(node.Callee) - if node.Typed > 0 { - c.emit(OpCallTyped, node.Typed) - return - } else if node.Fast { - c.emit(OpCallFast, len(node.Arguments)) - } else { - c.emit(OpCall, len(node.Arguments)) - } -} - -func (c *compiler) BuiltinNode(node *ast.BuiltinNode) { - switch node.Name { - case "all": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - var loopBreak int - c.emitLoop(func() { - c.compile(node.Arguments[1]) - loopBreak = c.emit(OpJumpIfFalse, placeholder) - c.emit(OpPop) - }) - c.emit(OpTrue) - c.patchJump(loopBreak) - c.emit(OpEnd) - - case "none": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - var loopBreak int - c.emitLoop(func() { - c.compile(node.Arguments[1]) - c.emit(OpNot) - loopBreak = c.emit(OpJumpIfFalse, placeholder) - c.emit(OpPop) - }) - c.emit(OpTrue) - c.patchJump(loopBreak) - c.emit(OpEnd) - - case "any": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - var loopBreak int - c.emitLoop(func() { - c.compile(node.Arguments[1]) - loopBreak = c.emit(OpJumpIfTrue, placeholder) - c.emit(OpPop) - }) - c.emit(OpFalse) - c.patchJump(loopBreak) - c.emit(OpEnd) - - case "one": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - c.emitLoop(func() { - c.compile(node.Arguments[1]) - c.emitCond(func() { - c.emit(OpIncrementCount) - }) - 
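- // each element satisfying the predicate increments the count register; - // "one" holds iff the final count equals 1, compared right below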
}) - c.emit(OpGetCount) - c.emitPush(1) - c.emit(OpEqual) - c.emit(OpEnd) - - case "filter": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - c.emitLoop(func() { - c.compile(node.Arguments[1]) - c.emitCond(func() { - c.emit(OpIncrementCount) - c.emit(OpPointer) - }) - }) - c.emit(OpGetCount) - c.emit(OpEnd) - c.emit(OpArray) - - case "map": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - c.emitLoop(func() { - c.compile(node.Arguments[1]) - }) - c.emit(OpGetLen) - c.emit(OpEnd) - c.emit(OpArray) - - case "count": - c.compile(node.Arguments[0]) - c.emit(OpBegin) - c.emitLoop(func() { - c.compile(node.Arguments[1]) - c.emitCond(func() { - c.emit(OpIncrementCount) - }) - }) - c.emit(OpGetCount) - c.emit(OpEnd) - - default: - panic(fmt.Sprintf("unknown builtin %v", node.Name)) - } -} - -func (c *compiler) emitCond(body func()) { - noop := c.emit(OpJumpIfFalse, placeholder) - c.emit(OpPop) - - body() - - jmp := c.emit(OpJump, placeholder) - c.patchJump(noop) - c.emit(OpPop) - c.patchJump(jmp) -} - -func (c *compiler) emitLoop(body func()) { - begin := len(c.bytecode) - end := c.emit(OpJumpIfEnd, placeholder) - - body() - - c.emit(OpIncrementIt) - c.emit(OpJumpBackward, c.calcBackwardJump(begin)) - c.patchJump(end) -} - -func (c *compiler) ClosureNode(node *ast.ClosureNode) { - c.compile(node.Node) -} - -func (c *compiler) PointerNode(node *ast.PointerNode) { - c.emit(OpPointer) -} - -func (c *compiler) ConditionalNode(node *ast.ConditionalNode) { - c.compile(node.Cond) - otherwise := c.emit(OpJumpIfFalse, placeholder) - - c.emit(OpPop) - c.compile(node.Exp1) - end := c.emit(OpJump, placeholder) - - c.patchJump(otherwise) - c.emit(OpPop) - c.compile(node.Exp2) - - c.patchJump(end) -} - -func (c *compiler) ArrayNode(node *ast.ArrayNode) { - for _, node := range node.Nodes { - c.compile(node) - } - - c.emitPush(len(node.Nodes)) - c.emit(OpArray) -} - -func (c *compiler) MapNode(node *ast.MapNode) { - for _, pair := range node.Pairs { - c.compile(pair) - } - - c.emitPush(len(node.Pairs)) - c.emit(OpMap) -} - -func (c *compiler) PairNode(node *ast.PairNode) { - c.compile(node.Key) - c.compile(node.Value) -} - -func kind(node ast.Node) reflect.Kind { - t := node.Type() - if t == nil { - return reflect.Invalid - } - return t.Kind() -} diff --git a/vendor/github.com/antonmedv/expr/conf/config.go b/vendor/github.com/antonmedv/expr/conf/config.go deleted file mode 100644 index 1ac0fa7d291..00000000000 --- a/vendor/github.com/antonmedv/expr/conf/config.go +++ /dev/null @@ -1,96 +0,0 @@ -package conf - -import ( - "fmt" - "reflect" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/vm/runtime" -) - -type Config struct { - Env interface{} - Types TypesTable - MapEnv bool - DefaultType reflect.Type - Operators OperatorsTable - Expect reflect.Kind - Optimize bool - Strict bool - ConstFns map[string]reflect.Value - Visitors []ast.Visitor - Functions map[string]*builtin.Function -} - -// CreateNew creates new config with default values. -func CreateNew() *Config { - c := &Config{ - Operators: make(map[string][]string), - ConstFns: make(map[string]reflect.Value), - Functions: make(map[string]*builtin.Function), - Optimize: true, - } - for _, f := range builtin.Builtins { - c.Functions[f.Name] = f - } - return c -} - -// New creates new config with environment. 
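- // A minimal sketch of the difference (the env literal is illustrative): - // conf.New(map[string]interface{}{"x": 1}) derives the types table from the - // environment and enables Strict mode via WithEnv below, while CreateNew() - // above builds an environment-free config with builtins and Optimize on.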
-func New(env interface{}) *Config { - c := CreateNew() - c.WithEnv(env) - return c -} - -func (c *Config) WithEnv(env interface{}) { - var mapEnv bool - var mapValueType reflect.Type - if _, ok := env.(map[string]interface{}); ok { - mapEnv = true - } else { - if reflect.ValueOf(env).Kind() == reflect.Map { - mapValueType = reflect.TypeOf(env).Elem() - } - } - - c.Env = env - c.Types = CreateTypesTable(env) - c.MapEnv = mapEnv - c.DefaultType = mapValueType - c.Strict = true -} - -func (c *Config) Operator(operator string, fns ...string) { - c.Operators[operator] = append(c.Operators[operator], fns...) -} - -func (c *Config) ConstExpr(name string) { - if c.Env == nil { - panic("no environment is specified for ConstExpr()") - } - fn := reflect.ValueOf(runtime.Fetch(c.Env, name)) - if fn.Kind() != reflect.Func { - panic(fmt.Errorf("const expression %q must be a function", name)) - } - c.ConstFns[name] = fn -} - -func (c *Config) Check() { - for operator, fns := range c.Operators { - for _, fn := range fns { - fnType, ok := c.Types[fn] - if !ok || fnType.Type.Kind() != reflect.Func { - panic(fmt.Errorf("function %s for %s operator does not exist in the environment", fn, operator)) - } - requiredNumIn := 2 - if fnType.Method { - requiredNumIn = 3 // As first argument of method is receiver. - } - if fnType.Type.NumIn() != requiredNumIn || fnType.Type.NumOut() != 1 { - panic(fmt.Errorf("function %s for %s operator does not have a correct signature", fn, operator)) - } - } - } -} diff --git a/vendor/github.com/antonmedv/expr/conf/functions.go b/vendor/github.com/antonmedv/expr/conf/functions.go deleted file mode 100644 index 8f52a955753..00000000000 --- a/vendor/github.com/antonmedv/expr/conf/functions.go +++ /dev/null @@ -1 +0,0 @@ -package conf diff --git a/vendor/github.com/antonmedv/expr/conf/operators.go b/vendor/github.com/antonmedv/expr/conf/operators.go deleted file mode 100644 index 13e069d76ca..00000000000 --- a/vendor/github.com/antonmedv/expr/conf/operators.go +++ /dev/null @@ -1,59 +0,0 @@ -package conf - -import ( - "reflect" - - "github.com/antonmedv/expr/ast" -) - -// OperatorsTable maps binary operators to corresponding list of functions. -// Functions should be provided in the environment to allow operator overloading. -type OperatorsTable map[string][]string - -func FindSuitableOperatorOverload(fns []string, types TypesTable, l, r reflect.Type) (reflect.Type, string, bool) { - for _, fn := range fns { - fnType := types[fn] - firstInIndex := 0 - if fnType.Method { - firstInIndex = 1 // As first argument to method is receiver. 
- } - firstArgType := fnType.Type.In(firstInIndex) - secondArgType := fnType.Type.In(firstInIndex + 1) - - firstArgumentFit := l == firstArgType || (firstArgType.Kind() == reflect.Interface && (l == nil || l.Implements(firstArgType))) - secondArgumentFit := r == secondArgType || (secondArgType.Kind() == reflect.Interface && (r == nil || r.Implements(secondArgType))) - if firstArgumentFit && secondArgumentFit { - return fnType.Type.Out(0), fn, true - } - } - return nil, "", false -} - -type OperatorPatcher struct { - Operators OperatorsTable - Types TypesTable -} - -func (p *OperatorPatcher) Visit(node *ast.Node) { - binaryNode, ok := (*node).(*ast.BinaryNode) - if !ok { - return - } - - fns, ok := p.Operators[binaryNode.Operator] - if !ok { - return - } - - leftType := binaryNode.Left.Type() - rightType := binaryNode.Right.Type() - - _, fn, ok := FindSuitableOperatorOverload(fns, p.Types, leftType, rightType) - if ok { - newNode := &ast.CallNode{ - Callee: &ast.IdentifierNode{Value: fn}, - Arguments: []ast.Node{binaryNode.Left, binaryNode.Right}, - } - ast.Patch(node, newNode) - } -} diff --git a/vendor/github.com/antonmedv/expr/conf/types_table.go b/vendor/github.com/antonmedv/expr/conf/types_table.go deleted file mode 100644 index e917f5fa844..00000000000 --- a/vendor/github.com/antonmedv/expr/conf/types_table.go +++ /dev/null @@ -1,123 +0,0 @@ -package conf - -import ( - "reflect" -) - -type Tag struct { - Type reflect.Type - Ambiguous bool - FieldIndex []int - Method bool - MethodIndex int -} - -type TypesTable map[string]Tag - -// CreateTypesTable creates types table for type checks during parsing. -// If struct is passed, all fields will be treated as variables, -// as well as all fields of embedded structs and struct itself. -// -// If map is passed, all items will be treated as variables -// (key as name, value as type). -func CreateTypesTable(i interface{}) TypesTable { - if i == nil { - return nil - } - - types := make(TypesTable) - v := reflect.ValueOf(i) - t := reflect.TypeOf(i) - - d := t - if t.Kind() == reflect.Ptr { - d = t.Elem() - } - - switch d.Kind() { - case reflect.Struct: - types = FieldsFromStruct(d) - - // Methods of struct should be gathered from original struct with pointer, - // as methods maybe declared on pointer receiver. Also this method retrieves - // all embedded structs methods as well, no need to recursion. - for i := 0; i < t.NumMethod(); i++ { - m := t.Method(i) - types[m.Name] = Tag{ - Type: m.Type, - Method: true, - MethodIndex: i, - } - } - - case reflect.Map: - for _, key := range v.MapKeys() { - value := v.MapIndex(key) - if key.Kind() == reflect.String && value.IsValid() && value.CanInterface() { - types[key.String()] = Tag{Type: reflect.TypeOf(value.Interface())} - } - } - - // A map may have method too. - for i := 0; i < t.NumMethod(); i++ { - m := t.Method(i) - types[m.Name] = Tag{ - Type: m.Type, - Method: true, - MethodIndex: i, - } - } - } - - return types -} - -func FieldsFromStruct(t reflect.Type) TypesTable { - types := make(TypesTable) - t = dereference(t) - if t == nil { - return types - } - - switch t.Kind() { - case reflect.Struct: - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - - if f.Anonymous { - for name, typ := range FieldsFromStruct(f.Type) { - if _, ok := types[name]; ok { - types[name] = Tag{Ambiguous: true} - } else { - typ.FieldIndex = append(f.Index, typ.FieldIndex...) 
- types[name] = typ - } - } - } - - types[FieldName(f)] = Tag{ - Type: f.Type, - FieldIndex: f.Index, - } - } - } - - return types -} - -func dereference(t reflect.Type) reflect.Type { - if t == nil { - return nil - } - if t.Kind() == reflect.Ptr { - t = dereference(t.Elem()) - } - return t -} - -func FieldName(field reflect.StructField) string { - if taggedName := field.Tag.Get("expr"); taggedName != "" { - return taggedName - } - return field.Name -} diff --git a/vendor/github.com/antonmedv/expr/expr.go b/vendor/github.com/antonmedv/expr/expr.go deleted file mode 100644 index 14f6af285c5..00000000000 --- a/vendor/github.com/antonmedv/expr/expr.go +++ /dev/null @@ -1,205 +0,0 @@ -package expr - -import ( - "fmt" - "reflect" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/checker" - "github.com/antonmedv/expr/compiler" - "github.com/antonmedv/expr/conf" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/optimizer" - "github.com/antonmedv/expr/parser" - "github.com/antonmedv/expr/vm" -)
- -// Option for configuring config. -type Option func(c *conf.Config) - -// Env specifies the expected input environment for type checks. -// If a struct is passed, all its fields will be treated as variables, -// as well as all fields of embedded structs and the struct itself. -// If a map is passed, all its items will be treated as variables. -// Methods defined on this type will be available as functions. -func Env(env interface{}) Option { - return func(c *conf.Config) { - c.WithEnv(env) - } -} - -// AllowUndefinedVariables allows the use of undefined variables inside expressions. -// This can be used with the expr.Env option to partially define a few variables. -func AllowUndefinedVariables() Option { - return func(c *conf.Config) { - c.Strict = false - } -} - -// Operator allows a binary operator to be replaced with a function. -func Operator(operator string, fn ...string) Option { - return func(c *conf.Config) { - c.Operator(operator, fn...) - } -} - -// ConstExpr marks a function as a constant expression. If all arguments to this function are constants, -// the call can be replaced by its result at compile time. -func ConstExpr(fn string) Option { - return func(c *conf.Config) { - c.ConstExpr(fn) - } -}
- -// AsKind tells the compiler to expect the given kind as the result. -func AsKind(kind reflect.Kind) Option { - return func(c *conf.Config) { - c.Expect = kind - } -} - -// AsBool tells the compiler to expect a boolean result. -func AsBool() Option { - return func(c *conf.Config) { - c.Expect = reflect.Bool - } -} - -// AsInt tells the compiler to expect an int result. -func AsInt() Option { - return func(c *conf.Config) { - c.Expect = reflect.Int - } -} - -// AsInt64 tells the compiler to expect an int64 result. -func AsInt64() Option { - return func(c *conf.Config) { - c.Expect = reflect.Int64 - } -} - -// AsFloat64 tells the compiler to expect a float64 result. -func AsFloat64() Option { - return func(c *conf.Config) { - c.Expect = reflect.Float64 - } -} - -// Optimize turns optimizations on or off. -func Optimize(b bool) Option { - return func(c *conf.Config) { - c.Optimize = b - } -} - -// Patch adds a visitor to the list of visitors that will be applied before compiling the AST to bytecode. -func Patch(visitor ast.Visitor) Option { - return func(c *conf.Config) { - c.Visitors = append(c.Visitors, visitor) - } -} - -// Function adds a function to the list of functions that will be available in expressions.
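- // A usage sketch (the "atoi" name and its strconv-based body are purely - // illustrative): - // - //	program, err := expr.Compile(`atoi("42")`, expr.Function("atoi", - //		func(params ...interface{}) (interface{}, error) { - //			return strconv.Atoi(params[0].(string)) - //		}, - //		new(func(string) int), - //	)) - // - // The optional trailing types (pointers to func types, unwrapped below) let - // the checker validate the call signature at compile time.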
-func Function(name string, fn func(params ...interface{}) (interface{}, error), types ...interface{}) Option { - return func(c *conf.Config) { - ts := make([]reflect.Type, len(types)) - for i, t := range types { - t := reflect.TypeOf(t) - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - if t.Kind() != reflect.Func { - panic(fmt.Sprintf("expr: type of %s is not a function", name)) - } - ts[i] = t - } - c.Functions[name] = &builtin.Function{ - Name: name, - Func: fn, - Types: ts, - } - } -} - -// Compile parses and compiles given input expression to bytecode program. -func Compile(input string, ops ...Option) (*vm.Program, error) { - config := conf.CreateNew() - - for _, op := range ops { - op(config) - } - config.Check() - - if len(config.Operators) > 0 { - config.Visitors = append(config.Visitors, &conf.OperatorPatcher{ - Operators: config.Operators, - Types: config.Types, - }) - } - - tree, err := parser.Parse(input) - if err != nil { - return nil, err - } - - if len(config.Visitors) > 0 { - for _, v := range config.Visitors { - // We need to perform types check, because some visitors may rely on - // types information available in the tree. - _, _ = checker.Check(tree, config) - ast.Walk(&tree.Node, v) - } - _, err = checker.Check(tree, config) - if err != nil { - return nil, err - } - } else { - _, err = checker.Check(tree, config) - if err != nil { - return nil, err - } - } - - if config.Optimize { - err = optimizer.Optimize(&tree.Node, config) - if err != nil { - if fileError, ok := err.(*file.Error); ok { - return nil, fileError.Bind(tree.Source) - } - return nil, err - } - } - - program, err := compiler.Compile(tree, config) - if err != nil { - return nil, err - } - - return program, nil -} - -// Run evaluates given bytecode program. -func Run(program *vm.Program, env interface{}) (interface{}, error) { - return vm.Run(program, env) -} - -// Eval parses, compiles and runs given input. -func Eval(input string, env interface{}) (interface{}, error) { - if _, ok := env.(Option); ok { - return nil, fmt.Errorf("misused expr.Eval: second argument (env) should be passed without expr.Env") - } - - program, err := Compile(input) - if err != nil { - return nil, err - } - - output, err := Run(program, env) - if err != nil { - return nil, err - } - - return output, nil -} diff --git a/vendor/github.com/antonmedv/expr/file/error.go b/vendor/github.com/antonmedv/expr/file/error.go deleted file mode 100644 index 1e7e81b947b..00000000000 --- a/vendor/github.com/antonmedv/expr/file/error.go +++ /dev/null @@ -1,69 +0,0 @@ -package file - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type Error struct { - Location - Message string - Snippet string - Prev error -} - -func (e *Error) Error() string { - return e.format() -} - -func (e *Error) Bind(source *Source) *Error { - if snippet, found := source.Snippet(e.Location.Line); found { - snippet := strings.Replace(snippet, "\t", " ", -1) - srcLine := "\n | " + snippet - var bytes = []byte(snippet) - var indLine = "\n | " - for i := 0; i < e.Location.Column && len(bytes) > 0; i++ { - _, sz := utf8.DecodeRune(bytes) - bytes = bytes[sz:] - if sz > 1 { - goto noind - } else { - indLine += "." 
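- // one dot is emitted per single-byte rune before the error column; a - // multi-byte rune skips the indicator line entirely via the goto above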
- } - } - if _, sz := utf8.DecodeRune(bytes); sz > 1 { - goto noind - } else { - indLine += "^" - } - srcLine += indLine - - noind: - e.Snippet = srcLine - } - return e -} - - -func (e *Error) Unwrap() error { - return e.Prev -} - -func (e *Error) Wrap(err error) { - e.Prev = err -} - - -func (e *Error) format() string { - if e.Location.Empty() { - return e.Message - } - return fmt.Sprintf( - "%s (%d:%d)%s", - e.Message, - e.Line, - e.Column+1, // add one to the 0-based column for display - e.Snippet, - ) -} diff --git a/vendor/github.com/antonmedv/expr/file/location.go b/vendor/github.com/antonmedv/expr/file/location.go deleted file mode 100644 index a92e27f0b1c..00000000000 --- a/vendor/github.com/antonmedv/expr/file/location.go +++ /dev/null @@ -1,10 +0,0 @@ -package file - -type Location struct { - Line int // The 1-based line of the location. - Column int // The 0-based column number of the location. -} - -func (l Location) Empty() bool { - return l.Column == 0 && l.Line == 0 -} diff --git a/vendor/github.com/antonmedv/expr/file/source.go b/vendor/github.com/antonmedv/expr/file/source.go deleted file mode 100644 index 9ee297b5802..00000000000 --- a/vendor/github.com/antonmedv/expr/file/source.go +++ /dev/null @@ -1,76 +0,0 @@ -package file - -import ( - "encoding/json" - "strings" - "unicode/utf8" -) - -type Source struct { - contents []rune - lineOffsets []int32 -} - -func NewSource(contents string) *Source { - s := &Source{ - contents: []rune(contents), - } - s.updateOffsets() - return s -} - -func (s *Source) MarshalJSON() ([]byte, error) { - return json.Marshal(s.contents) -} - -func (s *Source) UnmarshalJSON(b []byte) error { - contents := make([]rune, 0) - err := json.Unmarshal(b, &contents) - if err != nil { - return err - } - - s.contents = contents - s.updateOffsets() - return nil -} - -func (s *Source) Content() string { - return string(s.contents) -} - -func (s *Source) Snippet(line int) (string, bool) { - charStart, found := s.findLineOffset(line) - if !found || len(s.contents) == 0 { - return "", false - } - charEnd, found := s.findLineOffset(line + 1) - if found { - return string(s.contents[charStart : charEnd-1]), true - } - return string(s.contents[charStart:]), true -} - -// updateOffsets compute line offsets up front as they are referred to frequently. -func (s *Source) updateOffsets() { - lines := strings.Split(string(s.contents), "\n") - offsets := make([]int32, len(lines)) - var offset int32 - for i, line := range lines { - offset = offset + int32(utf8.RuneCountInString(line)) + 1 - offsets[int32(i)] = offset - } - s.lineOffsets = offsets -} - -// findLineOffset returns the offset where the (1-indexed) line begins, -// or false if line doesn't exist. -func (s *Source) findLineOffset(line int) (int32, bool) { - if line == 1 { - return 0, true - } else if line > 1 && line <= len(s.lineOffsets) { - offset := s.lineOffsets[line-2] - return offset, true - } - return -1, false -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go deleted file mode 100644 index 7ececb3dbad..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go +++ /dev/null @@ -1,85 +0,0 @@ -package optimizer - -import ( - "fmt" - "reflect" - "strings" - - . 
"github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/file" -) - -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -type constExpr struct { - applied bool - err error - fns map[string]reflect.Value -} - -func (c *constExpr) Visit(node *Node) { - defer func() { - if r := recover(); r != nil { - msg := fmt.Sprintf("%v", r) - // Make message more actual, it's a runtime error, but at compile step. - msg = strings.Replace(msg, "runtime error:", "compile error:", 1) - c.err = &file.Error{ - Location: (*node).Location(), - Message: msg, - } - } - }() - - patch := func(newNode Node) { - c.applied = true - Patch(node, newNode) - } - - if call, ok := (*node).(*CallNode); ok { - if name, ok := call.Callee.(*IdentifierNode); ok { - fn, ok := c.fns[name.Value] - if ok { - in := make([]reflect.Value, len(call.Arguments)) - for i := 0; i < len(call.Arguments); i++ { - arg := call.Arguments[i] - var param interface{} - - switch a := arg.(type) { - case *NilNode: - param = nil - case *IntegerNode: - param = a.Value - case *FloatNode: - param = a.Value - case *BoolNode: - param = a.Value - case *StringNode: - param = a.Value - case *ConstantNode: - param = a.Value - - default: - return // Const expr optimization not applicable. - } - - if param == nil && reflect.TypeOf(param) == nil { - // In case of nil value and nil type use this hack, - // otherwise reflect.Call will panic on zero value. - in[i] = reflect.ValueOf(¶m).Elem() - } else { - in[i] = reflect.ValueOf(param) - } - } - - out := fn.Call(in) - value := out[0].Interface() - if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() { - c.err = out[1].Interface().(error) - return - } - constNode := &ConstantNode{Value: value} - patch(constNode) - } - } - } -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_range.go b/vendor/github.com/antonmedv/expr/optimizer/const_range.go deleted file mode 100644 index 26d6d6f571b..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/const_range.go +++ /dev/null @@ -1,40 +0,0 @@ -package optimizer - -import ( - . "github.com/antonmedv/expr/ast" -) - -type constRange struct{} - -func (*constRange) Visit(node *Node) { - switch n := (*node).(type) { - case *BinaryNode: - if n.Operator == ".." { - if min, ok := n.Left.(*IntegerNode); ok { - if max, ok := n.Right.(*IntegerNode); ok { - size := max.Value - min.Value + 1 - // In case the max < min, patch empty slice - // as max must be greater than equal to min. - if size < 1 { - Patch(node, &ConstantNode{ - Value: make([]int, 0), - }) - return - } - // In this case array is too big. Skip generation, - // and wait for memory budget detection on runtime. - if size > 1e6 { - return - } - value := make([]int, size) - for i := range value { - value[i] = min.Value + i - } - Patch(node, &ConstantNode{ - Value: value, - }) - } - } - } - } -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/fold.go b/vendor/github.com/antonmedv/expr/optimizer/fold.go deleted file mode 100644 index b62b2d7ed42..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/fold.go +++ /dev/null @@ -1,343 +0,0 @@ -package optimizer - -import ( - "math" - "reflect" - - . 
"github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/file" -) - -type fold struct { - applied bool - err *file.Error -} - -func (fold *fold) Visit(node *Node) { - patch := func(newNode Node) { - fold.applied = true - Patch(node, newNode) - } - // for IntegerNode the type may have been changed from int->float - // preserve this information by setting the type after the Patch - patchWithType := func(newNode Node, leafType reflect.Type) { - patch(newNode) - newNode.SetType(leafType) - } - - switch n := (*node).(type) { - case *UnaryNode: - switch n.Operator { - case "-": - if i, ok := n.Node.(*IntegerNode); ok { - patchWithType(&IntegerNode{Value: -i.Value}, n.Node.Type()) - } - if i, ok := n.Node.(*FloatNode); ok { - patchWithType(&FloatNode{Value: -i.Value}, n.Node.Type()) - } - case "+": - if i, ok := n.Node.(*IntegerNode); ok { - patchWithType(&IntegerNode{Value: i.Value}, n.Node.Type()) - } - if i, ok := n.Node.(*FloatNode); ok { - patchWithType(&FloatNode{Value: i.Value}, n.Node.Type()) - } - case "!", "not": - if a := toBool(n.Node); a != nil { - patch(&BoolNode{Value: !a.Value}) - } - } - - case *BinaryNode: - switch n.Operator { - case "+": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&IntegerNode{Value: a.Value + b.Value}, a.Type()) - } - } - { - a := toInteger(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: float64(a.Value) + b.Value}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value + float64(b.Value)}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value + b.Value}, a.Type()) - } - } - { - a := toString(n.Left) - b := toString(n.Right) - if a != nil && b != nil { - patch(&StringNode{Value: a.Value + b.Value}) - } - } - case "-": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&IntegerNode{Value: a.Value - b.Value}, a.Type()) - } - } - { - a := toInteger(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: float64(a.Value) - b.Value}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value - float64(b.Value)}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value - b.Value}, a.Type()) - } - } - case "*": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&IntegerNode{Value: a.Value * b.Value}, a.Type()) - } - } - { - a := toInteger(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: float64(a.Value) * b.Value}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value * float64(b.Value)}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value * b.Value}, a.Type()) - } - } - case "/": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: float64(a.Value) / float64(b.Value)}, a.Type()) - } - } - { - a := toInteger(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: float64(a.Value) / 
b.Value}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value / float64(b.Value)}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: a.Value / b.Value}, a.Type()) - } - } - case "%": - if a, ok := n.Left.(*IntegerNode); ok { - if b, ok := n.Right.(*IntegerNode); ok { - if b.Value == 0 { - fold.err = &file.Error{ - Location: (*node).Location(), - Message: "integer divide by zero", - } - return - } - patch(&IntegerNode{Value: a.Value % b.Value}) - } - } - case "**", "^": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), float64(b.Value))}, a.Type()) - } - } - { - a := toInteger(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), b.Value)}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: math.Pow(a.Value, float64(b.Value))}, a.Type()) - } - } - { - a := toFloat(n.Left) - b := toFloat(n.Right) - if a != nil && b != nil { - patchWithType(&FloatNode{Value: math.Pow(a.Value, b.Value)}, a.Type()) - } - } - case "and", "&&": - a := toBool(n.Left) - b := toBool(n.Right) - - if a != nil && a.Value { // true and x - patch(n.Right) - } else if b != nil && b.Value { // x and true - patch(n.Left) - } else if (a != nil && !a.Value) || (b != nil && !b.Value) { // "x and false" or "false and x" - patch(&BoolNode{Value: false}) - } - case "or", "||": - a := toBool(n.Left) - b := toBool(n.Right) - - if a != nil && !a.Value { // false or x - patch(n.Right) - } else if b != nil && !b.Value { // x or false - patch(n.Left) - } else if (a != nil && a.Value) || (b != nil && b.Value) { // "x or true" or "true or x" - patch(&BoolNode{Value: true}) - } - case "==": - { - a := toInteger(n.Left) - b := toInteger(n.Right) - if a != nil && b != nil { - patch(&BoolNode{Value: a.Value == b.Value}) - } - } - { - a := toString(n.Left) - b := toString(n.Right) - if a != nil && b != nil { - patch(&BoolNode{Value: a.Value == b.Value}) - } - } - { - a := toBool(n.Left) - b := toBool(n.Right) - if a != nil && b != nil { - patch(&BoolNode{Value: a.Value == b.Value}) - } - } - } - - case *ArrayNode: - if len(n.Nodes) > 0 { - for _, a := range n.Nodes { - switch a.(type) { - case *IntegerNode, *FloatNode, *StringNode, *BoolNode: - continue - default: - return - } - } - value := make([]interface{}, len(n.Nodes)) - for i, a := range n.Nodes { - switch b := a.(type) { - case *IntegerNode: - value[i] = b.Value - case *FloatNode: - value[i] = b.Value - case *StringNode: - value[i] = b.Value - case *BoolNode: - value[i] = b.Value - } - } - patch(&ConstantNode{Value: value}) - } - - case *BuiltinNode: - switch n.Name { - case "filter": - if len(n.Arguments) != 2 { - return - } - if base, ok := n.Arguments[0].(*BuiltinNode); ok && base.Name == "filter" { - patch(&BuiltinNode{ - Name: "filter", - Arguments: []Node{ - base.Arguments[0], - &BinaryNode{ - Operator: "&&", - Left: base.Arguments[1], - Right: n.Arguments[1], - }, - }, - }) - } - } - } -} - -func toString(n Node) *StringNode { - switch a := n.(type) { - case *StringNode: - return a - } - return nil -} - -func toInteger(n Node) *IntegerNode { - switch a := n.(type) { - case *IntegerNode: - return a - } - return nil -} - -func toFloat(n Node) *FloatNode { - switch a := 
n.(type) { - case *FloatNode: - return a - } - return nil -} - -func toBool(n Node) *BoolNode { - switch a := n.(type) { - case *BoolNode: - return a - } - return nil -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_array.go b/vendor/github.com/antonmedv/expr/optimizer/in_array.go deleted file mode 100644 index a51957631c0..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/in_array.go +++ /dev/null @@ -1,64 +0,0 @@ -package optimizer - -import ( - "reflect" - - . "github.com/antonmedv/expr/ast" -) - -type inArray struct{} - -func (*inArray) Visit(node *Node) { - switch n := (*node).(type) { - case *BinaryNode: - if n.Operator == "in" { - if array, ok := n.Right.(*ArrayNode); ok { - if len(array.Nodes) > 0 { - t := n.Left.Type() - if t == nil || t.Kind() != reflect.Int { - // This optimization can only be performed if the left side is of int type, - // as the runtime.in func uses reflect.Map.MapIndex, and the map keys must - // be of the same type as the checked value. - goto string - } - - for _, a := range array.Nodes { - if _, ok := a.(*IntegerNode); !ok { - goto string - } - } - { - value := make(map[int]struct{}) - for _, a := range array.Nodes { - value[a.(*IntegerNode).Value] = struct{}{} - } - Patch(node, &BinaryNode{ - Operator: n.Operator, - Left: n.Left, - Right: &ConstantNode{Value: value}, - }) - } - - string: - for _, a := range array.Nodes { - if _, ok := a.(*StringNode); !ok { - return - } - } - { - value := make(map[string]struct{}) - for _, a := range array.Nodes { - value[a.(*StringNode).Value] = struct{}{} - } - Patch(node, &BinaryNode{ - Operator: n.Operator, - Left: n.Left, - Right: &ConstantNode{Value: value}, - }) - } - - } - } - } -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_range.go b/vendor/github.com/antonmedv/expr/optimizer/in_range.go deleted file mode 100644 index 7895249e0be..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/in_range.go +++ /dev/null @@ -1,34 +0,0 @@ -package optimizer - -import ( - . "github.com/antonmedv/expr/ast" -) - -type inRange struct{} - -func (*inRange) Visit(node *Node) { - switch n := (*node).(type) { - case *BinaryNode: - if n.Operator == "in" { - if rng, ok := n.Right.(*BinaryNode); ok && rng.Operator == ".." { - if from, ok := rng.Left.(*IntegerNode); ok { - if to, ok := rng.Right.(*IntegerNode); ok { - Patch(node, &BinaryNode{ - Operator: "and", - Left: &BinaryNode{ - Operator: ">=", - Left: n.Left, - Right: from, - }, - Right: &BinaryNode{ - Operator: "<=", - Left: n.Left, - Right: to, - }, - }) - } - } - } - } - } -} diff --git a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go deleted file mode 100644 index 9c97496c8d6..00000000000 --- a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go +++ /dev/null @@ -1,37 +0,0 @@ -package optimizer - -import ( - .
"github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/conf" -) - -func Optimize(node *Node, config *conf.Config) error { - Walk(node, &inArray{}) - for limit := 1000; limit >= 0; limit-- { - fold := &fold{} - Walk(node, fold) - if fold.err != nil { - return fold.err - } - if !fold.applied { - break - } - } - if config != nil && len(config.ConstFns) > 0 { - for limit := 100; limit >= 0; limit-- { - constExpr := &constExpr{ - fns: config.ConstFns, - } - Walk(node, constExpr) - if constExpr.err != nil { - return constExpr.err - } - if !constExpr.applied { - break - } - } - } - Walk(node, &inRange{}) - Walk(node, &constRange{}) - return nil -} diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go deleted file mode 100644 index cfb1e8c61b8..00000000000 --- a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go +++ /dev/null @@ -1,221 +0,0 @@ -package lexer - -import ( - "fmt" - "strings" - "unicode/utf8" - - "github.com/antonmedv/expr/file" -) - -func Lex(source *file.Source) ([]Token, error) { - l := &lexer{ - input: source.Content(), - tokens: make([]Token, 0), - } - - l.loc = file.Location{Line: 1, Column: 0} - l.prev = l.loc - l.startLoc = l.loc - - for state := root; state != nil; { - state = state(l) - } - - if l.err != nil { - return nil, l.err.Bind(source) - } - - return l.tokens, nil -} - -type lexer struct { - input string - tokens []Token - start, end int // current position in input - width int // last rune width - startLoc file.Location // start location - prev, loc file.Location // prev location of end location, end location - err *file.Error -} - -const eof rune = -1 - -func (l *lexer) next() rune { - if l.end >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.end:]) - l.width = w - l.end += w - - l.prev = l.loc - if r == '\n' { - l.loc.Line++ - l.loc.Column = 0 - } else { - l.loc.Column++ - } - - return r -} - -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -func (l *lexer) backup() { - l.end -= l.width - l.loc = l.prev -} - -func (l *lexer) emit(t Kind) { - l.emitValue(t, l.word()) -} - -func (l *lexer) emitValue(t Kind, value string) { - l.tokens = append(l.tokens, Token{ - Location: l.startLoc, - Kind: t, - Value: value, - }) - l.start = l.end - l.startLoc = l.loc -} - -func (l *lexer) emitEOF() { - l.tokens = append(l.tokens, Token{ - Location: l.prev, // Point to previous position for better error messages. 
- Kind: EOF, - }) - l.start = l.end - l.startLoc = l.loc -} - -func (l *lexer) skip() { - l.start = l.end - l.startLoc = l.loc -} - -func (l *lexer) word() string { - return l.input[l.start:l.end] -} - -func (l *lexer) ignore() { - l.start = l.end - l.startLoc = l.loc -} - -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -func (l *lexer) skipSpaces() { - r := l.peek() - for ; r == ' '; r = l.peek() { - l.next() - } - l.skip() -} - -func (l *lexer) acceptWord(word string) bool { - pos, loc, prev := l.end, l.loc, l.prev - - l.skipSpaces() - - for _, ch := range word { - if l.next() != ch { - l.end, l.loc, l.prev = pos, loc, prev - return false - } - } - if r := l.peek(); r != ' ' && r != eof { - l.end, l.loc, l.prev = pos, loc, prev - return false - } - - return true -} - -func (l *lexer) error(format string, args ...interface{}) stateFn { - if l.err == nil { // show first error - l.err = &file.Error{ - Location: l.loc, - Message: fmt.Sprintf(format, args...), - } - } - return nil -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= lower(ch) && lower(ch) <= 'f': - return int(lower(ch) - 'a' + 10) - } - return 16 // larger than any legal digit val -} - -func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter - -func (l *lexer) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = l.next() - n-- - } - if n > 0 { - l.error("invalid char escape") - } - return ch -} - -func (l *lexer) scanEscape(quote rune) rune { - ch := l.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: - // nothing to do - ch = l.next() - case '0', '1', '2', '3', '4', '5', '6', '7': - ch = l.scanDigits(ch, 8, 3) - case 'x': - ch = l.scanDigits(l.next(), 16, 2) - case 'u': - ch = l.scanDigits(l.next(), 16, 4) - case 'U': - ch = l.scanDigits(l.next(), 16, 8) - default: - l.error("invalid char escape") - } - return ch -} - -func (l *lexer) scanString(quote rune) (n int) { - ch := l.next() // read character after quote - for ch != quote { - if ch == '\n' || ch == eof { - l.error("literal not terminated") - return - } - if ch == '\\' { - ch = l.scanEscape(quote) - } else { - ch = l.next() - } - n++ - } - return -} diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/state.go b/vendor/github.com/antonmedv/expr/parser/lexer/state.go deleted file mode 100644 index 1212aa3217f..00000000000 --- a/vendor/github.com/antonmedv/expr/parser/lexer/state.go +++ /dev/null @@ -1,198 +0,0 @@ -package lexer - -import ( - "strings" -) - -type stateFn func(*lexer) stateFn - -func root(l *lexer) stateFn { - switch r := l.next(); { - case r == eof: - l.emitEOF() - return nil - case IsSpace(r): - l.ignore() - return root - case r == '\'' || r == '"': - l.scanString(r) - str, err := unescape(l.word()) - if err != nil { - l.error("%v", err) - } - l.emitValue(String, str) - case '0' <= r && r <= '9': - l.backup() - return number - case r == '?': - return questionMark - case r == '/': - return slash - case strings.ContainsRune("([{", r): - l.emit(Bracket) - case strings.ContainsRune(")]}", r): - l.emit(Bracket) - case strings.ContainsRune("#,:%+-^", r): // single rune operator - l.emit(Operator) - case strings.ContainsRune("&|!=*<>", r): // possible double rune 
operator - l.accept("&|=*") - l.emit(Operator) - case r == '.': - l.backup() - return dot - case IsAlphaNumeric(r): - l.backup() - return identifier - default: - return l.error("unrecognized character: %#U", r) - } - return root -} - -func number(l *lexer) stateFn { - if !l.scanNumber() { - return l.error("bad number syntax: %q", l.word()) - } - l.emit(Number) - return root -} - -func (l *lexer) scanNumber() bool { - digits := "0123456789_" - // Is it hex? - if l.accept("0") { - // Note: Leading 0 does not mean octal in floats. - if l.accept("xX") { - digits = "0123456789abcdefABCDEF_" - } else if l.accept("oO") { - digits = "01234567_" - } else if l.accept("bB") { - digits = "01_" - } - } - l.acceptRun(digits) - loc, prev, end := l.loc, l.prev, l.end - if l.accept(".") { - // Lookup for .. operator: if after dot there is another dot (1..2), it maybe a range operator. - if l.peek() == '.' { - // We can't backup() here, as it would require two backups, - // and backup() func supports only one for now. So, save and - // restore it here. - l.loc, l.prev, l.end = loc, prev, end - return true - } - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun(digits) - } - // Next thing mustn't be alphanumeric. - if IsAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -func dot(l *lexer) stateFn { - l.next() - if l.accept("0123456789") { - l.backup() - return number - } - l.accept(".") - l.emit(Operator) - return root -} - -func identifier(l *lexer) stateFn { -loop: - for { - switch r := l.next(); { - case IsAlphaNumeric(r): - // absorb - default: - l.backup() - switch l.word() { - case "not": - return not - case "in", "or", "and", "matches", "contains", "startsWith", "endsWith": - l.emit(Operator) - default: - l.emit(Identifier) - } - break loop - } - } - return root -} - -func not(l *lexer) stateFn { - l.emit(Operator) - - l.skipSpaces() - - pos, loc, prev := l.end, l.loc, l.prev - - // Get the next word. 
- for { - r := l.next() - if IsAlphaNumeric(r) { - // absorb - } else { - l.backup() - break - } - } - - switch l.word() { - case "in", "matches", "contains", "startsWith", "endsWith": - l.emit(Operator) - default: - l.end, l.loc, l.prev = pos, loc, prev - } - return root -} - -func questionMark(l *lexer) stateFn { - l.accept(".?") - l.emit(Operator) - return root -} - -func slash(l *lexer) stateFn { - if l.accept("/") { - return singleLineComment - } - if l.accept("*") { - return multiLineComment - } - l.emit(Operator) - return root -} - -func singleLineComment(l *lexer) stateFn { - for { - r := l.next() - if r == eof || r == '\n' { - break - } - } - l.ignore() - return root -} - -func multiLineComment(l *lexer) stateFn { - for { - r := l.next() - if r == eof { - return l.error("unclosed comment") - } - if r == '*' && l.accept("/") { - break - } - } - l.ignore() - return root -} diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/token.go b/vendor/github.com/antonmedv/expr/parser/lexer/token.go deleted file mode 100644 index 8917b26dce6..00000000000 --- a/vendor/github.com/antonmedv/expr/parser/lexer/token.go +++ /dev/null @@ -1,47 +0,0 @@ -package lexer - -import ( - "fmt" - - "github.com/antonmedv/expr/file" -) - -type Kind string - -const ( - Identifier Kind = "Identifier" - Number Kind = "Number" - String Kind = "String" - Operator Kind = "Operator" - Bracket Kind = "Bracket" - EOF Kind = "EOF" -) - -type Token struct { - file.Location - Kind Kind - Value string -} - -func (t Token) String() string { - if t.Value == "" { - return string(t.Kind) - } - return fmt.Sprintf("%s(%#v)", t.Kind, t.Value) -} - -func (t Token) Is(kind Kind, values ...string) bool { - if len(values) == 0 { - return kind == t.Kind - } - - for _, v := range values { - if v == t.Value { - goto found - } - } - return false - -found: - return kind == t.Kind -} diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/utils.go b/vendor/github.com/antonmedv/expr/parser/lexer/utils.go deleted file mode 100644 index 72e3cf20c97..00000000000 --- a/vendor/github.com/antonmedv/expr/parser/lexer/utils.go +++ /dev/null @@ -1,194 +0,0 @@ -package lexer - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -func IsSpace(r rune) bool { - return unicode.IsSpace(r) -} - -func IsAlphaNumeric(r rune) bool { - return IsAlphabetic(r) || unicode.IsDigit(r) -} - -func IsAlphabetic(r rune) bool { - return r == '_' || r == '$' || unicode.IsLetter(r) -} - -var ( - newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n") -) - -// Unescape takes a quoted string, unquotes, and unescapes it. -func unescape(value string) (string, error) { - // All strings normalize newlines to the \n representation. - value = newlineNormalizer.Replace(value) - n := len(value) - - // Nothing to unescape / decode. - if n < 2 { - return value, fmt.Errorf("unable to unescape string") - } - - // Quoted string of some form, must have same first and last char. - if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') { - return value, fmt.Errorf("unable to unescape string") - } - - value = value[1 : n-1] - - // The string contains escape characters. 
- // The following logic is adapted from `strconv/quote.go` - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*n/2) - for len(value) > 0 { - c, multibyte, rest, err := unescapeChar(value) - if err != nil { - return "", err - } - value = rest - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - } - return string(buf), nil -} - -// unescapeChar takes a string input and returns the following info: -// -// value - the escaped unicode rune at the front of the string. -// multibyte - whether the rune value might require multiple bytes to represent. -// tail - the remainder of the input string. -// err - error value, if the character could not be unescaped. -// -// When multibyte is true the return value may still fit within a single byte, -// but a multibyte conversion is attempted which is more expensive than when the -// value is known to fit within one byte. -func unescapeChar(s string) (value rune, multibyte bool, tail string, err error) { - // 1. Character is not an escape sequence. - switch c := s[0]; { - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // 2. Last character is the start of an escape sequence. - if len(s) <= 1 { - err = fmt.Errorf("unable to unescape string, found '\\' as last character") - return - } - - c := s[1] - s = s[2:] - // 3. Common escape sequences shared with Google SQL - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case '\\': - value = '\\' - case '\'': - value = '\'' - case '"': - value = '"' - case '`': - value = '`' - case '?': - value = '?' - - // 4. Unicode escape sequences, reproduced from `strconv/quote.go` - case 'x', 'X', 'u', 'U': - n := 0 - switch c { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = fmt.Errorf("unable to unescape string") - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = fmt.Errorf("unable to unescape string") - return - } - v = v<<4 | x - } - s = s[n:] - if v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") - return - } - value = v - multibyte = true - - // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7] - case '0', '1', '2', '3': - if len(s) < 2 { - err = fmt.Errorf("unable to unescape octal sequence in string") - return - } - v := rune(c - '0') - for j := 0; j < 2; j++ { - x := s[j] - if x < '0' || x > '7' { - err = fmt.Errorf("unable to unescape octal sequence in string") - return - } - v = v*8 + rune(x-'0') - } - if v > utf8.MaxRune { - err = fmt.Errorf("unable to unescape string") - return - } - value = v - s = s[2:] - multibyte = true - - // Unknown escape sequence. 
- default: - err = fmt.Errorf("unable to unescape string") - } - - tail = s - return -} - -func unhex(b byte) (rune, bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return 0, false -} diff --git a/vendor/github.com/antonmedv/expr/parser/parser.go b/vendor/github.com/antonmedv/expr/parser/parser.go deleted file mode 100644 index fd26fe18bdc..00000000000 --- a/vendor/github.com/antonmedv/expr/parser/parser.go +++ /dev/null @@ -1,610 +0,0 @@ -package parser - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" - - . "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/file" - . "github.com/antonmedv/expr/parser/lexer" -) - -type associativity int - -const ( - left associativity = iota + 1 - right -) - -type operator struct { - precedence int - associativity associativity -} - -type builtin struct { - arity int -} - -var unaryOperators = map[string]operator{ - "not": {50, left}, - "!": {50, left}, - "-": {90, left}, - "+": {90, left}, -} - -var binaryOperators = map[string]operator{ - "or": {10, left}, - "||": {10, left}, - "and": {15, left}, - "&&": {15, left}, - "==": {20, left}, - "!=": {20, left}, - "<": {20, left}, - ">": {20, left}, - ">=": {20, left}, - "<=": {20, left}, - "in": {20, left}, - "matches": {20, left}, - "contains": {20, left}, - "startsWith": {20, left}, - "endsWith": {20, left}, - "..": {25, left}, - "+": {30, left}, - "-": {30, left}, - "*": {60, left}, - "/": {60, left}, - "%": {60, left}, - "**": {100, right}, - "^": {100, right}, - "??": {500, left}, -} - -var builtins = map[string]builtin{ - "all": {2}, - "none": {2}, - "any": {2}, - "one": {2}, - "filter": {2}, - "map": {2}, - "count": {2}, -} - -type parser struct { - tokens []Token - current Token - pos int - err *file.Error - depth int // closure call depth -} - -type Tree struct { - Node Node - Source *file.Source -} - -func Parse(input string) (*Tree, error) { - source := file.NewSource(input) - - tokens, err := Lex(source) - if err != nil { - return nil, err - } - - p := &parser{ - tokens: tokens, - current: tokens[0], - } - - node := p.parseExpression(0) - - if !p.current.Is(EOF) { - p.error("unexpected token %v", p.current) - } - - if p.err != nil { - return nil, p.err.Bind(source) - } - - return &Tree{ - Node: node, - Source: source, - }, nil -} - -func (p *parser) error(format string, args ...interface{}) { - p.errorAt(p.current, format, args...) -} - -func (p *parser) errorAt(token Token, format string, args ...interface{}) { - if p.err == nil { // show first error - p.err = &file.Error{ - Location: token.Location, - Message: fmt.Sprintf(format, args...), - } - } -} - -func (p *parser) next() { - p.pos++ - if p.pos >= len(p.tokens) { - p.error("unexpected end of expression") - return - } - p.current = p.tokens[p.pos] -} - -func (p *parser) expect(kind Kind, values ...string) { - if p.current.Is(kind, values...) 
{ - p.next() - return - } - p.error("unexpected token %v", p.current) -} - -// parse functions - -func (p *parser) parseExpression(precedence int) Node { - nodeLeft := p.parsePrimary() - - lastOperator := "" - opToken := p.current - for opToken.Is(Operator) && p.err == nil { - negate := false - var notToken Token - - if opToken.Is(Operator, "not") { - p.next() - notToken = p.current - negate = true - opToken = p.current - } - - if op, ok := binaryOperators[opToken.Value]; ok { - if op.precedence >= precedence { - p.next() - - if lastOperator == "??" && opToken.Value != "??" && !opToken.Is(Bracket, "(") { - p.errorAt(opToken, "Operator (%v) and coalesce expressions (??) cannot be mixed. Wrap either by parentheses.", opToken.Value) - break - } - - var nodeRight Node - if op.associativity == left { - nodeRight = p.parseExpression(op.precedence + 1) - } else { - nodeRight = p.parseExpression(op.precedence) - } - - nodeLeft = &BinaryNode{ - Operator: opToken.Value, - Left: nodeLeft, - Right: nodeRight, - } - nodeLeft.SetLocation(opToken.Location) - - if negate { - nodeLeft = &UnaryNode{ - Operator: "not", - Node: nodeLeft, - } - nodeLeft.SetLocation(notToken.Location) - } - - lastOperator = opToken.Value - opToken = p.current - continue - } - } - break - } - - if precedence == 0 { - nodeLeft = p.parseConditionalExpression(nodeLeft) - } - - return nodeLeft -} - -func (p *parser) parsePrimary() Node { - token := p.current - - if token.Is(Operator) { - if op, ok := unaryOperators[token.Value]; ok { - p.next() - expr := p.parseExpression(op.precedence) - node := &UnaryNode{ - Operator: token.Value, - Node: expr, - } - node.SetLocation(token.Location) - return p.parsePostfixExpression(node) - } - } - - if token.Is(Bracket, "(") { - p.next() - expr := p.parseExpression(0) - p.expect(Bracket, ")") // "an opened parenthesis is not properly closed" - return p.parsePostfixExpression(expr) - } - - if p.depth > 0 { - if token.Is(Operator, "#") || token.Is(Operator, ".") { - if token.Is(Operator, "#") { - p.next() - } - node := &PointerNode{} - node.SetLocation(token.Location) - return p.parsePostfixExpression(node) - } - } else { - if token.Is(Operator, "#") || token.Is(Operator, ".") { - p.error("cannot use pointer accessor outside closure") - } - } - - return p.parsePrimaryExpression() -} - -func (p *parser) parseConditionalExpression(node Node) Node { - var expr1, expr2 Node - for p.current.Is(Operator, "?") && p.err == nil { - p.next() - - if !p.current.Is(Operator, ":") { - expr1 = p.parseExpression(0) - p.expect(Operator, ":") - expr2 = p.parseExpression(0) - } else { - p.next() - expr1 = node - expr2 = p.parseExpression(0) - } - - node = &ConditionalNode{ - Cond: node, - Exp1: expr1, - Exp2: expr2, - } - } - return node -} - -func (p *parser) parsePrimaryExpression() Node { - var node Node - token := p.current - - switch token.Kind { - - case Identifier: - p.next() - switch token.Value { - case "true": - node := &BoolNode{Value: true} - node.SetLocation(token.Location) - return node - case "false": - node := &BoolNode{Value: false} - node.SetLocation(token.Location) - return node - case "nil": - node := &NilNode{} - node.SetLocation(token.Location) - return node - default: - node = p.parseIdentifierExpression(token) - } - - case Number: - p.next() - value := strings.Replace(token.Value, "_", "", -1) - if strings.Contains(value, "x") { - number, err := strconv.ParseInt(value, 0, 64) - if err != nil { - p.error("invalid hex literal: %v", err) - } - node := &IntegerNode{Value: int(number)} - 
node.SetLocation(token.Location) - return node - } else if strings.ContainsAny(value, ".eE") { - number, err := strconv.ParseFloat(value, 64) - if err != nil { - p.error("invalid float literal: %v", err) - } - node := &FloatNode{Value: number} - node.SetLocation(token.Location) - return node - } else { - number, err := strconv.ParseInt(value, 10, 64) - if err != nil { - p.error("invalid integer literal: %v", err) - } - node := &IntegerNode{Value: int(number)} - node.SetLocation(token.Location) - return node - } - - case String: - p.next() - node := &StringNode{Value: token.Value} - node.SetLocation(token.Location) - return node - - default: - if token.Is(Bracket, "[") { - node = p.parseArrayExpression(token) - } else if token.Is(Bracket, "{") { - node = p.parseMapExpression(token) - } else { - p.error("unexpected token %v", token) - } - } - - return p.parsePostfixExpression(node) -} - -func (p *parser) parseIdentifierExpression(token Token) Node { - var node Node - if p.current.Is(Bracket, "(") { - var arguments []Node - - if b, ok := builtins[token.Value]; ok { - p.expect(Bracket, "(") - // TODO: Add builtins signatures. - if b.arity == 1 { - arguments = make([]Node, 1) - arguments[0] = p.parseExpression(0) - } else if b.arity == 2 { - arguments = make([]Node, 2) - arguments[0] = p.parseExpression(0) - p.expect(Operator, ",") - arguments[1] = p.parseClosure() - } - p.expect(Bracket, ")") - - node = &BuiltinNode{ - Name: token.Value, - Arguments: arguments, - } - node.SetLocation(token.Location) - } else { - callee := &IdentifierNode{Value: token.Value} - callee.SetLocation(token.Location) - node = &CallNode{ - Callee: callee, - Arguments: p.parseArguments(), - } - node.SetLocation(token.Location) - } - } else { - node = &IdentifierNode{Value: token.Value} - node.SetLocation(token.Location) - } - return node -} - -func (p *parser) parseClosure() Node { - startToken := p.current - expectClosingBracket := false - if p.current.Is(Bracket, "{") { - p.next() - expectClosingBracket = true - } - - p.depth++ - node := p.parseExpression(0) - p.depth-- - - if expectClosingBracket { - p.expect(Bracket, "}") - } - closure := &ClosureNode{ - Node: node, - } - closure.SetLocation(startToken.Location) - return closure -} - -func (p *parser) parseArrayExpression(token Token) Node { - nodes := make([]Node, 0) - - p.expect(Bracket, "[") - for !p.current.Is(Bracket, "]") && p.err == nil { - if len(nodes) > 0 { - p.expect(Operator, ",") - if p.current.Is(Bracket, "]") { - goto end - } - } - node := p.parseExpression(0) - nodes = append(nodes, node) - } -end: - p.expect(Bracket, "]") - - node := &ArrayNode{Nodes: nodes} - node.SetLocation(token.Location) - return node -} - -func (p *parser) parseMapExpression(token Token) Node { - p.expect(Bracket, "{") - - nodes := make([]Node, 0) - for !p.current.Is(Bracket, "}") && p.err == nil { - if len(nodes) > 0 { - p.expect(Operator, ",") - if p.current.Is(Bracket, "}") { - goto end - } - if p.current.Is(Operator, ",") { - p.error("unexpected token %v", p.current) - } - } - - var key Node - // Map key can be one of: - // * number - // * string - // * identifier, which is equivalent to a string - // * expression, which must be enclosed in parentheses -- (1 + 2) - if p.current.Is(Number) || p.current.Is(String) || p.current.Is(Identifier) { - key = &StringNode{Value: p.current.Value} - key.SetLocation(token.Location) - p.next() - } else if p.current.Is(Bracket, "(") { - key = p.parseExpression(0) - } else { - p.error("a map key must be a quoted string, a number, a 
identifier, or an expression enclosed in parentheses (unexpected token %v)", p.current) - } - - p.expect(Operator, ":") - - node := p.parseExpression(0) - pair := &PairNode{Key: key, Value: node} - pair.SetLocation(token.Location) - nodes = append(nodes, pair) - } - -end: - p.expect(Bracket, "}") - - node := &MapNode{Pairs: nodes} - node.SetLocation(token.Location) - return node -} - -func (p *parser) parsePostfixExpression(node Node) Node { - postfixToken := p.current - for (postfixToken.Is(Operator) || postfixToken.Is(Bracket)) && p.err == nil { - if postfixToken.Value == "." || postfixToken.Value == "?." { - p.next() - - propertyToken := p.current - p.next() - - if propertyToken.Kind != Identifier && - // Operators like "not" and "matches" are valid methods or property names. - (propertyToken.Kind != Operator || !isValidIdentifier(propertyToken.Value)) { - p.error("expected name") - } - - property := &StringNode{Value: propertyToken.Value} - property.SetLocation(propertyToken.Location) - - chainNode, isChain := node.(*ChainNode) - optional := postfixToken.Value == "?." - - if isChain { - node = chainNode.Node - } - - memberNode := &MemberNode{ - Node: node, - Property: property, - Optional: optional, - } - memberNode.SetLocation(propertyToken.Location) - - if p.current.Is(Bracket, "(") { - node = &CallNode{ - Callee: memberNode, - Arguments: p.parseArguments(), - } - node.SetLocation(propertyToken.Location) - } else { - node = memberNode - } - - if isChain || optional { - node = &ChainNode{Node: node} - } - - } else if postfixToken.Value == "[" { - p.next() - var from, to Node - - if p.current.Is(Operator, ":") { // slice without from [:1] - p.next() - - if !p.current.Is(Bracket, "]") { // slice without from and to [:] - to = p.parseExpression(0) - } - - node = &SliceNode{ - Node: node, - To: to, - } - node.SetLocation(postfixToken.Location) - p.expect(Bracket, "]") - - } else { - - from = p.parseExpression(0) - - if p.current.Is(Operator, ":") { - p.next() - - if !p.current.Is(Bracket, "]") { // slice without to [1:] - to = p.parseExpression(0) - } - - node = &SliceNode{ - Node: node, - From: from, - To: to, - } - node.SetLocation(postfixToken.Location) - p.expect(Bracket, "]") - - } else { - // Slice operator [:] was not found, - // it should be just an index node. - node = &MemberNode{ - Node: node, - Property: from, - } - node.SetLocation(postfixToken.Location) - p.expect(Bracket, "]") - } - } - } else { - break - } - postfixToken = p.current - } - return node -} - -func isValidIdentifier(str string) bool { - if len(str) == 0 { - return false - } - h, w := utf8.DecodeRuneInString(str) - if !IsAlphabetic(h) { - return false - } - for _, r := range str[w:] { - if !IsAlphaNumeric(r) { - return false - } - } - return true -} - -func (p *parser) parseArguments() []Node { - p.expect(Bracket, "(") - nodes := make([]Node, 0) - for !p.current.Is(Bracket, ")") && p.err == nil { - if len(nodes) > 0 { - p.expect(Operator, ",") - } - node := p.parseExpression(0) - nodes = append(nodes, node) - } - p.expect(Bracket, ")") - - return nodes -} diff --git a/vendor/github.com/antonmedv/expr/vm/generated.go b/vendor/github.com/antonmedv/expr/vm/generated.go deleted file mode 100644 index 9fc7883e2df..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/generated.go +++ /dev/null @@ -1,262 +0,0 @@ -// Code generated by vm/func_types/main.go. DO NOT EDIT. 
- -package vm - -import ( - "fmt" - "time" -) - -var FuncTypes = []interface{}{ - 1: new(func() time.Duration), - 2: new(func() time.Month), - 3: new(func() time.Time), - 4: new(func() time.Weekday), - 5: new(func() []uint8), - 6: new(func() []interface{}), - 7: new(func() bool), - 8: new(func() uint8), - 9: new(func() float64), - 10: new(func() int), - 11: new(func() int64), - 12: new(func() interface{}), - 13: new(func() map[string]interface{}), - 14: new(func() int32), - 15: new(func() string), - 16: new(func() uint), - 17: new(func() uint64), - 18: new(func(time.Duration) time.Duration), - 19: new(func(time.Duration) time.Time), - 20: new(func(time.Time) time.Duration), - 21: new(func(time.Time) bool), - 22: new(func([]interface{}, string) string), - 23: new(func([]string, string) string), - 24: new(func(bool) bool), - 25: new(func(bool) float64), - 26: new(func(bool) int), - 27: new(func(bool) string), - 28: new(func(float64) bool), - 29: new(func(float64) float64), - 30: new(func(float64) int), - 31: new(func(float64) string), - 32: new(func(int) bool), - 33: new(func(int) float64), - 34: new(func(int) int), - 35: new(func(int) string), - 36: new(func(int, int) int), - 37: new(func(int, int) string), - 38: new(func(int64) time.Time), - 39: new(func(string) []string), - 40: new(func(string) bool), - 41: new(func(string) float64), - 42: new(func(string) int), - 43: new(func(string) string), - 44: new(func(string, uint8) int), - 45: new(func(string, int) int), - 46: new(func(string, int32) int), - 47: new(func(string, string) bool), - 48: new(func(string, string) string), - 49: new(func(interface{}) bool), - 50: new(func(interface{}) float64), - 51: new(func(interface{}) int), - 52: new(func(interface{}) string), - 53: new(func(interface{}) interface{}), - 54: new(func(interface{}) []interface{}), - 55: new(func(interface{}) map[string]interface{}), - 56: new(func([]interface{}) interface{}), - 57: new(func([]interface{}) []interface{}), - 58: new(func([]interface{}) map[string]interface{}), - 59: new(func(interface{}, interface{}) bool), - 60: new(func(interface{}, interface{}) string), - 61: new(func(interface{}, interface{}) interface{}), - 62: new(func(interface{}, interface{}) []interface{}), -} - -func (vm *VM) call(fn interface{}, kind int) interface{} { - switch kind { - case 1: - return fn.(func() time.Duration)() - case 2: - return fn.(func() time.Month)() - case 3: - return fn.(func() time.Time)() - case 4: - return fn.(func() time.Weekday)() - case 5: - return fn.(func() []uint8)() - case 6: - return fn.(func() []interface{})() - case 7: - return fn.(func() bool)() - case 8: - return fn.(func() uint8)() - case 9: - return fn.(func() float64)() - case 10: - return fn.(func() int)() - case 11: - return fn.(func() int64)() - case 12: - return fn.(func() interface{})() - case 13: - return fn.(func() map[string]interface{})() - case 14: - return fn.(func() int32)() - case 15: - return fn.(func() string)() - case 16: - return fn.(func() uint)() - case 17: - return fn.(func() uint64)() - case 18: - arg1 := vm.pop().(time.Duration) - return fn.(func(time.Duration) time.Duration)(arg1) - case 19: - arg1 := vm.pop().(time.Duration) - return fn.(func(time.Duration) time.Time)(arg1) - case 20: - arg1 := vm.pop().(time.Time) - return fn.(func(time.Time) time.Duration)(arg1) - case 21: - arg1 := vm.pop().(time.Time) - return fn.(func(time.Time) bool)(arg1) - case 22: - arg2 := vm.pop().(string) - arg1 := vm.pop().([]interface{}) - return fn.(func([]interface{}, string) string)(arg1, arg2) 
- case 23: - arg2 := vm.pop().(string) - arg1 := vm.pop().([]string) - return fn.(func([]string, string) string)(arg1, arg2) - case 24: - arg1 := vm.pop().(bool) - return fn.(func(bool) bool)(arg1) - case 25: - arg1 := vm.pop().(bool) - return fn.(func(bool) float64)(arg1) - case 26: - arg1 := vm.pop().(bool) - return fn.(func(bool) int)(arg1) - case 27: - arg1 := vm.pop().(bool) - return fn.(func(bool) string)(arg1) - case 28: - arg1 := vm.pop().(float64) - return fn.(func(float64) bool)(arg1) - case 29: - arg1 := vm.pop().(float64) - return fn.(func(float64) float64)(arg1) - case 30: - arg1 := vm.pop().(float64) - return fn.(func(float64) int)(arg1) - case 31: - arg1 := vm.pop().(float64) - return fn.(func(float64) string)(arg1) - case 32: - arg1 := vm.pop().(int) - return fn.(func(int) bool)(arg1) - case 33: - arg1 := vm.pop().(int) - return fn.(func(int) float64)(arg1) - case 34: - arg1 := vm.pop().(int) - return fn.(func(int) int)(arg1) - case 35: - arg1 := vm.pop().(int) - return fn.(func(int) string)(arg1) - case 36: - arg2 := vm.pop().(int) - arg1 := vm.pop().(int) - return fn.(func(int, int) int)(arg1, arg2) - case 37: - arg2 := vm.pop().(int) - arg1 := vm.pop().(int) - return fn.(func(int, int) string)(arg1, arg2) - case 38: - arg1 := vm.pop().(int64) - return fn.(func(int64) time.Time)(arg1) - case 39: - arg1 := vm.pop().(string) - return fn.(func(string) []string)(arg1) - case 40: - arg1 := vm.pop().(string) - return fn.(func(string) bool)(arg1) - case 41: - arg1 := vm.pop().(string) - return fn.(func(string) float64)(arg1) - case 42: - arg1 := vm.pop().(string) - return fn.(func(string) int)(arg1) - case 43: - arg1 := vm.pop().(string) - return fn.(func(string) string)(arg1) - case 44: - arg2 := vm.pop().(uint8) - arg1 := vm.pop().(string) - return fn.(func(string, uint8) int)(arg1, arg2) - case 45: - arg2 := vm.pop().(int) - arg1 := vm.pop().(string) - return fn.(func(string, int) int)(arg1, arg2) - case 46: - arg2 := vm.pop().(int32) - arg1 := vm.pop().(string) - return fn.(func(string, int32) int)(arg1, arg2) - case 47: - arg2 := vm.pop().(string) - arg1 := vm.pop().(string) - return fn.(func(string, string) bool)(arg1, arg2) - case 48: - arg2 := vm.pop().(string) - arg1 := vm.pop().(string) - return fn.(func(string, string) string)(arg1, arg2) - case 49: - arg1 := vm.pop() - return fn.(func(interface{}) bool)(arg1) - case 50: - arg1 := vm.pop() - return fn.(func(interface{}) float64)(arg1) - case 51: - arg1 := vm.pop() - return fn.(func(interface{}) int)(arg1) - case 52: - arg1 := vm.pop() - return fn.(func(interface{}) string)(arg1) - case 53: - arg1 := vm.pop() - return fn.(func(interface{}) interface{})(arg1) - case 54: - arg1 := vm.pop() - return fn.(func(interface{}) []interface{})(arg1) - case 55: - arg1 := vm.pop() - return fn.(func(interface{}) map[string]interface{})(arg1) - case 56: - arg1 := vm.pop().([]interface{}) - return fn.(func([]interface{}) interface{})(arg1) - case 57: - arg1 := vm.pop().([]interface{}) - return fn.(func([]interface{}) []interface{})(arg1) - case 58: - arg1 := vm.pop().([]interface{}) - return fn.(func([]interface{}) map[string]interface{})(arg1) - case 59: - arg2 := vm.pop() - arg1 := vm.pop() - return fn.(func(interface{}, interface{}) bool)(arg1, arg2) - case 60: - arg2 := vm.pop() - arg1 := vm.pop() - return fn.(func(interface{}, interface{}) string)(arg1, arg2) - case 61: - arg2 := vm.pop() - arg1 := vm.pop() - return fn.(func(interface{}, interface{}) interface{})(arg1, arg2) - case 62: - arg2 := vm.pop() - arg1 := vm.pop() - 
return fn.(func(interface{}, interface{}) []interface{})(arg1, arg2) - - } - panic(fmt.Sprintf("unknown function kind (%v)", kind)) -} diff --git a/vendor/github.com/antonmedv/expr/vm/opcodes.go b/vendor/github.com/antonmedv/expr/vm/opcodes.go deleted file mode 100644 index b3117e73c2b..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/opcodes.go +++ /dev/null @@ -1,71 +0,0 @@ -package vm - -type Opcode byte - -const ( - OpPush Opcode = iota - OpPushInt - OpPop - OpLoadConst - OpLoadField - OpLoadFast - OpLoadMethod - OpLoadFunc - OpFetch - OpFetchField - OpMethod - OpTrue - OpFalse - OpNil - OpNegate - OpNot - OpEqual - OpEqualInt - OpEqualString - OpJump - OpJumpIfTrue - OpJumpIfFalse - OpJumpIfNil - OpJumpIfNotNil - OpJumpIfEnd - OpJumpBackward - OpIn - OpLess - OpMore - OpLessOrEqual - OpMoreOrEqual - OpAdd - OpSubtract - OpMultiply - OpDivide - OpModulo - OpExponent - OpRange - OpMatches - OpMatchesConst - OpContains - OpStartsWith - OpEndsWith - OpSlice - OpCall - OpCall0 - OpCall1 - OpCall2 - OpCall3 - OpCallN - OpCallFast - OpCallTyped - OpBuiltin - OpArray - OpMap - OpLen - OpCast - OpDeref - OpIncrementIt - OpIncrementCount - OpGetCount - OpGetLen - OpPointer - OpBegin - OpEnd // This opcode must be at the end of this list. -) diff --git a/vendor/github.com/antonmedv/expr/vm/program.go b/vendor/github.com/antonmedv/expr/vm/program.go deleted file mode 100644 index d424df14f47..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/program.go +++ /dev/null @@ -1,278 +0,0 @@ -package vm - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strings" - "text/tabwriter" - - "github.com/antonmedv/expr/ast" - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/vm/runtime" -) - -type Program struct { - Node ast.Node - Source *file.Source - Locations []file.Location - Constants []interface{} - Bytecode []Opcode - Arguments []int - Functions []Function -} - -func (program *Program) Disassemble() string { - var buf bytes.Buffer - w := tabwriter.NewWriter(&buf, 0, 0, 2, ' ', 0) - ip := 0 - for ip < len(program.Bytecode) { - pp := ip - op := program.Bytecode[ip] - arg := program.Arguments[ip] - ip += 1 - - code := func(label string) { - _, _ = fmt.Fprintf(w, "%v\t%v\n", pp, label) - } - jump := func(label string) { - _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip+arg) - } - jumpBack := func(label string) { - _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t(%v)\n", pp, label, arg, ip-arg) - } - argument := func(label string) { - _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\n", pp, label, arg) - } - constant := func(label string) { - var c interface{} - if arg < len(program.Constants) { - c = program.Constants[arg] - } else { - c = "out of range" - } - if r, ok := c.(*regexp.Regexp); ok { - c = r.String() - } - if field, ok := c.(*runtime.Field); ok { - c = fmt.Sprintf("{%v %v}", strings.Join(field.Path, "."), field.Index) - } - if method, ok := c.(*runtime.Method); ok { - c = fmt.Sprintf("{%v %v}", method.Name, method.Index) - } - _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, label, arg, c) - } - builtIn := func(label string) { - f, ok := builtin.Builtins[arg] - if !ok { - panic(fmt.Sprintf("unknown builtin %v", arg)) - } - _, _ = fmt.Fprintf(w, "%v\t%v\t%v\n", pp, "OpBuiltin", f.Name) - } - - switch op { - case OpPush: - constant("OpPush") - - case OpPushInt: - argument("OpPushInt") - - case OpPop: - code("OpPop") - - case OpLoadConst: - constant("OpLoadConst") - - case OpLoadField: - constant("OpLoadField") - - case OpLoadFast: - 
constant("OpLoadFast") - - case OpLoadMethod: - constant("OpLoadMethod") - - case OpLoadFunc: - argument("OpLoadFunc") - - case OpFetch: - code("OpFetch") - - case OpFetchField: - constant("OpFetchField") - - case OpMethod: - constant("OpMethod") - - case OpTrue: - code("OpTrue") - - case OpFalse: - code("OpFalse") - - case OpNil: - code("OpNil") - - case OpNegate: - code("OpNegate") - - case OpNot: - code("OpNot") - - case OpEqual: - code("OpEqual") - - case OpEqualInt: - code("OpEqualInt") - - case OpEqualString: - code("OpEqualString") - - case OpJump: - jump("OpJump") - - case OpJumpIfTrue: - jump("OpJumpIfTrue") - - case OpJumpIfFalse: - jump("OpJumpIfFalse") - - case OpJumpIfNil: - jump("OpJumpIfNil") - - case OpJumpIfNotNil: - jump("OpJumpIfNotNil") - - case OpJumpIfEnd: - jump("OpJumpIfEnd") - - case OpJumpBackward: - jumpBack("OpJumpBackward") - - case OpIn: - code("OpIn") - - case OpLess: - code("OpLess") - - case OpMore: - code("OpMore") - - case OpLessOrEqual: - code("OpLessOrEqual") - - case OpMoreOrEqual: - code("OpMoreOrEqual") - - case OpAdd: - code("OpAdd") - - case OpSubtract: - code("OpSubtract") - - case OpMultiply: - code("OpMultiply") - - case OpDivide: - code("OpDivide") - - case OpModulo: - code("OpModulo") - - case OpExponent: - code("OpExponent") - - case OpRange: - code("OpRange") - - case OpMatches: - code("OpMatches") - - case OpMatchesConst: - constant("OpMatchesConst") - - case OpContains: - code("OpContains") - - case OpStartsWith: - code("OpStartsWith") - - case OpEndsWith: - code("OpEndsWith") - - case OpSlice: - code("OpSlice") - - case OpCall: - argument("OpCall") - - case OpCall0: - argument("OpCall0") - - case OpCall1: - argument("OpCall1") - - case OpCall2: - argument("OpCall2") - - case OpCall3: - argument("OpCall3") - - case OpCallN: - argument("OpCallN") - - case OpCallFast: - argument("OpCallFast") - - case OpCallTyped: - signature := reflect.TypeOf(FuncTypes[arg]).Elem().String() - _, _ = fmt.Fprintf(w, "%v\t%v\t<%v>\t%v\n", pp, "OpCallTyped", arg, signature) - - case OpBuiltin: - builtIn("OpBuiltin") - - case OpArray: - code("OpArray") - - case OpMap: - code("OpMap") - - case OpLen: - code("OpLen") - - case OpCast: - argument("OpCast") - - case OpDeref: - code("OpDeref") - - case OpIncrementIt: - code("OpIncrementIt") - - case OpIncrementCount: - code("OpIncrementCount") - - case OpGetCount: - code("OpGetCount") - - case OpGetLen: - code("OpGetLen") - - case OpPointer: - code("OpPointer") - - case OpBegin: - code("OpBegin") - - case OpEnd: - code("OpEnd") - - default: - _, _ = fmt.Fprintf(w, "%v\t%#x\n", ip, op) - } - } - _ = w.Flush() - return buf.String() -} diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/generated.go b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go deleted file mode 100644 index 09a4a200ed2..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/runtime/generated.go +++ /dev/null @@ -1,3288 +0,0 @@ -// Code generated by vm/runtime/helpers/main.go. DO NOT EDIT. 
- -package runtime - -import ( - "fmt" - "reflect" - "time" -) - -func Equal(a, b interface{}) bool { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - 
return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) == int(y) - case uint8: - return int(x) == int(y) - case uint16: - return int(x) == int(y) - case uint32: - return int(x) == int(y) - case uint64: - return int(x) == int(y) - case int: - return int(x) == int(y) - case int8: - return int(x) == int(y) - case int16: - return int(x) == int(y) - case int32: - return int(x) == int(y) - case int64: - return int(x) == int(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) == float64(y) - case uint8: - return float64(x) == float64(y) - case uint16: - return float64(x) == float64(y) - case uint32: - return float64(x) == float64(y) - case uint64: - return float64(x) == float64(y) - case int: - return float64(x) == float64(y) - case int8: - return float64(x) == float64(y) - case int16: - return float64(x) == float64(y) - case int32: - return float64(x) == float64(y) - case int64: - return float64(x) == float64(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) == float64(y) - case uint8: - return float64(x) == float64(y) - case uint16: - return float64(x) == float64(y) - case uint32: - return float64(x) == float64(y) - case uint64: - return float64(x) == float64(y) - case int: - return float64(x) == float64(y) - case int8: - return float64(x) == float64(y) - case int16: - return float64(x) == float64(y) - case int32: - return float64(x) == float64(y) - case int64: - return float64(x) == float64(y) - case float32: - return float64(x) == float64(y) - case float64: - return float64(x) == float64(y) - } - case string: - switch y := b.(type) { - case string: - return x == y - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.Equal(y) - } - } - if IsNil(a) && IsNil(b) { - return true - } - return reflect.DeepEqual(a, b) -} - -func Less(a, b interface{}) bool { - switch x := a.(type) { - 
case uint: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return 
int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) < int(y) - case uint8: - return int(x) < int(y) - case uint16: - return int(x) < int(y) - case uint32: - return int(x) < int(y) - case uint64: - return int(x) < int(y) - case int: - return int(x) < int(y) - case int8: - return int(x) < int(y) - case int16: - return int(x) < int(y) - case int32: - return int(x) < int(y) - case int64: - return int(x) < int(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) < float64(y) - case uint8: - return float64(x) < float64(y) - case uint16: - return float64(x) < float64(y) - case uint32: - return float64(x) < float64(y) - case uint64: - return float64(x) < float64(y) - case int: - return float64(x) < float64(y) - case int8: - return float64(x) < float64(y) - case int16: - return float64(x) < float64(y) - case int32: - return float64(x) < float64(y) - case int64: - return float64(x) < float64(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) < float64(y) - case uint8: - return float64(x) < float64(y) - case uint16: - return float64(x) < float64(y) - case uint32: - return float64(x) < float64(y) - case uint64: - return float64(x) < float64(y) - case int: - return float64(x) < float64(y) - case int8: - return float64(x) < float64(y) - case int16: - return float64(x) < float64(y) - case int32: - return float64(x) < float64(y) - case int64: - return float64(x) < float64(y) - case float32: - return float64(x) < float64(y) - case float64: - return float64(x) < float64(y) - } - case string: - switch y := b.(type) { - case string: - return x < y - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.Before(y) - } - } - panic(fmt.Sprintf("invalid operation: %T < %T", a, b)) -} - -func More(a, b interface{}) bool { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return 
int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return 
int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) > int(y) - case uint8: - return int(x) > int(y) - case uint16: - return int(x) > int(y) - case uint32: - return int(x) > int(y) - case uint64: - return int(x) > int(y) - case int: - return int(x) > int(y) - case int8: - return int(x) > int(y) - case int16: - return int(x) > int(y) - case int32: - return int(x) > int(y) - case int64: - return int(x) > int(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) > float64(y) - case uint8: - return float64(x) > float64(y) - case uint16: - return float64(x) > float64(y) - case uint32: - return float64(x) > float64(y) - case uint64: - return float64(x) > float64(y) - case int: - return float64(x) > float64(y) - case int8: - return float64(x) > float64(y) - case int16: - return float64(x) > float64(y) - case int32: - return float64(x) > float64(y) - case int64: - return float64(x) > float64(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) > float64(y) - case uint8: - return float64(x) > float64(y) - case uint16: - return float64(x) > float64(y) - case uint32: - return float64(x) > float64(y) - case uint64: - return float64(x) > float64(y) - case int: - return float64(x) > float64(y) - case int8: - return float64(x) > float64(y) - case int16: - return float64(x) > float64(y) - case int32: - return float64(x) > float64(y) - case int64: - return float64(x) > float64(y) - case float32: - return float64(x) > float64(y) - case float64: - return float64(x) > float64(y) - } - case string: - switch y := b.(type) { - case string: - return x > y - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.After(y) - } - } - panic(fmt.Sprintf("invalid operation: %T > %T", a, b)) -} - -func LessOrEqual(a, b interface{}) bool { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case uint8: - switch y := b.(type) { - case 
uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) 
<= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) <= int(y) - case uint8: - return int(x) <= int(y) - case uint16: - return int(x) <= int(y) - case uint32: - return int(x) <= int(y) - case uint64: - return int(x) <= int(y) - case int: - return int(x) <= int(y) - case int8: - return int(x) <= int(y) - case int16: - return int(x) <= int(y) - case int32: - return int(x) <= int(y) - case int64: - return int(x) <= int(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) <= float64(y) - case uint8: - return float64(x) <= float64(y) - case uint16: - return float64(x) <= float64(y) - case uint32: - return float64(x) <= float64(y) - case uint64: - return float64(x) <= float64(y) - case int: - return float64(x) <= float64(y) - case int8: - return float64(x) <= float64(y) - case int16: - return float64(x) <= float64(y) - case int32: - return float64(x) <= float64(y) - case int64: - return float64(x) <= float64(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) <= float64(y) - case uint8: - return float64(x) <= float64(y) - case uint16: - return float64(x) <= float64(y) - case uint32: - return float64(x) <= float64(y) - case uint64: - return float64(x) <= float64(y) - case int: - return float64(x) <= float64(y) - case int8: - return float64(x) <= float64(y) - case int16: - return float64(x) <= float64(y) - case int32: - return float64(x) <= float64(y) - case int64: - return float64(x) <= float64(y) - case float32: - return float64(x) <= float64(y) - case float64: - return float64(x) <= float64(y) - } - case string: - switch y := b.(type) { - case string: - return x <= y - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.Before(y) || x.Equal(y) - } - } - panic(fmt.Sprintf("invalid operation: %T <= %T", a, b)) -} - -func MoreOrEqual(a, b interface{}) bool { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - 
return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= 
float64(y) - case float64: - return float64(x) >= float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) >= int(y) - case uint8: - return int(x) >= int(y) - case uint16: - return int(x) >= int(y) - case uint32: - return int(x) >= int(y) - case uint64: - return int(x) >= int(y) - case int: - return int(x) >= int(y) - case int8: - return int(x) >= int(y) - case int16: - return int(x) >= int(y) - case int32: - return int(x) >= int(y) - case int64: - return int(x) >= int(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) >= float64(y) - case uint8: - return float64(x) >= float64(y) - case uint16: - return float64(x) >= float64(y) - case uint32: - return float64(x) >= float64(y) - case uint64: - return float64(x) >= float64(y) - case int: - return float64(x) >= float64(y) - case int8: - return float64(x) >= float64(y) - case int16: - return float64(x) >= float64(y) - case int32: - return float64(x) >= float64(y) - case int64: - return float64(x) >= float64(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) >= float64(y) - case uint8: - return float64(x) >= float64(y) - case uint16: - return float64(x) >= float64(y) - case uint32: - return float64(x) >= float64(y) - case uint64: - return float64(x) >= float64(y) - case int: - return float64(x) >= float64(y) - case int8: - return float64(x) >= float64(y) - case int16: - return float64(x) >= float64(y) - case int32: - return float64(x) >= float64(y) - case int64: - return float64(x) >= float64(y) - case float32: - return float64(x) >= float64(y) - case float64: - return float64(x) >= float64(y) - } - case string: - switch y := b.(type) { - case string: - return x >= y - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.After(y) || x.Equal(y) - } - } - panic(fmt.Sprintf("invalid operation: %T >= %T", a, b)) -} - -func Add(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case 
int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case 
uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) + int(y) - case uint8: - return int(x) + int(y) - case uint16: - return int(x) + int(y) - case uint32: - return int(x) + int(y) - case uint64: - return int(x) + int(y) - case int: - return int(x) + int(y) - case int8: - return int(x) + int(y) - case int16: - return int(x) + int(y) - case int32: - return int(x) + int(y) - case int64: - return int(x) + int(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) + float64(y) - case uint8: - return float64(x) + float64(y) - case uint16: - return float64(x) + float64(y) - case uint32: - return float64(x) + float64(y) - case uint64: - return float64(x) + float64(y) - case int: - return float64(x) + float64(y) - case int8: - return float64(x) + float64(y) - case int16: - return float64(x) + float64(y) - case int32: - return float64(x) + float64(y) - case int64: - return float64(x) + float64(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) + float64(y) - case uint8: - return float64(x) + float64(y) - case uint16: - return float64(x) + float64(y) - case uint32: - return float64(x) + float64(y) - case uint64: - return float64(x) + float64(y) - case int: - return float64(x) + float64(y) - case int8: - return float64(x) + float64(y) - case int16: - return float64(x) + float64(y) - case int32: - return float64(x) + float64(y) - case int64: - return float64(x) + float64(y) - case float32: - return float64(x) + float64(y) - case float64: - return float64(x) + float64(y) - } - case string: - switch y := b.(type) { - case string: - return x + y - } - case time.Time: - switch y := b.(type) { - case time.Duration: - return x.Add(y) - } - case time.Duration: - switch y := b.(type) { - case time.Time: - return y.Add(x) - } - } - panic(fmt.Sprintf("invalid operation: %T + %T", a, b)) -} - -func Subtract(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - 
float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case 
int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) - int(y) - case uint8: - return int(x) - int(y) - case uint16: - return int(x) - int(y) - case uint32: - return int(x) - int(y) - case uint64: - return int(x) - int(y) - case int: - return int(x) - int(y) - case int8: - return int(x) - int(y) - case int16: - return int(x) - int(y) - case int32: - return int(x) - int(y) - case int64: - return int(x) - int(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) - float64(y) - case uint8: - return float64(x) - float64(y) - case uint16: - return float64(x) - float64(y) - case uint32: - return float64(x) - float64(y) - case uint64: - return float64(x) - float64(y) - case int: - return float64(x) - float64(y) - case int8: - return float64(x) - float64(y) - case int16: - return float64(x) - float64(y) - case int32: - return float64(x) - float64(y) - case int64: - return float64(x) - float64(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) - float64(y) - case uint8: - return float64(x) - float64(y) - case uint16: - return float64(x) - float64(y) - case uint32: - return float64(x) - float64(y) - case uint64: - return float64(x) - float64(y) - case int: - return float64(x) - float64(y) - case int8: - return float64(x) - float64(y) - case int16: - return float64(x) - float64(y) - case int32: - return float64(x) - float64(y) - case int64: - return float64(x) - float64(y) - case float32: - return float64(x) - float64(y) - case float64: - return float64(x) - float64(y) - } - case time.Time: - switch y := b.(type) { - case time.Time: - return x.Sub(y) - } - } - panic(fmt.Sprintf("invalid operation: %T - %T", a, b)) -} - -func Multiply(a, b interface{}) interface{} { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * 
int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) * int(y) - case uint8: - return int(x) * int(y) - case uint16: - return int(x) * int(y) - case uint32: - return int(x) * 
int(y) - case uint64: - return int(x) * int(y) - case int: - return int(x) * int(y) - case int8: - return int(x) * int(y) - case int16: - return int(x) * int(y) - case int32: - return int(x) * int(y) - case int64: - return int(x) * int(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) * float64(y) - case uint8: - return float64(x) * float64(y) - case uint16: - return float64(x) * float64(y) - case uint32: - return float64(x) * float64(y) - case uint64: - return float64(x) * float64(y) - case int: - return float64(x) * float64(y) - case int8: - return float64(x) * float64(y) - case int16: - return float64(x) * float64(y) - case int32: - return float64(x) * float64(y) - case int64: - return float64(x) * float64(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) * float64(y) - case uint8: - return float64(x) * float64(y) - case uint16: - return float64(x) * float64(y) - case uint32: - return float64(x) * float64(y) - case uint64: - return float64(x) * float64(y) - case int: - return float64(x) * float64(y) - case int8: - return float64(x) * float64(y) - case int16: - return float64(x) * float64(y) - case int32: - return float64(x) * float64(y) - case int64: - return float64(x) * float64(y) - case float32: - return float64(x) * float64(y) - case float64: - return float64(x) * float64(y) - } - } - panic(fmt.Sprintf("invalid operation: %T * %T", a, b)) -} - -func Divide(a, b interface{}) float64 { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case uint8: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case uint16: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case uint32: - switch y 
:= b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case uint64: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case int: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case int8: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case int16: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case int32: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - 
return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case int64: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case float32: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - case float64: - switch y := b.(type) { - case uint: - return float64(x) / float64(y) - case uint8: - return float64(x) / float64(y) - case uint16: - return float64(x) / float64(y) - case uint32: - return float64(x) / float64(y) - case uint64: - return float64(x) / float64(y) - case int: - return float64(x) / float64(y) - case int8: - return float64(x) / float64(y) - case int16: - return float64(x) / float64(y) - case int32: - return float64(x) / float64(y) - case int64: - return float64(x) / float64(y) - case float32: - return float64(x) / float64(y) - case float64: - return float64(x) / float64(y) - } - } - panic(fmt.Sprintf("invalid operation: %T / %T", a, b)) -} - -func Modulo(a, b interface{}) int { - switch x := a.(type) { - case uint: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case uint8: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case uint16: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case uint32: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: 
- return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case uint64: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case int: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case int8: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case int16: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case int32: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - case int64: - switch y := b.(type) { - case uint: - return int(x) % int(y) - case uint8: - return int(x) % int(y) - case uint16: - return int(x) % int(y) - case uint32: - return int(x) % int(y) - case uint64: - return int(x) % int(y) - case int: - return int(x) % int(y) - case int8: - return int(x) % int(y) - case int16: - return int(x) % int(y) - case int32: - return int(x) % int(y) - case int64: - return int(x) % int(y) - } - } - panic(fmt.Sprintf("invalid operation: %T %% %T", a, b)) -} diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go deleted file mode 100644 index b2eeb65d83c..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go +++ /dev/null @@ -1,517 +0,0 @@ -package runtime - -//go:generate sh -c "go run ./helpers > ./generated.go" - -import ( - "fmt" - "math" - "reflect" - "strconv" -) - -func Fetch(from, i interface{}) interface{} { - v := reflect.ValueOf(from) - kind := v.Kind() - if kind == reflect.Invalid { - panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) - } - - // Methods can be defined on any type. 
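
The generated.go file deleted above implements expr's dynamic comparison and arithmetic by brute force: an outer type switch on the left operand, an inner type switch on the right, widening both sides to int or float64 before applying the operator, and a panic on unsupported pairs. A minimal sketch of that double-dispatch pattern, cut down to three representative cases (the name lessOrEqual is illustrative, not the library's export; the vendored file spells this out for every int/uint/float pairing, and note that its routing of uint64 values through int can overflow for very large inputs):

    package main

    import "fmt"

    // lessOrEqual mirrors the deleted helpers: dispatch on both operand
    // types and widen to a common type before comparing.
    func lessOrEqual(a, b interface{}) bool {
        switch x := a.(type) {
        case int:
            switch y := b.(type) {
            case int:
                return x <= y
            case float64:
                return float64(x) <= y
            }
        case float64:
            switch y := b.(type) {
            case int:
                return x <= float64(y)
            case float64:
                return x <= y
            }
        case string:
            if y, ok := b.(string); ok {
                return x <= y
            }
        }
        panic(fmt.Sprintf("invalid operation: %T <= %T", a, b))
    }

    func main() {
        fmt.Println(lessOrEqual(3, 4.5))   // true
        fmt.Println(lessOrEqual("a", "b")) // true
    }
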
- if v.NumMethod() > 0 { - if methodName, ok := i.(string); ok { - method := v.MethodByName(methodName) - if method.IsValid() { - return method.Interface() - } - } - } - - // Structs, maps, and slices can be access through a pointer or through - // a value, when they are accessed through a pointer we don't want to - // copy them to a value. - if kind == reflect.Ptr { - v = reflect.Indirect(v) - kind = v.Kind() - } - - // TODO: We can create separate opcodes for each of the cases below to make - // the little bit faster. - switch kind { - case reflect.Array, reflect.Slice, reflect.String: - index := ToInt(i) - if index < 0 { - index = v.Len() + index - } - value := v.Index(index) - if value.IsValid() { - return value.Interface() - } - - case reflect.Map: - var value reflect.Value - if i == nil { - value = v.MapIndex(reflect.Zero(v.Type().Key())) - } else { - value = v.MapIndex(reflect.ValueOf(i)) - } - if value.IsValid() { - return value.Interface() - } else { - elem := reflect.TypeOf(from).Elem() - return reflect.Zero(elem).Interface() - } - - case reflect.Struct: - fieldName := i.(string) - value := v.FieldByNameFunc(func(name string) bool { - field, _ := v.Type().FieldByName(name) - if field.Tag.Get("expr") == fieldName { - return true - } - return name == fieldName - }) - if value.IsValid() { - return value.Interface() - } - } - panic(fmt.Sprintf("cannot fetch %v from %T", i, from)) -} - -type Field struct { - Index []int - Path []string -} - -func FetchField(from interface{}, field *Field) interface{} { - v := reflect.ValueOf(from) - kind := v.Kind() - if kind != reflect.Invalid { - if kind == reflect.Ptr { - v = reflect.Indirect(v) - } - // We can use v.FieldByIndex here, but it will panic if the field - // is not exists. And we need to recover() to generate a more - // user-friendly error message. - // Also, our fieldByIndex() function is slightly faster than the - // v.FieldByIndex() function as we don't need to verify what a field - // is a struct as we already did it on compilation step. - value := fieldByIndex(v, field) - if value.IsValid() { - return value.Interface() - } - } - panic(fmt.Sprintf("cannot get %v from %T", field.Path[0], from)) -} - -func fieldByIndex(v reflect.Value, field *Field) reflect.Value { - if len(field.Index) == 1 { - return v.Field(field.Index[0]) - } - for i, x := range field.Index { - if i > 0 { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - panic(fmt.Sprintf("cannot get %v from %v", field.Path[i], field.Path[i-1])) - } - v = v.Elem() - } - } - v = v.Field(x) - } - return v -} - -type Method struct { - Index int - Name string -} - -func FetchMethod(from interface{}, method *Method) interface{} { - v := reflect.ValueOf(from) - kind := v.Kind() - if kind != reflect.Invalid { - // Methods can be defined on any type, no need to dereference. 
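
Fetch above is the VM's generic indexing primitive: it uses the reflect package to evaluate a[b] uniformly across slices, strings, maps, and structs, supporting negative indices and an `expr` struct tag that renames fields. A condensed sketch of that dispatch, assuming the same panic-on-failure error style as the runtime (fetch and the User type are illustrative names):

    package main

    import (
        "fmt"
        "reflect"
    )

    // fetch is a trimmed-down version of the runtime.Fetch shown above:
    // indirect pointers, then index into slices/strings (negative values
    // count from the end), maps, or struct fields, where an `expr` tag
    // may rename a field.
    func fetch(from, key interface{}) interface{} {
        v := reflect.Indirect(reflect.ValueOf(from))
        switch v.Kind() {
        case reflect.Slice, reflect.Array, reflect.String:
            i := key.(int)
            if i < 0 {
                i += v.Len()
            }
            return v.Index(i).Interface()
        case reflect.Map:
            if mv := v.MapIndex(reflect.ValueOf(key)); mv.IsValid() {
                return mv.Interface()
            }
            return reflect.Zero(v.Type().Elem()).Interface()
        case reflect.Struct:
            name := key.(string)
            f := v.FieldByNameFunc(func(n string) bool {
                sf, _ := v.Type().FieldByName(n)
                return n == name || sf.Tag.Get("expr") == name
            })
            if f.IsValid() {
                return f.Interface()
            }
        }
        panic(fmt.Sprintf("cannot fetch %v from %T", key, from))
    }

    func main() {
        type User struct {
            Name string `expr:"name"`
        }
        fmt.Println(fetch([]int{1, 2, 3}, -1))          // 3
        fmt.Println(fetch(map[string]int{"a": 1}, "a")) // 1
        fmt.Println(fetch(&User{Name: "ada"}, "name"))  // ada
    }
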
- method := v.Method(method.Index) - if method.IsValid() { - return method.Interface() - } - } - panic(fmt.Sprintf("cannot fetch %v from %T", method.Name, from)) -} - -func Deref(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i) - - if v.Kind() == reflect.Interface { - if v.IsNil() { - return i - } - v = v.Elem() - } - - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return i - } - indirect := reflect.Indirect(v) - switch indirect.Kind() { - case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice: - default: - v = v.Elem() - } - } - - if v.IsValid() { - return v.Interface() - } - - panic(fmt.Sprintf("cannot dereference %v", i)) -} - -func Slice(array, from, to interface{}) interface{} { - v := reflect.ValueOf(array) - - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - length := v.Len() - a, b := ToInt(from), ToInt(to) - if a < 0 { - a = length + a - } - if b < 0 { - b = length + b - } - if b > length { - b = length - } - if a > b { - a = b - } - value := v.Slice(a, b) - if value.IsValid() { - return value.Interface() - } - - case reflect.Ptr: - value := v.Elem() - if value.IsValid() { - return Slice(value.Interface(), from, to) - } - - } - panic(fmt.Sprintf("cannot slice %v", from)) -} - -func In(needle interface{}, array interface{}) bool { - if array == nil { - return false - } - v := reflect.ValueOf(array) - - switch v.Kind() { - - case reflect.Array, reflect.Slice: - for i := 0; i < v.Len(); i++ { - value := v.Index(i) - if value.IsValid() { - if Equal(value.Interface(), needle) { - return true - } - } - } - return false - - case reflect.Map: - var value reflect.Value - if needle == nil { - value = v.MapIndex(reflect.Zero(v.Type().Key())) - } else { - n := reflect.ValueOf(needle) - if !n.IsValid() { - panic(fmt.Sprintf("cannot use %T as index to %T", needle, array)) - } - value = v.MapIndex(n) - } - if value.IsValid() { - return true - } - return false - - case reflect.Struct: - n := reflect.ValueOf(needle) - if !n.IsValid() || n.Kind() != reflect.String { - panic(fmt.Sprintf("cannot use %T as field name of %T", needle, array)) - } - value := v.FieldByName(n.String()) - if value.IsValid() { - return true - } - return false - - case reflect.Ptr: - value := v.Elem() - if value.IsValid() { - return In(needle, value.Interface()) - } - return false - } - - panic(fmt.Sprintf(`operator "in"" not defined on %T`, array)) -} - -func Len(a interface{}) interface{} { - v := reflect.ValueOf(a) - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Map, reflect.String: - return v.Len() - default: - panic(fmt.Sprintf("invalid argument for len (type %T)", a)) - } -} - -func Negate(i interface{}) interface{} { - switch v := i.(type) { - case float32: - return -v - case float64: - return -v - case int: - return -v - case int8: - return -v - case int16: - return -v - case int32: - return -v - case int64: - return -v - case uint: - return -v - case uint8: - return -v - case uint16: - return -v - case uint32: - return -v - case uint64: - return -v - default: - panic(fmt.Sprintf("invalid operation: - %T", v)) - } -} - -func Exponent(a, b interface{}) float64 { - return math.Pow(ToFloat64(a), ToFloat64(b)) -} - -func MakeRange(min, max int) []int { - size := max - min + 1 - if size <= 0 { - return []int{} - } - rng := make([]int, size) - for i := range rng { - rng[i] = min + i - } - return rng -} - -func ToInt(a interface{}) int { - switch x := a.(type) { - case float32: - return int(x) - case float64: - return int(x) - case 
int: - return x - case int8: - return int(x) - case int16: - return int(x) - case int32: - return int(x) - case int64: - return int(x) - case uint: - return int(x) - case uint8: - return int(x) - case uint16: - return int(x) - case uint32: - return int(x) - case uint64: - return int(x) - case string: - i, err := strconv.Atoi(x) - if err != nil { - panic(fmt.Sprintf("invalid operation: int(%s)", x)) - } - return i - default: - panic(fmt.Sprintf("invalid operation: int(%T)", x)) - } -} - -func ToInt64(a interface{}) int64 { - switch x := a.(type) { - case float32: - return int64(x) - case float64: - return int64(x) - case int: - return int64(x) - case int8: - return int64(x) - case int16: - return int64(x) - case int32: - return int64(x) - case int64: - return x - case uint: - return int64(x) - case uint8: - return int64(x) - case uint16: - return int64(x) - case uint32: - return int64(x) - case uint64: - return int64(x) - default: - panic(fmt.Sprintf("invalid operation: int64(%T)", x)) - } -} - -func ToFloat64(a interface{}) float64 { - switch x := a.(type) { - case float32: - return float64(x) - case float64: - return x - case int: - return float64(x) - case int8: - return float64(x) - case int16: - return float64(x) - case int32: - return float64(x) - case int64: - return float64(x) - case uint: - return float64(x) - case uint8: - return float64(x) - case uint16: - return float64(x) - case uint32: - return float64(x) - case uint64: - return float64(x) - case string: - f, err := strconv.ParseFloat(x, 64) - if err != nil { - panic(fmt.Sprintf("invalid operation: float(%s)", x)) - } - return f - default: - panic(fmt.Sprintf("invalid operation: float(%T)", x)) - } -} - -func IsNil(v interface{}) bool { - if v == nil { - return true - } - r := reflect.ValueOf(v) - switch r.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: - return r.IsNil() - default: - return false - } -} - -func Abs(x interface{}) interface{} { - switch x.(type) { - case float32: - if x.(float32) < 0 { - return -x.(float32) - } else { - return x - } - case float64: - if x.(float64) < 0 { - return -x.(float64) - } else { - return x - } - case int: - if x.(int) < 0 { - return -x.(int) - } else { - return x - } - case int8: - if x.(int8) < 0 { - return -x.(int8) - } else { - return x - } - case int16: - if x.(int16) < 0 { - return -x.(int16) - } else { - return x - } - case int32: - if x.(int32) < 0 { - return -x.(int32) - } else { - return x - } - case int64: - if x.(int64) < 0 { - return -x.(int64) - } else { - return x - } - case uint: - if x.(uint) < 0 { - return -x.(uint) - } else { - return x - } - case uint8: - if x.(uint8) < 0 { - return -x.(uint8) - } else { - return x - } - case uint16: - if x.(uint16) < 0 { - return -x.(uint16) - } else { - return x - } - case uint32: - if x.(uint32) < 0 { - return -x.(uint32) - } else { - return x - } - case uint64: - if x.(uint64) < 0 { - return -x.(uint64) - } else { - return x - } - } - panic(fmt.Sprintf("invalid argument for abs (type %T)", x)) -} diff --git a/vendor/github.com/antonmedv/expr/vm/vm.go b/vendor/github.com/antonmedv/expr/vm/vm.go deleted file mode 100644 index af4fc5bf755..00000000000 --- a/vendor/github.com/antonmedv/expr/vm/vm.go +++ /dev/null @@ -1,523 +0,0 @@ -package vm - -//go:generate sh -c "go run ./func_types > ./generated.go" - -import ( - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/antonmedv/expr/builtin" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/vm/runtime" -) - 
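
The ToInt/ToInt64/ToFloat64 helpers in the runtime.go removed above all follow one coercion recipe: widen any numeric type, parse strings with strconv, and panic otherwise so that the VM's deferred recover can turn the panic into a positioned error. A reduced sketch covering a few of the cases (toFloat64 is an illustrative name, not the exported API, and only a subset of the numeric types is handled here):

    package main

    import (
        "fmt"
        "strconv"
    )

    // toFloat64 condenses the deleted runtime.ToFloat64: widen numeric
    // input, parse strings, and panic on anything else.
    func toFloat64(a interface{}) float64 {
        switch x := a.(type) {
        case int:
            return float64(x)
        case int64:
            return float64(x)
        case uint64:
            return float64(x)
        case float32:
            return float64(x)
        case float64:
            return x
        case string:
            f, err := strconv.ParseFloat(x, 64)
            if err != nil {
                panic(fmt.Sprintf("invalid operation: float(%s)", x))
            }
            return f
        default:
            panic(fmt.Sprintf("invalid operation: float(%T)", x))
        }
    }

    func main() {
        fmt.Println(toFloat64(42), toFloat64("3.14")) // 42 3.14
    }
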
-var MemoryBudget int = 1e6 -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -type Function = func(params ...interface{}) (interface{}, error) - -func Run(program *Program, env interface{}) (interface{}, error) { - if program == nil { - return nil, fmt.Errorf("program is nil") - } - - vm := VM{} - return vm.Run(program, env) -} - -type VM struct { - stack []interface{} - ip int - scopes []*Scope - debug bool - step chan struct{} - curr chan int - memory int - memoryBudget int -} - -type Scope struct { - Array reflect.Value - It int - Len int - Count int -} - -func Debug() *VM { - vm := &VM{ - debug: true, - step: make(chan struct{}, 0), - curr: make(chan int, 0), - } - return vm -} - -func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) { - defer func() { - if r := recover(); r != nil { - f := &file.Error{ - Location: program.Locations[vm.ip-1], - Message: fmt.Sprintf("%v", r), - } - if err, ok := r.(error); ok { - f.Wrap(err) - } - err = f.Bind(program.Source) - } - }() - - if vm.stack == nil { - vm.stack = make([]interface{}, 0, 2) - } else { - vm.stack = vm.stack[0:0] - } - - if vm.scopes != nil { - vm.scopes = vm.scopes[0:0] - } - - vm.memoryBudget = MemoryBudget - vm.memory = 0 - vm.ip = 0 - - for vm.ip < len(program.Bytecode) { - if vm.debug { - <-vm.step - } - - op := program.Bytecode[vm.ip] - arg := program.Arguments[vm.ip] - vm.ip += 1 - - switch op { - - case OpPush: - vm.push(program.Constants[arg]) - - case OpPop: - vm.pop() - - case OpLoadConst: - vm.push(runtime.Fetch(env, program.Constants[arg])) - - case OpLoadField: - vm.push(runtime.FetchField(env, program.Constants[arg].(*runtime.Field))) - - case OpLoadFast: - vm.push(env.(map[string]interface{})[program.Constants[arg].(string)]) - - case OpLoadMethod: - vm.push(runtime.FetchMethod(env, program.Constants[arg].(*runtime.Method))) - - case OpLoadFunc: - vm.push(program.Functions[arg]) - - case OpFetch: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Fetch(a, b)) - - case OpFetchField: - a := vm.pop() - vm.push(runtime.FetchField(a, program.Constants[arg].(*runtime.Field))) - - case OpMethod: - a := vm.pop() - vm.push(runtime.FetchMethod(a, program.Constants[arg].(*runtime.Method))) - - case OpTrue: - vm.push(true) - - case OpFalse: - vm.push(false) - - case OpNil: - vm.push(nil) - - case OpNegate: - v := runtime.Negate(vm.pop()) - vm.push(v) - - case OpNot: - v := vm.pop().(bool) - vm.push(!v) - - case OpEqual: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Equal(a, b)) - - case OpEqualInt: - b := vm.pop() - a := vm.pop() - vm.push(a.(int) == b.(int)) - - case OpEqualString: - b := vm.pop() - a := vm.pop() - vm.push(a.(string) == b.(string)) - - case OpJump: - vm.ip += arg - - case OpJumpIfTrue: - if vm.current().(bool) { - vm.ip += arg - } - - case OpJumpIfFalse: - if !vm.current().(bool) { - vm.ip += arg - } - - case OpJumpIfNil: - if runtime.IsNil(vm.current()) { - vm.ip += arg - } - - case OpJumpIfNotNil: - if !runtime.IsNil(vm.current()) { - vm.ip += arg - } - - case OpJumpIfEnd: - scope := vm.Scope() - if scope.It >= scope.Len { - vm.ip += arg - } - - case OpJumpBackward: - vm.ip -= arg - - case OpIn: - b := vm.pop() - a := vm.pop() - vm.push(runtime.In(a, b)) - - case OpLess: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Less(a, b)) - - case OpMore: - b := vm.pop() - a := vm.pop() - vm.push(runtime.More(a, b)) - - case OpLessOrEqual: - b := vm.pop() - a := vm.pop() - vm.push(runtime.LessOrEqual(a, b)) - - case OpMoreOrEqual: - b := vm.pop() - a := vm.pop() - 
vm.push(runtime.MoreOrEqual(a, b)) - - case OpAdd: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Add(a, b)) - - case OpSubtract: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Subtract(a, b)) - - case OpMultiply: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Multiply(a, b)) - - case OpDivide: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Divide(a, b)) - - case OpModulo: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Modulo(a, b)) - - case OpExponent: - b := vm.pop() - a := vm.pop() - vm.push(runtime.Exponent(a, b)) - - case OpRange: - b := vm.pop() - a := vm.pop() - min := runtime.ToInt(a) - max := runtime.ToInt(b) - size := max - min + 1 - if vm.memory+size >= vm.memoryBudget { - panic("memory budget exceeded") - } - vm.push(runtime.MakeRange(min, max)) - vm.memory += size - - case OpMatches: - b := vm.pop() - a := vm.pop() - match, err := regexp.MatchString(b.(string), a.(string)) - if err != nil { - panic(err) - } - - vm.push(match) - - case OpMatchesConst: - a := vm.pop() - r := program.Constants[arg].(*regexp.Regexp) - vm.push(r.MatchString(a.(string))) - - case OpContains: - b := vm.pop() - a := vm.pop() - vm.push(strings.Contains(a.(string), b.(string))) - - case OpStartsWith: - b := vm.pop() - a := vm.pop() - vm.push(strings.HasPrefix(a.(string), b.(string))) - - case OpEndsWith: - b := vm.pop() - a := vm.pop() - vm.push(strings.HasSuffix(a.(string), b.(string))) - - case OpSlice: - from := vm.pop() - to := vm.pop() - node := vm.pop() - vm.push(runtime.Slice(node, from, to)) - - case OpCall: - fn := reflect.ValueOf(vm.pop()) - size := arg - in := make([]reflect.Value, size) - for i := int(size) - 1; i >= 0; i-- { - param := vm.pop() - if param == nil && reflect.TypeOf(param) == nil { - // In case of nil value and nil type use this hack, - // otherwise reflect.Call will panic on zero value. - in[i] = reflect.ValueOf(¶m).Elem() - } else { - in[i] = reflect.ValueOf(param) - } - } - out := fn.Call(in) - if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() { - panic(out[1].Interface().(error)) - } - vm.push(out[0].Interface()) - - case OpCall0: - out, err := program.Functions[arg]() - if err != nil { - panic(err) - } - vm.push(out) - - case OpCall1: - a := vm.pop() - out, err := program.Functions[arg](a) - if err != nil { - panic(err) - } - vm.push(out) - - case OpCall2: - b := vm.pop() - a := vm.pop() - out, err := program.Functions[arg](a, b) - if err != nil { - panic(err) - } - vm.push(out) - - case OpCall3: - c := vm.pop() - b := vm.pop() - a := vm.pop() - out, err := program.Functions[arg](a, b, c) - if err != nil { - panic(err) - } - vm.push(out) - - case OpCallN: - fn := vm.pop().(Function) - size := arg - in := make([]interface{}, size) - for i := int(size) - 1; i >= 0; i-- { - in[i] = vm.pop() - } - out, err := fn(in...) 
- if err != nil { - panic(err) - } - vm.push(out) - - case OpCallFast: - fn := vm.pop().(func(...interface{}) interface{}) - size := arg - in := make([]interface{}, size) - for i := int(size) - 1; i >= 0; i-- { - in[i] = vm.pop() - } - vm.push(fn(in...)) - - case OpCallTyped: - fn := vm.pop() - out := vm.call(fn, arg) - vm.push(out) - - case OpArray: - size := vm.pop().(int) - array := make([]interface{}, size) - for i := size - 1; i >= 0; i-- { - array[i] = vm.pop() - } - vm.push(array) - vm.memory += size - if vm.memory >= vm.memoryBudget { - panic("memory budget exceeded") - } - - case OpMap: - size := vm.pop().(int) - m := make(map[string]interface{}) - for i := size - 1; i >= 0; i-- { - value := vm.pop() - key := vm.pop() - m[key.(string)] = value - } - vm.push(m) - vm.memory += size - if vm.memory >= vm.memoryBudget { - panic("memory budget exceeded") - } - - case OpLen: - vm.push(runtime.Len(vm.current())) - - case OpCast: - t := arg - switch t { - case 0: - vm.push(runtime.ToInt(vm.pop())) - case 1: - vm.push(runtime.ToInt64(vm.pop())) - case 2: - vm.push(runtime.ToFloat64(vm.pop())) - } - - case OpDeref: - a := vm.pop() - vm.push(runtime.Deref(a)) - - case OpIncrementIt: - scope := vm.Scope() - scope.It++ - - case OpIncrementCount: - scope := vm.Scope() - scope.Count++ - - case OpGetCount: - scope := vm.Scope() - vm.push(scope.Count) - - case OpGetLen: - scope := vm.Scope() - vm.push(scope.Len) - - case OpPointer: - scope := vm.Scope() - vm.push(scope.Array.Index(scope.It).Interface()) - - case OpBegin: - a := vm.pop() - array := reflect.ValueOf(a) - vm.scopes = append(vm.scopes, &Scope{ - Array: array, - Len: array.Len(), - }) - - case OpEnd: - vm.scopes = vm.scopes[:len(vm.scopes)-1] - - case OpBuiltin: - switch arg { - case builtin.Len: - vm.push(runtime.Len(vm.pop())) - - case builtin.Abs: - vm.push(runtime.Abs(vm.pop())) - - case builtin.Int: - vm.push(runtime.ToInt(vm.pop())) - - case builtin.Float: - vm.push(runtime.ToFloat64(vm.pop())) - - default: - panic(fmt.Sprintf("unknown builtin %v", arg)) - } - - default: - panic(fmt.Sprintf("unknown bytecode %#x", op)) - } - - if vm.debug { - vm.curr <- vm.ip - } - } - - if vm.debug { - close(vm.curr) - close(vm.step) - } - - if len(vm.stack) > 0 { - return vm.pop(), nil - } - - return nil, nil -} - -func (vm *VM) push(value interface{}) { - vm.stack = append(vm.stack, value) -} - -func (vm *VM) current() interface{} { - return vm.stack[len(vm.stack)-1] -} - -func (vm *VM) pop() interface{} { - value := vm.stack[len(vm.stack)-1] - vm.stack = vm.stack[:len(vm.stack)-1] - return value -} - -func (vm *VM) Stack() []interface{} { - return vm.stack -} - -func (vm *VM) Scope() *Scope { - if len(vm.scopes) > 0 { - return vm.scopes[len(vm.scopes)-1] - } - return nil -} - -func (vm *VM) Step() { - vm.step <- struct{}{} -} - -func (vm *VM) Position() chan int { - return vm.curr -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.clang-format b/vendor/github.com/argoproj/argo-workflows/v3/.clang-format deleted file mode 100644 index 2f14c857514..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.clang-format +++ /dev/null @@ -1,2 +0,0 @@ -# Allow unlimited column length, rather than 80. This prevents word-wrapping comments, which end up in Swagger. 
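The vm.go removed above is a stack-based bytecode interpreter: Run fetches one opcode per iteration, operands move through vm.stack via push/pop, and OpRange, OpArray and OpMap charge their element counts against MemoryBudget (1e6) so that a single expression cannot allocate unbounded memory. The file matches the vm package vendored from github.com/antonmedv/expr; assuming that is the source, here is a minimal sketch (not part of this patch) of how such a program is normally built and executed through the library's public API:

    package main

    import (
        "fmt"

        "github.com/antonmedv/expr"
    )

    func main() {
        env := map[string]interface{}{"items": []int{1, 2, 3}}

        // Compile type-checks the expression and emits the bytecode,
        // constants and functions that the interpreter consumes.
        program, err := expr.Compile("len(items) > 2", expr.Env(env))
        if err != nil {
            panic(err)
        }

        // Run drives the dispatch loop shown in the deleted file; the
        // value left on the stack is the result.
        out, err := expr.Run(program, env)
        if err != nil {
            panic(err)
        }
        fmt.Println(out) // true
    }

The Debug constructor together with Step and Position exposes the same loop one instruction at a time over unbuffered channels, which is what the library's debugger hooks into. A second detail worth spelling out is the OpCall case: when an argument is nil and has no dynamic type, reflect.ValueOf(param) returns the zero reflect.Value, and reflect.Call panics on it, so the interpreter substitutes reflect.ValueOf(&param).Elem(). A self-contained sketch of that idiom (illustrative names, nothing here is taken from the patch):

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // reflect.ValueOf(nil) yields the zero Value; reflect.Call
        // would panic if it were passed as an argument.
        fmt.Println(reflect.ValueOf(nil).IsValid()) // false

        // Taking the address of an interface variable and dereferencing
        // it reflectively produces a valid Value of interface kind that
        // still holds nil.
        var param interface{}
        v := reflect.ValueOf(&param).Elem()
        fmt.Println(v.IsValid(), v.Kind()) // true interface

        // Such a Value passes safely through reflect.Call.
        isNil := reflect.ValueOf(func(x interface{}) bool { return x == nil })
        out := isNil.Call([]reflect.Value{v})
        fmt.Println(out[0].Bool()) // true
    }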
-ColumnLimit: 0 \ No newline at end of file diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.codecov.yml b/vendor/github.com/argoproj/argo-workflows/v3/.codecov.yml deleted file mode 100644 index dceffa66be6..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.codecov.yml +++ /dev/null @@ -1,17 +0,0 @@ -ignore: -- "**/*.pb.go" -- "**/*.pb.gw.go" -- "**/*generated.go" -- "**/*generated.deepcopy.go" -- "**/*_test.go" -- "pkg/apis/client/.*" -- "pkg/client/.*" -- "vendor/.*" -coverage: - status: - # we've found this not to be useful - patch: off - project: - default: - # allow test coverage to drop by 2%, assume that it's typically due to CI problems - threshold: 2 \ No newline at end of file diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.dockerignore b/vendor/github.com/argoproj/argo-workflows/v3/.dockerignore deleted file mode 100644 index 37c2602814e..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.dockerignore +++ /dev/null @@ -1,22 +0,0 @@ -# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from -# a user's workspace, and are only building off of what is locked by dep. -*.iml -*.md -*.yaml -.github -.idea -.run -assets -community -coverage.out -dist -docs -examples -manifests -plugins -sdks -test/e2e -ui/dist -ui/node_modules -v3 -vendor \ No newline at end of file diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.gitattributes b/vendor/github.com/argoproj/argo-workflows/v3/.gitattributes deleted file mode 100644 index 107ce6a7b95..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -sdks/python/client/** linguist-generated diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.gitignore b/vendor/github.com/argoproj/argo-workflows/v3/.gitignore deleted file mode 100644 index 012b8b06807..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.gitignore +++ /dev/null @@ -1,46 +0,0 @@ -Pipfile -.vscode/ -.idea/ -.node-version -.DS_Store -vendor/ -dist/ -# delve debug binaries -cmd/**/debug -hack/**/debug -/argo -/argoexec -release-notes -debug.test -git-ask-pass.sh -*.iml -/coverage.out -.envrc -/.vendor-new -/kustomize -/workflow-controller -/.scannerwork/ -/test-results/ -/package-lock.json -/pkg/apiclient/_.secondary.swagger.json -/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json -/pkg/apiclient/cronworkflow/cron-workflow.swagger.json -/pkg/apiclient/event/event.swagger.json -/pkg/apiclient/eventsource/eventsource.swagger.json -/pkg/apiclient/info/info.swagger.json -/pkg/apiclient/pipeline/pipeline.swagger.json -/pkg/apiclient/sensor/sensor.swagger.json -/pkg/apiclient/workflow/workflow.swagger.json -/pkg/apiclient/workflowarchive/workflow-archive.swagger.json -/pkg/apiclient/workflowtemplate/workflow-template.swagger.json -/site/ -/.brew_home -/go-diagrams/ -/.run/ -sdks/python/client/dist/* -/v3/ -/cmd/argoexec/commands/test.txt - -# Do not commit rendered installation manifests since they are misleading to users.
-manifests/install.yaml -manifests/namespace-install.yaml diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.golangci.yml b/vendor/github.com/argoproj/argo-workflows/v3/.golangci.yml deleted file mode 100644 index f0b13cf5ee5..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.golangci.yml +++ /dev/null @@ -1,67 +0,0 @@ -# https://golangci-lint.run/usage/quick-start/ -run: - timeout: 8m - skip-dirs: - - dist - - docs - - examples - - hack - - manifests - - pkg/client - - sdks - - ui - - vendor - skip-files: - - server/static/files.go - build-tags: - - api - - cli - - cron - - executor - - examples - - corefunctional - - functional - - plugins -linters: - enable: - - bodyclose - - errcheck - - goimports - # only minor issues - # - errorlint - - exportloopref - # seems to have bugs in recent version, also slow - # - gci - - gosec - - gosimple - - govet - - ineffassign - - misspell - - rowserrcheck - - sqlclosecheck - - staticcheck - - typecheck - - unparam - - unused -linters-settings: - goimports: - local-prefixes: github.com/argoproj/argo-workflows/ - gosec: - includes: - - G304 - - G307 - excludes: - # G106: Use of ssh InsecureIgnoreHostKey should be audited - - G106 - # G402: TLS InsecureSkipVerify set true - - G402 - # G601: Implicit memory aliasing in for loop. - - G601 -issues: - exclude: - # We are leaving io/ioutil for now to make backports easier - # https://github.com/argoproj/argo-workflows/pull/6964#issuecomment-946827019 - - "SA1019: \"io/ioutil\" has been deprecated since Go 1.16" - exclude-rules: - - path: server/artifacts/artifact_server_test.go - text: "response body must be closed" diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.markdownlint.yaml b/vendor/github.com/argoproj/argo-workflows/v3/.markdownlint.yaml deleted file mode 100644 index 261ef7e6517..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.markdownlint.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# not fix for line length -MD013: false -# mkdocs uses 4 spaces indent -MD007: - indent: 4 diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.mlc_config.json b/vendor/github.com/argoproj/argo-workflows/v3/.mlc_config.json deleted file mode 100644 index 946725d2fec..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.mlc_config.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": ".*localhost.*" - } - ], - "aliveStatusCodes": [ - 200, - 429 - ] -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/.spelling b/vendor/github.com/argoproj/argo-workflows/v3/.spelling deleted file mode 100644 index c009b517fab..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/.spelling +++ /dev/null @@ -1,219 +0,0 @@ -# markdown-spellcheck spelling configuration file -# Format - lines beginning # are comments -# global dictionary is at the start, file overrides afterwards -# one word per line, to define a file override use ' - filename' -# where filename is relative to this configuration file -000s -0s -100Mi -100s -10h -10s -120s -120sec -1Gi -1Mi -1h -1m -2Gi -2h -30s -3min -3s -4Gi -4xx -512Mi -5m -5xx -8Ki -90m -Alexandre -Alibaba -Ang -Anthos -ArgoLabs -Artifactory -BlackRock -Breitgand -Couler -DataDog -Dataflow -DeleteObject -DevOps -Dex -EtcD -EventRouter -FailFast -GSoC -GitOps -Github -Golang -Grafana -Grammarly -Hadoop -Heptio -Homebrew -InsideBoard -Invocators -Istio -J.P. 
-Jemison -JetBrains -KNative -Katacoda -Kerberos -Killercoda -KubectlExec -Kubeflow -Kustomize -Lifecycle-Hook -LitmusChaos -metadata -MLOps -MinIO -Minikube -MySQL -Nagal -Nano -Nginx -Node.JS. -OAuth -OAuth2 -Okta -parameterize -parameterized -parameterizing -PDBs -PProf -PVCs -Peixuan -Ploomber -Postgres -Roadmap -RoleBinding -s3 -SDKs -Sharding -Singer.io -Snyk -Sumit -Tekton -Tianchu -Traefik -TripAdvisor -VSCode -Valasek -Webhooks -Welch -`CronTab` -`OnFailure` -a.m. -alexec -anded -apis -architecting -argo -args -async -auth -backend -blkperl -boolean -booleans -buildkit -config -cpu -cron -daemoned -dev-container -dinever -dropdown -e.g. -e2e -entrypoint -enum -env -errored -expr -fibonacci -finalizer -govaluate -gzipped -i.e. -instantiator -instantiators -jenkins -k3d -k3s -k8s-jobs -kube -kubelet -kubernetes -liveness -localhost -memoization -memoized -memoizing -mentee -mentees -minikube -mutex -namespace -namespaces -natively -p.m. -params -pre-commit -rc2 -repo -roadmap -runtime -runtimes -sandboxed -sarabala1979 -simster7 -stateful -stderr -tczhao -terrytangyuan -themself -un-reconciled -untracked -v1 -v1.0 -v1.1 -v1.2 -v1.3 -v1.24 -v2 -v2.10 -v2.11 -v2.12 -v2.35.0 -v2.4 -v2.5 -v2.6 -v2.7 -v2.7.2 -v2.8 -v2.9 -v3.0 -v3.0.0 -v3.1 -v3.1.4 -v3.2 -v3.2. -v3.3 -v3.3. -v3.4 -v3.4. -validator -versioning -webHDFS -webhook -webhooks -workflow-controller-configmap -yaml -idempotence -kube-scheduler -kube-apiserver diff --git a/vendor/github.com/argoproj/argo-workflows/v3/CHANGELOG.md b/vendor/github.com/argoproj/argo-workflows/v3/CHANGELOG.md deleted file mode 100644 index 937b937401b..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/CHANGELOG.md +++ /dev/null @@ -1,7175 +0,0 @@ -# Changelog - -## v3.4.2 (2022-10-22) - - * [b00550f7b](https://github.com/argoproj/argo-workflows/commit/b00550f7bae3938d324ce2857019529d61382d84) chore(deps): bump github.com/prometheus/client_model from 0.2.0 to 0.3.0 (#9885) - * [a6e5b6ce7](https://github.com/argoproj/argo-workflows/commit/a6e5b6ce78acd210f6d8f42439948ac771084db8) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.41 to 7.0.42 (#9886) - * [c81b07145](https://github.com/argoproj/argo-workflows/commit/c81b071455c7850ae33ff842bf35275ef44a4065) chore(deps): bump github.com/valyala/fasttemplate from 1.2.1 to 1.2.2 (#9887) - * [ec5162983](https://github.com/argoproj/argo-workflows/commit/ec5162983fd5e3032e5d3162245eab28e41b694b) fix: P/R/C reporting in argo list -o wide. Fixes #9281 (#9874) - * [6c432d2c9](https://github.com/argoproj/argo-workflows/commit/6c432d2c980bd37be28ebb22d1e83b176993ce38) fix: upgrade python openapiclient version, fixes #9770 (#9840) - * [36646ef81](https://github.com/argoproj/argo-workflows/commit/36646ef81cb4775c1ef31861f01331bc75166e7b) fix: Support Kubernetes v1.24. Fixes #8320 (#9620) - * [05e1425f8](https://github.com/argoproj/argo-workflows/commit/05e1425f857264076e0de29124d4fbf74b4107b4) fix(server&ui): can't fetch inline artifact. Fixes #9817 (#9853) - * [ce3172804](https://github.com/argoproj/argo-workflows/commit/ce31728046cbfe0a58bfd31e20e63c7edec25437) feat(ui): Display detailed Start/End times in workflow-node-info. Fixes #7920 (#9834) - * [b323bb1e5](https://github.com/argoproj/argo-workflows/commit/b323bb1e570a6cbd347942bbce82e25a05c4ca92) fix(ui): view manifest error on inline node. Fixes #9841 (#9842) - * [9237a72f7](https://github.com/argoproj/argo-workflows/commit/9237a72f7999f375279d054232028e4931d737f3) fix(ui): containerset archive log query params. 
Fixes #9669 (#9833) - * [a752a583a](https://github.com/argoproj/argo-workflows/commit/a752a583a5b9295fddae5c2978ea5f4cee2687d2) fix: exit code always be '0' in windows container. Fixes #9797 (#9807) - * [af8347c36](https://github.com/argoproj/argo-workflows/commit/af8347c36d305a56c7c1355078b410f97e2ed3d5) chore(deps): Bump github.com/TwiN/go-color from v1.1.0 to v1.2.0 (#9794) - * [102c3ec22](https://github.com/argoproj/argo-workflows/commit/102c3ec22118a49ccfa75b9c3878d62057afb441) fix: migrated from distribution@v2.8.0 to distribution@v2.8.1. Fixes #9850 (#9851) - * [d4a907411](https://github.com/argoproj/argo-workflows/commit/d4a907411a7105ffda52a284e1059c6de9829bcf) fix: trigger startup.sh at devcontainer startup instead of create (#9831) - * [b7f9071d0](https://github.com/argoproj/argo-workflows/commit/b7f9071d0a5c57e8e6dfe2638dfc6dacca2af6cf) chore(deps-dev): bump @babel/preset-env from 7.19.3 to 7.19.4 in /ui (#9829) - * [9b9abf9ea](https://github.com/argoproj/argo-workflows/commit/9b9abf9eab7cc7ffdf27aabe4fb8d8d998bf42e7) chore(deps-dev): bump babel-jest from 29.1.2 to 29.2.0 in /ui (#9828) - * [ca750d056](https://github.com/argoproj/argo-workflows/commit/ca750d056db8d2d4005cf2f1dadb32e79be9b76a) chore(deps): bump github.com/TwiN/go-color from 1.1.0 to 1.2.0 (#9822) - * [593eab25c](https://github.com/argoproj/argo-workflows/commit/593eab25cade9f2a5b71fdef028d3886ff5e0e3c) chore(deps): bump google.golang.org/api from 0.98.0 to 0.99.0 (#9823) - * [1670dca60](https://github.com/argoproj/argo-workflows/commit/1670dca6092b51781ed5e1f2d2522b0c0bca0ced) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.40 to 7.0.41 (#9825) - * [e838214ed](https://github.com/argoproj/argo-workflows/commit/e838214ed452d4bff528da4a7a2f101ebf324277) chore(deps): bump cronstrue from 2.12.0 to 2.14.0 in /ui (#9826) - * [7d2081830](https://github.com/argoproj/argo-workflows/commit/7d2081830b8b77de37429958b7968d7073ef5f0c) chore(deps): bump nick-fields/retry from 2.8.1 to 2.8.2 (#9820) - * [f6a8b0130](https://github.com/argoproj/argo-workflows/commit/f6a8b0130dccb5a773fc52fd46354f8537d022cb) fix: SDK CI workflow (#9609) - * [faa0294f5](https://github.com/argoproj/argo-workflows/commit/faa0294f5c29fa10800f94677c21b7180d9b3da4) fix: fixed url encoded link template (#9792) - * [ebae212d7](https://github.com/argoproj/argo-workflows/commit/ebae212d709f039823737b495437c14898690376) fix(ui): missing url href formatting in template link. Fixes #9764 (#9790) - * [d4817efff](https://github.com/argoproj/argo-workflows/commit/d4817efffad2a1d96374f69f3b547bf3f9d758a9) fix: fix iam permissions to retrieve logs from aws s3 (#9798) - * [aa59b4374](https://github.com/argoproj/argo-workflows/commit/aa59b43748f78e599709add871af7ec14e1fd3c1) fix: enable when expressions to use expr; add new json variables to avoid expr conflicts (#9761) - * [0fc883a41](https://github.com/argoproj/argo-workflows/commit/0fc883a41c81c533c57ec64ca8c19279b38e60ec) fix: avoid nil pointer dereference. Fixes #9269 (#9787) - * [cd43bba6c](https://github.com/argoproj/argo-workflows/commit/cd43bba6c87d185bd1530c03c99b874eeceba966) fix: Send workflow UID to plugins. Fixes #8573 (#9784) - * [514aa050c](https://github.com/argoproj/argo-workflows/commit/514aa050cab63bba8e6af20700ad4aa7ed53bfd4) feat(server): server logs to be structured and add more log error #2308 (#9779) - * [f27fe08b1](https://github.com/argoproj/argo-workflows/commit/f27fe08b1b06ee86040371b5fa992b82b27d7980) fix: default not respected in setting global configmap params. 
Fixes #9745 (#9758) - * [dc48c8cf1](https://github.com/argoproj/argo-workflows/commit/dc48c8cf12eccb1cc447a4f9a32e1c7dfc4f93da) fix: Set scheduling constraints to the agent pod by the workflow. Fixes #9704 (#9771) - * [f767f39d8](https://github.com/argoproj/argo-workflows/commit/f767f39d86acb549ef29d8196f067280683afd4d) fix: artifactory not working. Fixes #9681 (#9782) - * [1fc6460fa](https://github.com/argoproj/argo-workflows/commit/1fc6460fa16b157b0d333b96d6d93b7d273ed91a) fix: Log early abort. Fixes #9573 (#9575) - * [f1bab8947](https://github.com/argoproj/argo-workflows/commit/f1bab8947c44f9fc0483dc6489b098e04e0510f7) fix: a WorkflowTemplate doesn't need to define workflow-level input p… (#9762) - * [b12b5f987](https://github.com/argoproj/argo-workflows/commit/b12b5f9875b2a070bbcb0a3a16154495c196e6b2) fix: SSO integration not considering HTTP_PROXY when making requests. Fixes #9259 (#9760) - * [529dc0fec](https://github.com/argoproj/argo-workflows/commit/529dc0fec443cd33171d32e7f798ceeaddef1587) feat(ui): add v3.4 feature info (#9777) - * [a8e37e9be](https://github.com/argoproj/argo-workflows/commit/a8e37e9bea5d586f8b1811fcbb8df668d00bdb31) fix: Concurrent map read and map write in agent. Fixes #9685 (#9689) - * [1bbdf0d2a](https://github.com/argoproj/argo-workflows/commit/1bbdf0d2ad5a74832ecff5a6e13a758bdf54e909) feat: Added workflow summary to workflow-list page. (#9693) - * [82201d521](https://github.com/argoproj/argo-workflows/commit/82201d521d91cfa2926584864edbdc8a15e9a5ad) chore(deps): bump cronstrue from 2.11.0 to 2.12.0 in /ui (#9774) - * [d7febc928](https://github.com/argoproj/argo-workflows/commit/d7febc92818fa2cbee5eb32cbf6169beb739673d) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.39 to 7.0.40 (#9773) - * [d64b8d397](https://github.com/argoproj/argo-workflows/commit/d64b8d3976c4cd7592b9433be20547a80f28e289) fix: quick-start-* manifests pointing to invalid httpbin image tag. Fixes #9659 (#9759) - * [de4ea2d51](https://github.com/argoproj/argo-workflows/commit/de4ea2d51262d86f8806fbb710c6b3ae14b24c7f) fix: `value` is required when parameter is of type `enum` (#9753) - * [2312cc9ca](https://github.com/argoproj/argo-workflows/commit/2312cc9ca4f26f06ccc107a10013ea903c10ec15) Revert "Add --tls-certificate-secret-name parameter to server command. 
Fixes #5582" (#9756) - * [d9d1968de](https://github.com/argoproj/argo-workflows/commit/d9d1968de80fa0ee19a5e46ceea5d2b4cf4b5475) fix: node links on UI use podName instead of workflow name (#9740) - * [4fa3d1f37](https://github.com/argoproj/argo-workflows/commit/4fa3d1f37eeec285008e4c23dd50e019c5e41b64) chore(deps-dev): bump @babel/core from 7.19.1 to 7.19.3 in /ui (#9723) - * [cf06067c8](https://github.com/argoproj/argo-workflows/commit/cf06067c898bb87c356fc6fc6d2ba5b203ca5df2) chore(deps-dev): bump @babel/preset-env from 7.19.1 to 7.19.3 in /ui (#9728) - * [b5bef026f](https://github.com/argoproj/argo-workflows/commit/b5bef026ff80bd0c97ffaed51040e59a16c69b66) chore(deps-dev): bump babel-jest from 29.0.3 to 29.1.2 in /ui (#9726) - * [9ac6df02e](https://github.com/argoproj/argo-workflows/commit/9ac6df02e7253df5e0764d6f29bda1ac1bdbb071) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.37 to 7.0.39 (#9721) - * [0b957c128](https://github.com/argoproj/argo-workflows/commit/0b957c1289fd6c04b8c0f63ab18463de9074ac91) chore(deps): bump github.com/argoproj/argo-events from 1.7.2 to 1.7.3 (#9722) - * [e547c72f7](https://github.com/argoproj/argo-workflows/commit/e547c72f7956cb39471f3c523210c79cf05b3775) chore(deps): bump dependabot/fetch-metadata from 1.3.3 to 1.3.4 (#9718) - * [4ba1a0f9b](https://github.com/argoproj/argo-workflows/commit/4ba1a0f9bcfc2a5cd6dd246b4b4635e2d8cecf6d) chore(deps): bump google.golang.org/api from 0.97.0 to 0.98.0 (#9719) - -### Contributors - - * Aditya Shrivastava - * Alex Collins - * Andrii Chubatiuk - * Anil Kumar - * Dillen Padhiar - * Isitha Subasinghe - * Julie Vogelman - * Lukas Heppe - * Ricardo Rosales - * Rohan Kumar - * Saravanan Balasubramanian - * Shadow W - * Takumi Sue - * Tianchu Zhao - * TwiN - * Vũ Hải Lâm - * Yuan Tang - * alexdittmann - * botbotbot - * chen yangxue - * dependabot[bot] - * jibuji - -## v3.4.1 (2022-09-30) - - * [365b6df16](https://github.com/argoproj/argo-workflows/commit/365b6df1641217d1b21b77bb1c2fcb41115dd439) fix: Label on Artifact GC Task no longer exceeds max characters (#9686) - * [0851c36d8](https://github.com/argoproj/argo-workflows/commit/0851c36d8638833b9ecfe0125564e5635641846f) fix: Workflow-controller panic when stop a wf using plugin. Fixes #9587 (#9690) - * [2f5e7534c](https://github.com/argoproj/argo-workflows/commit/2f5e7534c44499a9efce51d12ff87f8c3f725a21) fix: ordering of functionality for setting and evaluating label expressions (#9661) - * [4e34979e1](https://github.com/argoproj/argo-workflows/commit/4e34979e1b132439fe1101a23b46e24a62c0368d) chore(deps): bump argo-events to 1.7.2 (#9624) - * [f0016e054](https://github.com/argoproj/argo-workflows/commit/f0016e054ec32505dcd7f7d610443ad380fc6651) fix: Remove LIST_LIMIT in workflow informer (#9700) - * [e08524d2a](https://github.com/argoproj/argo-workflows/commit/e08524d2acbd474f232f958e711d04d8919681e8) fix: Avoid controller crashes when running large number of workflows (#9691) - * [4158cf11a](https://github.com/argoproj/argo-workflows/commit/4158cf11ad2e5837a76d1194a99b38e6d66f7dd0) Adding Splunk as Argo Workflows User (#9697) - * [d553c9186](https://github.com/argoproj/argo-workflows/commit/d553c9186c761da16a641885a6de8f7fdfb42592) chore(deps-dev): bump sass from 1.54.9 to 1.55.0 in /ui (#9675) - * [ff6aab34e](https://github.com/argoproj/argo-workflows/commit/ff6aab34ecbb5c0de26e36108cd1201c1e1ae2f5) Add --tls-certificate-secret-name parameter to server command. 
Fixes #5582 (#9423) - * [84c19ea90](https://github.com/argoproj/argo-workflows/commit/84c19ea909cbc5249f684133dcb5a8481a533dab) fix: render template vars in DAGTask before releasing lock.. Fixes #9395 (#9405) - * [b214161b3](https://github.com/argoproj/argo-workflows/commit/b214161b38642da75a38a100548d3809731746ff) fix: add authorization from cookie to metadata (#9663) - * [b219d85ab](https://github.com/argoproj/argo-workflows/commit/b219d85ab57092b37b0b26f9f7c4cfbf5a9bea9a) fix: retry ExecutorPlugin invocation on transient network errors Fixes: #9664 (#9665) - * [b96d446d6](https://github.com/argoproj/argo-workflows/commit/b96d446d666f704ba102077404bf0b7c472c1494) fix: Improve semaphore concurrency performance (#9666) - * [38b55e39c](https://github.com/argoproj/argo-workflows/commit/38b55e39cca03e54da1f38849b066b36e03ba240) fix: sh not available in scratch container but used in argoexec. Fixes #9654 (#9679) - * [67fc0acab](https://github.com/argoproj/argo-workflows/commit/67fc0acabc4a03f374195246b362b177893866b1) chore(deps): bump golangci-lint to v1.49.0 (#9639) - * [9d7450139](https://github.com/argoproj/argo-workflows/commit/9d74501395fd715e2eb364e9f011b0224545d9ce) chore(deps-dev): bump webpack-dev-server from 4.11.0 to 4.11.1 in /ui (#9677) - * [56454d0c8](https://github.com/argoproj/argo-workflows/commit/56454d0c8d8d4909e23f0938e561ad2bdb02cef2) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.36 to 7.0.37 (#9673) - * [49c47cbad](https://github.com/argoproj/argo-workflows/commit/49c47cbad0408adaf1371da36c3ece340fdecd65) chore(deps): bump cloud.google.com/go/storage from 1.26.0 to 1.27.0 (#9672) - * [e6eb02fb5](https://github.com/argoproj/argo-workflows/commit/e6eb02fb529b7952227dcef091853edcf20f8248) fix: broken archived workflows ui. Fixes #9614, #9433 (#9634) - * [e556fe3eb](https://github.com/argoproj/argo-workflows/commit/e556fe3eb355bf9ef31a1ef8b057c680a5c24f06) fix: Fixed artifact retrieval when templateRef in use. Fixes #9631, #9644. 
(#9648) - * [72d3599b9](https://github.com/argoproj/argo-workflows/commit/72d3599b9f75861414475a39950879bddbc4e154) fix: avoid panic when not passing AuthSupplier (#9586) - * [4e430ecd8](https://github.com/argoproj/argo-workflows/commit/4e430ecd88d26c89b0fa38b7962d40dd09e9695e) chore(deps-dev): bump @babel/preset-env from 7.19.0 to 7.19.1 in /ui (#9605) - * [4ab943528](https://github.com/argoproj/argo-workflows/commit/4ab943528c8e1b510549e9c860c03adb8893e96b) chore(deps): bump google.golang.org/api from 0.95.0 to 0.96.0 (#9600) - * [7d3432899](https://github.com/argoproj/argo-workflows/commit/7d3432899890a84a2e745932a2f88ef53e75282a) chore(deps-dev): bump babel-jest from 29.0.2 to 29.0.3 in /ui (#9604) - -### Contributors - - * Adam - * Brian Loss - * Dakota Lillie - * Jesse Suen - * Julie Vogelman - * Rohan Kumar - * Seokju Hong - * Takumi Sue - * Vladimir Ivanov - * William Van Hevelingen - * Yuan Tang - * chen yangxue - * dependabot[bot] - * emagana - * jsvk - -## v3.4.0 (2022-09-18) - - * [047952afd](https://github.com/argoproj/argo-workflows/commit/047952afd539d06cae2fd6ba0b608b19c1194bba) fix: SDK workflow file - * [97328f1ed](https://github.com/argoproj/argo-workflows/commit/97328f1ed3885663b780f43e6b553208ecba4d3c) chore(deps): bump classnames and @types/classnames in /ui (#9603) - * [2dac194a5](https://github.com/argoproj/argo-workflows/commit/2dac194a52acb46c5535e5f552fdf7fd520d0f4e) chore(deps-dev): bump @babel/core from 7.19.0 to 7.19.1 in /ui (#9602) - * [47544cc02](https://github.com/argoproj/argo-workflows/commit/47544cc02a8663b5b69e4c213a382ff156deb63e) feat: Support retrying complex workflows with nested group nodes (#9499) - * [30bd96b4c](https://github.com/argoproj/argo-workflows/commit/30bd96b4c030fb728a3da78e0045982bf778d554) fix: Error message if cronworkflow failed to update (#9583) - * [fc5e11cd3](https://github.com/argoproj/argo-workflows/commit/fc5e11cd37f51e36517f7699c23afabac4f08528) chore(deps-dev): bump webpack-dev-server from 4.10.1 to 4.11.0 in /ui (#9567) - * [ace179804](https://github.com/argoproj/argo-workflows/commit/ace179804996edc0d356bff257a980e60b9bc5a0) docs(dev-container): Fix buildkit doc for local dev (#9580) - -### Contributors - - * JM - * Saravanan Balasubramanian - * Yuan Tang - * dependabot[bot] - -## v3.4.0-rc4 (2022-09-10) - - * [dee4ea5b0](https://github.com/argoproj/argo-workflows/commit/dee4ea5b0be2408e13af7745db910d0130e578f2) chore(deps-dev): bump @babel/core from 7.18.13 to 7.19.0 in /ui (#9566) - * [8172b493d](https://github.com/argoproj/argo-workflows/commit/8172b493d649c20b0b72ae56cf5b69bd2fa5ed8d) chore(deps-dev): bump sass from 1.54.8 to 1.54.9 in /ui (#9565) - * [68a793586](https://github.com/argoproj/argo-workflows/commit/68a793586ed8154f71d156e9daa8055e7ea8492e) chore(deps-dev): bump @babel/preset-env from 7.18.10 to 7.19.0 in /ui (#9562) - * [e1d8387fa](https://github.com/argoproj/argo-workflows/commit/e1d8387fa7a9c0648c548e2809f61eb77a802537) chore(deps-dev): bump babel-jest from 29.0.1 to 29.0.2 in /ui (#9564) - * [3950f8c1c](https://github.com/argoproj/argo-workflows/commit/3950f8c1c12ff7451b3e1be96b2ba108025a9677) chore(deps): bump google.golang.org/api from 0.94.0 to 0.95.0 (#9561) - * [8310bdbc9](https://github.com/argoproj/argo-workflows/commit/8310bdbc9d07f87640d944b949e465a044148368) chore(deps): bump github.com/coreos/go-oidc/v3 from 3.3.0 to 3.4.0 (#9560) - * [baaa8d0a9](https://github.com/argoproj/argo-workflows/commit/baaa8d0a9e90f5234ce7d02cbc33f8756a3ad4da) chore(deps): bump github.com/minio/minio-go/v7 from 
7.0.35 to 7.0.36 (#9558) - * [aab923452](https://github.com/argoproj/argo-workflows/commit/aab92345267e9e0562ee8495f49ac6d80e06ae28) chore(deps): bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#9559) - * [ec7c210c9](https://github.com/argoproj/argo-workflows/commit/ec7c210c9743d8f85d528d5593bc7390d73ff534) fix: use urlencode instead of htmlencode to sanitize url (#9538) - * [3a3f15997](https://github.com/argoproj/argo-workflows/commit/3a3f1599718453ca79800cfc28f6631ee780911b) fix: enable workflow-aggregate-roles to treat workflowtaskresults. Fixes #9545 (#9546) - * [9d66b69f0](https://github.com/argoproj/argo-workflows/commit/9d66b69f0bca92d7ef0c9aa67e87a2e334797530) fix: for pod that's been GC'ed we need to get the log from the artifact (#9540) - * [34a4e48c3](https://github.com/argoproj/argo-workflows/commit/34a4e48c3f412ba89cd0491469d13a14fdaf51b3) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.34 to 7.0.35 (#9502) - * [ef6bd5710](https://github.com/argoproj/argo-workflows/commit/ef6bd5710e5780afe40321f4d384471d9e02197c) fix: Capture exit code of signaled containers. Fixes #9415 (#9523) - * [6e2f15f9e](https://github.com/argoproj/argo-workflows/commit/6e2f15f9eea82f1344f139800869f9e7fd255b04) feat: added support for DAG task name as variables in workflow templates (#9387) - * [f27475feb](https://github.com/argoproj/argo-workflows/commit/f27475feb850dc43e07c3c5215cc9638947f0859) fix: default to 'main' container in Sensor logs. Fixes #9459 (#9438) - * [c00fbf88f](https://github.com/argoproj/argo-workflows/commit/c00fbf88f15104673b05ba5e109a72fed84dd38e) feat: Add node ID to node info panel (#9500) - * [2a80a2c1a](https://github.com/argoproj/argo-workflows/commit/2a80a2c1a9b0a2370f547492ef9168ee583077f5) fix: revert accidental commit in UI logs viewer (#9515) - * [b9d02cfd5](https://github.com/argoproj/argo-workflows/commit/b9d02cfd59c72b2bc8e437e6591ca4a145a3eb9b) chore(deps): bump cloud.google.com/go/storage from 1.25.0 to 1.26.0 (#9506) - * [bd9fc66c5](https://github.com/argoproj/argo-workflows/commit/bd9fc66c52c8e14123e5d7a4a7829023a072da9f) chore(deps-dev): bump @fortawesome/fontawesome-free from 6.1.2 to 6.2.0 in /ui (#9513) - * [9004f5e26](https://github.com/argoproj/argo-workflows/commit/9004f5e263a4ead8a5be4a4a09db03064eb1d453) chore(deps): bump google.golang.org/api from 0.93.0 to 0.94.0 (#9505) - * [605b0a0eb](https://github.com/argoproj/argo-workflows/commit/605b0a0eb3413107e2e87d6f3399d6b5f2778727) chore(deps-dev): bump sass from 1.54.5 to 1.54.8 in /ui (#9514) - * [6af53eff3](https://github.com/argoproj/argo-workflows/commit/6af53eff34180d9d238ba0fd0cb5a5b9b57b15a5) chore(deps-dev): bump babel-jest from 28.1.3 to 29.0.1 in /ui (#9512) - * [a2c20d70e](https://github.com/argoproj/argo-workflows/commit/a2c20d70e8885937532055b8c2791799020057ec) chore(deps): bump react-monaco-editor from 0.49.0 to 0.50.1 in /ui (#9509) - * [041d1382d](https://github.com/argoproj/argo-workflows/commit/041d1382d0a22a8bb88e88486f79c6b4bb6dfc8d) chore(deps-dev): bump webpack-dev-server from 4.10.0 to 4.10.1 in /ui (#9510) - * [7f9a15e77](https://github.com/argoproj/argo-workflows/commit/7f9a15e77eaa84d7f5474d28e30e52a77ca76b2e) chore(deps-dev): bump @babel/core from 7.18.10 to 7.18.13 in /ui (#9507) - * [08963c468](https://github.com/argoproj/argo-workflows/commit/08963c4680353a0b4e0abf16f0590a66b8dd4b3e) chore(deps-dev): bump @types/dagre from 0.7.47 to 0.7.48 in /ui (#9508) - * [1b09c8641](https://github.com/argoproj/argo-workflows/commit/1b09c8641ad11680b90dba582b3eae98dcee01c3) 
chore(deps): bump github.com/coreos/go-oidc/v3 from 3.2.0 to 3.3.0 (#9504) - * [4053ddf08](https://github.com/argoproj/argo-workflows/commit/4053ddf081755df8819a4a33ce558c92235ea81d) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk from 2.2.4+incompatible to 2.2.5+incompatible (#9503) - * [06d295752](https://github.com/argoproj/argo-workflows/commit/06d29575210d7b61ca7c7f2fb8e28fdd6c3d5637) feat: log format option for main containers (#9468) - -### Contributors - - * Alex Collins - * Julie Vogelman - * Rohan Kumar - * Takao Shibata - * Thomas Bonfort - * Tianchu Zhao - * Yuan Tang - * dependabot[bot] - * jsvk - -## v3.4.0-rc3 (2022-08-31) - - * [b941fbcab](https://github.com/argoproj/argo-workflows/commit/b941fbcaba087d5c5569573d1ef1a027313174ce) feat: improve e2e test for ArtifactGC (#9448) - * [94608d1dd](https://github.com/argoproj/argo-workflows/commit/94608d1ddc8781a55563f52ea65476dc99a54f94) feat: added support for artifact GC on GCS (#9420) - * [26ab0aed8](https://github.com/argoproj/argo-workflows/commit/26ab0aed8ba19571ffe3a2b048fcb43cbd1986e3) fix: link to "get artifacts from logs" was assuming Node ID was equal to Pod Name (#9464) - * [9cce91ea0](https://github.com/argoproj/argo-workflows/commit/9cce91ea0ca748cb35bd653c6f401d1aed97e6e8) Update USERS.md (#9471) - * [7118e1224](https://github.com/argoproj/argo-workflows/commit/7118e1224283ecb894794fdd72526089409e1476) feat: support slash in synchronization lock names. Fixes #9394 (#9404) - * [ff4109928](https://github.com/argoproj/argo-workflows/commit/ff4109928bd09a1b1d716cbdf82bd3ca132276d1) fix: Descendants of suspended nodes need to be removed when retrying workflow (#9440) - * [a09172afa](https://github.com/argoproj/argo-workflows/commit/a09172afafdb98ab362058618b5dc61980f0254e) fix: Incorrect alignment for archived workflow. Fixes #9433 (#9439) - * [04d19435c](https://github.com/argoproj/argo-workflows/commit/04d19435cb07e8815f1f95cca6751f8ce6b4bec1) fix: Properly reset suspended and skipped nodes when retrying (#9422) - * [de6b5ae6f](https://github.com/argoproj/argo-workflows/commit/de6b5ae6fa39693b7cd7777b9fcff9ff291476dd) fix(executor): Resource template gets incorrect plural for certain types (#9396) - * [3ddbb5e00](https://github.com/argoproj/argo-workflows/commit/3ddbb5e009f39fdb31cdaa7d77fca71dc3ae3f0e) fix: Only validate manifests for certain resource actions. Fixes #9418 (#9419) - * [a91e0041c](https://github.com/argoproj/argo-workflows/commit/a91e0041c9583deb48751c666dbbef111f3a56f9) fix: Workflow level http template hook status update. 
Fixes #8529 (#8586) - * [343c29819](https://github.com/argoproj/argo-workflows/commit/343c29819ac92d35f5db8a0de432f63df148ea31) fix: Argo waiter: invalid memory address or nil pointer dereference (#9408) - * [6f19e50a4](https://github.com/argoproj/argo-workflows/commit/6f19e50a41a17dbf06e6281f005ade6a2f19dba4) fix: Invalid memory address or nil pointer dereference (#9409) - * [7d9319b60](https://github.com/argoproj/argo-workflows/commit/7d9319b60d0bc417b25d35968c1619e51c13b7ec) Fix: UI to reflect Template.ArchiveLocation when showing Artifact's bucket in URN (#9351) - * [b7904c41c](https://github.com/argoproj/argo-workflows/commit/b7904c41c008176f40bb69c312b38ce6c0f9ce03) chore(deps-dev): bump sass from 1.54.4 to 1.54.5 in /ui (#9402) - * [fa66ed8e8](https://github.com/argoproj/argo-workflows/commit/fa66ed8e8bc20c4d759eb923b99dd6641ceafa86) chore(deps): bump github.com/tidwall/gjson from 1.14.2 to 1.14.3 (#9401) - -### Contributors - - * Brian Tate - * Julie Vogelman - * Rohan Kumar - * Saravanan Balasubramanian - * William Reed - * Yuan Tang - * dependabot[bot] - * jsvk - -## v3.4.0-rc2 (2022-08-18) - - * [6e8d1629d](https://github.com/argoproj/argo-workflows/commit/6e8d1629d9eebf78dce07f180ee99a233e422a80) fix: Artifact panel crashes when viewing artifacts. Fixes #9391 (#9392) - * [aa23a9ec8](https://github.com/argoproj/argo-workflows/commit/aa23a9ec8b9fc95593fdc41e1632412542a9c050) fix: Exit handle and Lifecycle hook to access {steps/tasks status} (#9229) - * [74cdf5d87](https://github.com/argoproj/argo-workflows/commit/74cdf5d870cc4d0b5576e6d78da7a6fde6a1be99) fix: improper selfLinks for cluster-scoped resources. Fixes #9320 (#9375) - * [f53d4834a](https://github.com/argoproj/argo-workflows/commit/f53d4834a208f39797637d7fad744caf0540cff8) fix: Panic on nill pointer when running a workflow with restricted parallelism (#9385) - * [c756291f7](https://github.com/argoproj/argo-workflows/commit/c756291f701296b36411ccdd639a965a302a5af8) fix: removed error check which prevented deleting successful artGC wfs. (#9383) - * [81e3d23e7](https://github.com/argoproj/argo-workflows/commit/81e3d23e730d80f24c90feb283fa3ff3b358e215) chore(deps): bump google.golang.org/api from 0.91.0 to 0.93.0 (#9381) - * [62b0db982](https://github.com/argoproj/argo-workflows/commit/62b0db9822ef93732544667739b33c1d9792ccf9) fix(ui): Correctly show icons in DAG. Fixes #9372 & #9373 (#9378) - * [47f59c050](https://github.com/argoproj/argo-workflows/commit/47f59c050ed579cdf9e01eddf0f388ac52fe5713) chore(deps): bump cloud.google.com/go/storage from 1.24.0 to 1.25.0 (#9357) - * [65670a402](https://github.com/argoproj/argo-workflows/commit/65670a402b1e9a96d246fd2ee363dd27a7f3149b) fix: Fix blank workflow details page after workflow submission (#9377) - * [6d08098a8](https://github.com/argoproj/argo-workflows/commit/6d08098a887c701cfffb2ea57f0391d6f7f5d489) feat: add argo delete --force. Fixes #9315. (#9321) - * [12466b7c9](https://github.com/argoproj/argo-workflows/commit/12466b7c9138052150afa6e0e81964d91a0538f5) fix: Retry for http timeout error. 
Fixes #9271 (#9335) - * [1b252fd33](https://github.com/argoproj/argo-workflows/commit/1b252fd33c8e456af0f6ed437b4f74a6d8cb46e7) chore(deps-dev): bump sass from 1.54.3 to 1.54.4 in /ui (#9359) - * [3f56a74dd](https://github.com/argoproj/argo-workflows/commit/3f56a74dd44e6e28da5bf2fc28cf03bae9b9f5c1) chore(deps-dev): bump webpack-dev-server from 4.9.3 to 4.10.0 in /ui (#9358) - * [fd08b0339](https://github.com/argoproj/argo-workflows/commit/fd08b0339506f8f11288393061cf8c2eb155403a) fix: ArtifactGC e2e test was looking for the wrong artifact names (#9353) - * [b430180d2](https://github.com/argoproj/argo-workflows/commit/b430180d275adac05d64b82613134b926d4405f1) fix: Deleted pods are not tracked correctly when retrying workflow (#9340) - * [e12c697b7](https://github.com/argoproj/argo-workflows/commit/e12c697b7be2547cdffd18c73bf39e10dfa458f0) feat: fix bugs in retryWorkflow if failed pod node has children nodes. Fix #9244 (#9285) - * [61f252f1d](https://github.com/argoproj/argo-workflows/commit/61f252f1d2083e5e9f262d0acd72058571e27708) fix: TestWorkflowStepRetry's comment accurately reflects what it does. (#9234) - -### Contributors - - * Alex Collins - * Dillen Padhiar - * Julie Vogelman - * Kyle Wong - * Robert Kotcher - * Saravanan Balasubramanian - * Yuan Tang - * dependabot[bot] - * jingkai - * smile-luobin - -## v3.4.0-rc1 (2022-08-09) - - * [f481e3b74](https://github.com/argoproj/argo-workflows/commit/f481e3b7444eb9cbb5c4402a27ef209818b1d817) feat: fix workflow hangs during executeDAGTask. Fixes #6557 (#8992) - * [ec213c070](https://github.com/argoproj/argo-workflows/commit/ec213c070d92f4ac937f55315feab0fcc108fed5) Fixes #8622: fix http1 keep alive connection leak (#9298) - * [0d77f5554](https://github.com/argoproj/argo-workflows/commit/0d77f5554f251771a175a95fc80eeb12489e42b4) fix: Look in correct bucket when downloading artifacts (Template.ArchiveLocation configured) (#9301) - * [b356cb503](https://github.com/argoproj/argo-workflows/commit/b356cb503863da43c0cc5e1fe667ebf602cb5354) feat: Artifact GC (#9255) - * [e246abec1](https://github.com/argoproj/argo-workflows/commit/e246abec1cbe6be8cb8955f798602faf619a943f) feat: modify "argoexec artifact delete" to handle multiple artifacts. 
Fixes #9143 (#9291) - * [f359625f6](https://github.com/argoproj/argo-workflows/commit/f359625f6262b6fa93b558f4e488a13652e9f50a) chore(deps-dev): bump @babel/preset-env from 7.18.9 to 7.18.10 in /ui (#9311) - * [ffefe9402](https://github.com/argoproj/argo-workflows/commit/ffefe9402885a275e7a26c12b5a5e52e7522c4d7) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.32 to 7.0.34 (#9304) - * [ee8404bac](https://github.com/argoproj/argo-workflows/commit/ee8404baca5303a6a66f0236aa82464572bded0c) chore(deps-dev): bump @babel/core from 7.18.9 to 7.18.10 in /ui (#9310) - * [028851d7f](https://github.com/argoproj/argo-workflows/commit/028851d7f832be5687048fbec20d4d47ef910d26) chore(deps-dev): bump sass from 1.54.0 to 1.54.3 in /ui (#9309) - * [c0d26d61c](https://github.com/argoproj/argo-workflows/commit/c0d26d61c02f7fb4140a089139f8984df91eaaf9) chore(deps): bump cron-parser from 4.5.0 to 4.6.0 in /ui (#9307) - * [8d06a83bc](https://github.com/argoproj/argo-workflows/commit/8d06a83bccba87886163143e959369f0d0240943) chore(deps): bump github.com/prometheus/client_golang from 1.12.2 to 1.13.0 (#9306) - * [f83346959](https://github.com/argoproj/argo-workflows/commit/f83346959cf5204fe80b6b70e4d823bf481579fe) chore(deps): bump google.golang.org/api from 0.90.0 to 0.91.0 (#9305) - * [63876713e](https://github.com/argoproj/argo-workflows/commit/63876713e809ceca8e1e540a38b5ad0e650cbb2a) chore(deps): bump github.com/tidwall/gjson from 1.14.1 to 1.14.2 (#9303) - * [06b0a8cce](https://github.com/argoproj/argo-workflows/commit/06b0a8cce637db1adae0bae91670e002cfd0ae4d) fix(gcs): Wrap errors using `%w` to make retrying work (#9280) - * [083f3a21a](https://github.com/argoproj/argo-workflows/commit/083f3a21a601e086ca48d2532463a858cc8b316b) fix: pass correct error obj for azure blob failures (#9276) - * [55d15aeb0](https://github.com/argoproj/argo-workflows/commit/55d15aeb03847771e2b48f11fa84f88ad1df3e7c) feat: support zip for output artifacts archive. Fixes #8861 (#8973) - * [a51e833d9](https://github.com/argoproj/argo-workflows/commit/a51e833d9eea18ce5ef7606e55ddd025efa85de1) chore(deps): bump google.golang.org/api from 0.89.0 to 0.90.0 (#9260) - * [c484c57f1](https://github.com/argoproj/argo-workflows/commit/c484c57f13f6316bbf5ac7e98c1216ba915923c7) chore(deps-dev): bump @fortawesome/fontawesome-free from 6.1.1 to 6.1.2 in /ui (#9261) - * [2d1758fe9](https://github.com/argoproj/argo-workflows/commit/2d1758fe90fd60b37d0dfccb55c3f79d8a897289) fix: retryStrategy.Limit is now read properly for backoff strategy. Fixes #9170. 
(#9213) - * [b565bf358](https://github.com/argoproj/argo-workflows/commit/b565bf35897f529bbb446058c24b72d506024e29) Fix: user namespace override (Fixes #9266) (#9267) - * [0c24ca1ba](https://github.com/argoproj/argo-workflows/commit/0c24ca1ba8a5c38c846d595770e16398f6bd84a5) fix: TestParallel 503 with external url (#9265) - * [fd6c7a7ec](https://github.com/argoproj/argo-workflows/commit/fd6c7a7ec1f2053f9fdd03451d7d29b1339c0408) feat: Add custom event aggregator function with annotations (#9247) - * [be6ba4f77](https://github.com/argoproj/argo-workflows/commit/be6ba4f772f65588af7c79cc9351ff6dea63ed16) fix: add ServiceUnavailable to s3 transient errors list Fixes #9248 (#9249) - * [51538235c](https://github.com/argoproj/argo-workflows/commit/51538235c7a70b89855dd3b96d97387472bdbade) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.31 to 7.0.32 (#9253) - * [5cf5150ef](https://github.com/argoproj/argo-workflows/commit/5cf5150efe1694bb165e98c1d7509f9987d4f524) chore(deps): bump cloud.google.com/go/storage from 1.22.1 to 1.24.0 (#9252) - * [454f19ac8](https://github.com/argoproj/argo-workflows/commit/454f19ac8959f3e0db87bb34ec8f7099558aa737) chore(deps): bump google.golang.org/api from 0.87.0 to 0.89.0 (#9251) - * [e19d73f64](https://github.com/argoproj/argo-workflows/commit/e19d73f64af073bdd7778674c72a1d197c0836f6) chore(deps-dev): bump @babel/core from 7.18.6 to 7.18.9 in /ui (#9218) - * [073431310](https://github.com/argoproj/argo-workflows/commit/07343131080ab125da7ed7d33dbf2d7e0e21362a) chore(deps-dev): bump sass from 1.53.0 to 1.54.0 in /ui (#9219) - * [aa6aaf753](https://github.com/argoproj/argo-workflows/commit/aa6aaf7539ed86f08c43d4a59eb42337aea86ce6) chore(deps-dev): bump @babel/preset-env from 7.18.6 to 7.18.9 in /ui (#9216) - * [6f8592228](https://github.com/argoproj/argo-workflows/commit/6f8592228668457a8b1db072cc53db2c5b01de55) chore(deps): bump github.com/sirupsen/logrus from 1.8.1 to 1.9.0 (#9214) - * [769896eb5](https://github.com/argoproj/argo-workflows/commit/769896eb5bf0a7d8db1a94b423e5bc16cf09d5aa) feat: APIratelimit headers and doc (#9206) - * [bcb596270](https://github.com/argoproj/argo-workflows/commit/bcb59627072c3b4f0cd1cef12f499ec3d8e87815) ui: remove workflowlist searchbox (#9208) - * [15fdf4903](https://github.com/argoproj/argo-workflows/commit/15fdf4903a05c7854656f59f61a676362fe551c6) fix: line return in devcontainer host file (#9204) - * [44731d671](https://github.com/argoproj/argo-workflows/commit/44731d671d425b0709bab5c5e27ed7c42a0ee92d) feat: adding new CRD type "ArtifactGCTask" (#9184) - * [d5d4628a3](https://github.com/argoproj/argo-workflows/commit/d5d4628a3573a0e1a75c367243e259844320e021) fix: Set namespace to user namespace obtained from /userinfo service (#9191) - * [e4489f5d1](https://github.com/argoproj/argo-workflows/commit/e4489f5d12c4f62421c87c69d8b997aad71fdea6) feat: log format option for wait and init containers. 
Fixes #8986 (#9169) - * [573fe98ff](https://github.com/argoproj/argo-workflows/commit/573fe98ffaa119b607bb5d4aafc1fb3c70a4c564) fix: remove unused argument which is triggering in lint (needed for PRs to pass CI) (#9186) - * [1af892133](https://github.com/argoproj/argo-workflows/commit/1af892133cd5b9e6ac22fc61bd4eabd84c568e89) feat: api ratelimiter for argoserver (#8993) - * [0f1d1d9b7](https://github.com/argoproj/argo-workflows/commit/0f1d1d9b7ef9b602b82123a9d92c212b50ac01e1) fix: support RemainingItemCount in archivedWrokflow (#9118) - * [aea581e02](https://github.com/argoproj/argo-workflows/commit/aea581e027fcd0675e785f413e964c588af304ad) fix: Incorrect link to workflows list with the same author (#9173) - * [fd6f3c263](https://github.com/argoproj/argo-workflows/commit/fd6f3c263412a1174de723470a14721b220c4651) feat: Add support for Azure Blob Storage artifacts Fixes #1540 (#9026) - * [26ff2e8a1](https://github.com/argoproj/argo-workflows/commit/26ff2e8a17ff68628090e18a3f246ab87fe950a3) chore(deps): bump google.golang.org/api from 0.86.0 to 0.87.0 (#9157) - * [877f36f37](https://github.com/argoproj/argo-workflows/commit/877f36f370d7ef00a1b8f136bb157e64c1e2769a) fix: Workflow details accessing undefined templateRef. Fixes #9167 (#9168) - * [6c20202ca](https://github.com/argoproj/argo-workflows/commit/6c20202cae8e62bb6c04a067a269e964d181e864) feat: make node info side panel resizable. Fixes #8917 (#8963) - * [19db1d35e](https://github.com/argoproj/argo-workflows/commit/19db1d35e3f1be55ca8e7ddc5040b9eaf4ac3f4b) chore(deps-dev): bump babel-jest from 28.1.2 to 28.1.3 in /ui (#9159) - * [96b98dafb](https://github.com/argoproj/argo-workflows/commit/96b98dafbdde5770d4d92c469e13ca81734a753f) chore(deps): bump github.com/prometheus/common from 0.35.0 to 0.37.0 (#9158) - * [4dc0e83ea](https://github.com/argoproj/argo-workflows/commit/4dc0e83ea091990e2a02dd8a2b542035ebe98d9a) chore(deps-dev): bump webpack-dev-server from 4.9.2 to 4.9.3 in /ui (#9105) - * [cbe17105d](https://github.com/argoproj/argo-workflows/commit/cbe17105d91517f37cafafb49ad5f422b895c239) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.30 to 7.0.31 (#9130) - * [a9c36e723](https://github.com/argoproj/argo-workflows/commit/a9c36e723c0ab44baf3ea0cdf4706fc4b8bf848a) chore(deps-dev): bump @types/swagger-ui-react from 3.23.2 to 4.11.0 in /ui (#9132) - * [9bbf7e0f0](https://github.com/argoproj/argo-workflows/commit/9bbf7e0f092f0d76c7419d291d3f9dba016b2f3c) feat: Support overriding parameters when retry/resubmit workflows (#9141) - * [42729ff75](https://github.com/argoproj/argo-workflows/commit/42729ff7542760bd27b08a7347a603d8f232466e) fix: Workflow retry should also reset the selected nodes (#9156) - * [559b59c0a](https://github.com/argoproj/argo-workflows/commit/559b59c0a2b9b3254740edf634de8a1c63c84ab0) feat: report Artifact GC failures in user interface. Fixes #8518 (#9115) - * [56d0c664a](https://github.com/argoproj/argo-workflows/commit/56d0c664ad96c95ca6c2311b2d1559dd423a5e4d) fix: Do not error when getting log artifacts from GCS. Fixes #8746 (#9155) - * [2b92b1aef](https://github.com/argoproj/argo-workflows/commit/2b92b1aefbf1e6a12476b946f05559c9b05fffef) fix: Fixed swagger error. Fixes #8922 (#9078) - * [57bac335a](https://github.com/argoproj/argo-workflows/commit/57bac335afac2c28a4eb5ccf1fa97bb5bba63e97) feat: refactoring e2e test timeouts to support multiple environments. 
(#8925) - * [921ae1ebf](https://github.com/argoproj/argo-workflows/commit/921ae1ebf5f849d4f684c79dee375205f05cfca9) chore(deps): bump moment from 2.29.3 to 2.29.4 in /ui (#9131) - * [c149dc53c](https://github.com/argoproj/argo-workflows/commit/c149dc53c78571778b0589d977dd0445e75d9eec) chore(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#9097) - * [a0c9e66c1](https://github.com/argoproj/argo-workflows/commit/a0c9e66c1d1cb3d83c5150814c4b8ccd9acdcfb1) chore(deps): bump react-monaco-editor from 0.48.0 to 0.49.0 in /ui (#9104) - * [0f0e25e03](https://github.com/argoproj/argo-workflows/commit/0f0e25e03ffe00f79e74087044ecd080f2d6242a) [Snyk] Upgrade swagger-ui-react from 4.10.3 to 4.12.0 (#9072) - * [8fc78ca9d](https://github.com/argoproj/argo-workflows/commit/8fc78ca9dce321f2173fba7735e4b4bd48df1b6c) chore(deps): bump cronstrue from 1.125.0 to 2.11.0 in /ui (#9102) - * [01e9ef78f](https://github.com/argoproj/argo-workflows/commit/01e9ef78f9cd81d3e0ea4c85e33abd181118868c) chore(deps-dev): bump @babel/core from 7.18.5 to 7.18.6 in /ui (#9100) - * [50a4d0044](https://github.com/argoproj/argo-workflows/commit/50a4d00443cfc53976db6227394784bbf34fe239) feat: Support retry on nested DAG and node groups (#9028) - * [20f8582a9](https://github.com/argoproj/argo-workflows/commit/20f8582a9e71effee220b160b229b5fd68bf7c95) feat(ui): Add workflow author information to workflow summary and drawer (#9119) - * [18be9593e](https://github.com/argoproj/argo-workflows/commit/18be9593e76bdeb456b5de5ea047a6aa8d201d74) chore(deps-dev): bump babel-jest from 28.1.1 to 28.1.2 in /ui (#9103) - * [154d849b3](https://github.com/argoproj/argo-workflows/commit/154d849b32082a4211487b6dbebbae215b97b9ee) chore(deps): bump cron-parser from 4.4.0 to 4.5.0 in /ui (#9101) - * [801216c44](https://github.com/argoproj/argo-workflows/commit/801216c44053343020f41a9953a5ed1722b36232) chore(deps-dev): bump @babel/preset-env from 7.18.2 to 7.18.6 in /ui (#9099) - * [ba225d3aa](https://github.com/argoproj/argo-workflows/commit/ba225d3aa586dd9e6770ec1b2f482f1c15fe2add) chore(deps): bump google.golang.org/api from 0.85.0 to 0.86.0 (#9096) - * [ace228486](https://github.com/argoproj/argo-workflows/commit/ace2284869a9574602b602a5bdf4592cd6ae8376) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.29 to 7.0.30 (#9098) - * [3967929cf](https://github.com/argoproj/argo-workflows/commit/3967929cfde54c2a3c62c47fd509beaea1832ea4) chore(deps): bump dependabot/fetch-metadata from 1.3.1 to 1.3.3 (#9095) - * [f69cb89b1](https://github.com/argoproj/argo-workflows/commit/f69cb89b16bce0b88b63ec3fec14d7abc0b32fef) docs(workflow/artifacts/gcs): correct spelling of BUCKET (#9082) - * [61211f9db](https://github.com/argoproj/argo-workflows/commit/61211f9db1568190dd46b7469fa79eb6530bba73) fix: Add workflow failures before hooks run. Fixes #8882 (#9009) - * [c1154ff97](https://github.com/argoproj/argo-workflows/commit/c1154ff975bcb580554f78f393fd908b1f64ea6a) feat: redirect to archive on workflow absence. Fixes #7745 (#7854) - * [f5f1a3438](https://github.com/argoproj/argo-workflows/commit/f5f1a34384ab4bbbebd9863711a3047a08ced7fb) fix: sync lock should be released only if we're retrying (#9063) - * [146e38a3f](https://github.com/argoproj/argo-workflows/commit/146e38a3f91ac8a7b9b749d96c54bd3eab2ce1ab) chore!: Remove dataflow pipelines from codebase (#9071) - * [92eaadffc](https://github.com/argoproj/argo-workflows/commit/92eaadffcd0c244f05b23d4f177fd53f000b1a99) feat: inform users on UI if an artifact will be deleted. 
Fixes #8667 (#9056) - * [d0cfc6d10](https://github.com/argoproj/argo-workflows/commit/d0cfc6d10b11d9977007bb14373e699e604c1b74) feat: UI default to the namespace associated with ServiceAccount. Fixes #8533 (#9008) - * [1ccc120cd](https://github.com/argoproj/argo-workflows/commit/1ccc120cd5392f877ecbb328cbf5304e6eb89783) feat: added support for binary HTTP template bodies. Fixes #6888 (#8087) - * [443155dea](https://github.com/argoproj/argo-workflows/commit/443155deaa1aa9e19688de0580840bd0f8598dd5) feat: If artifact has been deleted, show a message to that effect in the iFrame in the UI (#8966) - * [cead295fe](https://github.com/argoproj/argo-workflows/commit/cead295fe8b4cdfbc7eeb3c2dcfa99e2bfb291b6) chore(deps-dev): bump @types/superagent from 3.8.3 to 4.1.15 in /ui (#9057) - * [b1e49a471](https://github.com/argoproj/argo-workflows/commit/b1e49a471c7de65a628ac496a4041a2ec9975eb0) chore(deps-dev): bump html-webpack-plugin from 3.2.0 to 4.5.2 in /ui (#9036) - * [11801d044](https://github.com/argoproj/argo-workflows/commit/11801d044cfddfc8100d973e91ddfe9a1252a028) chore(deps): bump superagent from 7.1.6 to 8.0.0 in /ui (#9052) - * [c30493d72](https://github.com/argoproj/argo-workflows/commit/c30493d722c2fd9aa5ccc528327759d96f99fb23) chore(deps): bump github.com/prometheus/common from 0.34.0 to 0.35.0 (#9049) - * [74c1e86b8](https://github.com/argoproj/argo-workflows/commit/74c1e86b8bc302780f36a364d7adb98184bf6e45) chore(deps): bump google.golang.org/api from 0.83.0 to 0.85.0 (#9044) - * [77be291da](https://github.com/argoproj/argo-workflows/commit/77be291da21c5057d0c966adce449a7f9177e0db) chore(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#9045) - * [278f61c46](https://github.com/argoproj/argo-workflows/commit/278f61c46309b9df07ad23497a4fd97817af93cc) chore(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#9047) - * [e288dfc89](https://github.com/argoproj/argo-workflows/commit/e288dfc8963fdd5e5bff8d7cbed5d227e76afd7b) Revert "chore(deps-dev): bump raw-loader from 0.5.1 to 4.0.2 in /ui (#9034)" (#9041) - * [b9318ba93](https://github.com/argoproj/argo-workflows/commit/b9318ba939defe5fdeb46dcbfc44bc8f7cf14a6d) chore(deps-dev): bump webpack-cli from 4.9.2 to 4.10.0 in /ui (#9037) - * [891a256a2](https://github.com/argoproj/argo-workflows/commit/891a256a2165a853bc18e5f068d870a232b671f3) chore(deps-dev): bump sass from 1.52.1 to 1.53.0 in /ui (#9038) - * [db73db04d](https://github.com/argoproj/argo-workflows/commit/db73db04d033cc5a4e2f113fd090afe773ebcb81) chore(deps-dev): bump @babel/core from 7.18.2 to 7.18.5 in /ui (#9031) - * [fa93a6558](https://github.com/argoproj/argo-workflows/commit/fa93a655834138fc549f67f8a4eadd8df7a18c50) chore(deps-dev): bump babel-jest from 28.1.0 to 28.1.1 in /ui (#9035) - * [aeed837be](https://github.com/argoproj/argo-workflows/commit/aeed837be8083b8f49242635f3baa1b162a8db8b) chore(deps-dev): bump webpack-dev-server from 4.9.0 to 4.9.2 in /ui (#9032) - * [e7d3308ef](https://github.com/argoproj/argo-workflows/commit/e7d3308ef4f755d484c8ca6cf90993a5e1d7f954) chore(deps-dev): bump raw-loader from 0.5.1 to 4.0.2 in /ui (#9034) - * [d90f11c3e](https://github.com/argoproj/argo-workflows/commit/d90f11c3e4c1f7d88be3220f57c3184d7beaddaf) [Snyk] Upgrade superagent from 7.1.3 to 7.1.4 (#9020) - * [6e962fdca](https://github.com/argoproj/argo-workflows/commit/6e962fdcab5effbb4ac12180249019d7d6241b8c) feat: sanitize config links (#8779) - * [89f3433bf](https://github.com/argoproj/argo-workflows/commit/89f3433bf7cbca7092952aa8ffc5e5c254f28999) fix: 
workflow.status is now set properly in metrics. Fixes #8895 (#8939) - * [2aa32aea5](https://github.com/argoproj/argo-workflows/commit/2aa32aea5eaf325bc6a3eff852f2ff0052366bf6) fix: check for nil, and add logging to expose root cause of panic in Issue 8968 (#9010) - * [62287487a](https://github.com/argoproj/argo-workflows/commit/62287487a0895a457804f0ac97fdf9c9413dd2ab) fix: Treat 'connection reset by peer' as a transient network error. Fixes #9013 (#9017) - * [2e3177617](https://github.com/argoproj/argo-workflows/commit/2e31776175b2cbb123278920e30807244e2f7a3b) fix: add nil check for retryStrategy.Limit in deadline check. Fixes #8990 (#8991) - * [73487fbee](https://github.com/argoproj/argo-workflows/commit/73487fbeeb645ac8f6229f98aed2ec6eec756571) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.27 to 7.0.29 (#9004) - * [e34e378af](https://github.com/argoproj/argo-workflows/commit/e34e378af05b0ffde14b89e8d9eec9964a903002) chore(deps): bump github.com/argoproj/pkg from 0.13.2 to 0.13.3 (#9002) - * [89f82cea4](https://github.com/argoproj/argo-workflows/commit/89f82cea4b3f3f40d1666d2469ab3a97e3665fdd) feat: log workflow size before hydrating/dehydrating. Fixes #8976 (#8988) - * [a1535fa44](https://github.com/argoproj/argo-workflows/commit/a1535fa446d15bae56656d20577fdbb000353cc2) fix: Workflow Duration metric shouldn't increase after workflow complete (#8989) - * [6106ac722](https://github.com/argoproj/argo-workflows/commit/6106ac7229eeaac9132f8df595b569de2bc68ccf) feat: Support loading manifest from artifacts for resource templates. Fixes #5878 (#8657) - * [e0a1afa91](https://github.com/argoproj/argo-workflows/commit/e0a1afa91d8e51ba2c6aed6c604f2a69bdb1b387) fix: sync cluster Workflow Template Informer before it's used (#8961) - * [1ed1ee114](https://github.com/argoproj/argo-workflows/commit/1ed1ee114b2069d9cdeb9fd1f3a7513f9f13a396) chore(deps): bump actions/setup-python from 3 to 4 (#8949) - * [6c244f3cb](https://github.com/argoproj/argo-workflows/commit/6c244f3cb400f69b641d7e59c5215806a2870604) fix: long code blocks overflow in ui. Fixes #8916 (#8947) - * [e31ffcd33](https://github.com/argoproj/argo-workflows/commit/e31ffcd339370d6000f86d552845d7d378620d29) fix: Correct kill command. Fixes #8687 (#8908) - * [263977967](https://github.com/argoproj/argo-workflows/commit/263977967a47f24711b9f6110fe950c47d8c5f08) chore(deps): bump google.golang.org/api from 0.82.0 to 0.83.0 (#8951) - * [e96b1b3fd](https://github.com/argoproj/argo-workflows/commit/e96b1b3fd4e27608de8a94763782bd2d41cd5761) chore(deps): bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#8950) - * [107ed932d](https://github.com/argoproj/argo-workflows/commit/107ed932de466a89feb71dc04950c86d98747cc5) feat: add indexes for improve archived workflow performance. Fixes #8836 (#8860) - * [1d4edb433](https://github.com/argoproj/argo-workflows/commit/1d4edb4333ce4e5efeb44a199b390c3d9d02fc25) feat: Date range filter for workflow list. Fixes #8329 (#8596) - * [a6eef41bf](https://github.com/argoproj/argo-workflows/commit/a6eef41bf961cda347b9a9bd8476fc33e3a467a9) feat: add artifact delete to argoexec CLI. Fixes #8669 (#8913) - * [416fce705](https://github.com/argoproj/argo-workflows/commit/416fce70543059cc81753ba5131b1661a13a0fed) fix: Fork sub-process. Fixes #8454 (#8906) - * [750c4e1f6](https://github.com/argoproj/argo-workflows/commit/750c4e1f699b770a309843f2189b4e703305e44f) fix: Only signal running containers, ignore failures. 
(#8909) - * [ede1a39e7](https://github.com/argoproj/argo-workflows/commit/ede1a39e7cb48890aa5d4c8221e2c9d94e7ef007) fix: workflowMetadata needs to be loaded into globalParams in both ArgoServer and Controller (#8907) - * [df3764925](https://github.com/argoproj/argo-workflows/commit/df37649251f5791c40802defd923dd735924eb3a) Add left-margin to the question circle next to parameter name in Submit Workflow Panel (#8927) - * [1e17f7ff5](https://github.com/argoproj/argo-workflows/commit/1e17f7ff5232067c9c1c05bfa55322e41e0915d7) chore(deps): bump google.golang.org/api from 0.81.0 to 0.82.0 (#8914) - * [7dacb5bca](https://github.com/argoproj/argo-workflows/commit/7dacb5bcaeae8e3be64bb1fbf54024401d42d867) fix: Fixed Swagger error. Fixes #8830 (#8886) - * [8592e9ce6](https://github.com/argoproj/argo-workflows/commit/8592e9ce6e4de64e55c23bfda460b0cad67e74f7) feat: enable gcflags (compiler flags) to be passed into 'go build' (#8896) - * [7a626aa6a](https://github.com/argoproj/argo-workflows/commit/7a626aa6a1368da59c322f1d768e691b0ee4d7e4) feat: add Artifact.Deleted (#8893) - * [f2c748ac4](https://github.com/argoproj/argo-workflows/commit/f2c748ac44ed41b1d672e6c45a34090992b979d7) feat: Artifact GC Finalizer needs to be added if any Output Artifacts have a strategy (#8856) - * [093a6fe7e](https://github.com/argoproj/argo-workflows/commit/093a6fe7e1b1926f5feaff07a66edb9ff036f866) Add Orchest to ecosystem (#8884) - * [2b5ae622b](https://github.com/argoproj/argo-workflows/commit/2b5ae622bc257a4dafb4fab961e8142accaa484d) Removed Security Nudge and all its invocations (#8838) - * [86ab55726](https://github.com/argoproj/argo-workflows/commit/86ab55726e213bc406e69edb14921b501938fa25) chore(deps-dev): bump monaco-editor-webpack-plugin from 1.9.0 to 1.9.1 in /ui (#8877) - * [df750d715](https://github.com/argoproj/argo-workflows/commit/df750d7158f7291983aeffe709b7624eb73f964a) chore(deps-dev): bump @babel/preset-env from 7.18.0 to 7.18.2 in /ui (#8876) - * [f0447918d](https://github.com/argoproj/argo-workflows/commit/f0447918d6826b21a8e0cf0d0d218113e69059a8) chore(deps): bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#8874) - * [8b7bdb713](https://github.com/argoproj/argo-workflows/commit/8b7bdb7139e8aa152e95ad3fe6815e7a801afcbb) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.26 to 7.0.27 (#8875) - * [282a72295](https://github.com/argoproj/argo-workflows/commit/282a722950b113008b4efb258309cc4066f925a0) add pismo.io to argo users (#8871) - * [1a517e6f5](https://github.com/argoproj/argo-workflows/commit/1a517e6f5b801feae9416acf824c83ff65dea65c) chore(deps): bump superagent from 3.8.3 to 7.1.3 in /ui (#8851) - * [53012fe66](https://github.com/argoproj/argo-workflows/commit/53012fe66fb6afcefcf4b237c34264a600ae6804) chore(deps-dev): bump source-map-loader from 0.2.4 to 1.1.3 in /ui (#8850) - * [35eb2bb96](https://github.com/argoproj/argo-workflows/commit/35eb2bb96d1489366e9813c14863a79db4ea85df) chore(deps-dev): bump file-loader from 6.0.0 to 6.2.0 in /ui (#8848) - * [116dfdb03](https://github.com/argoproj/argo-workflows/commit/116dfdb039611d70dd98aef7eb4428b589d55361) chore(deps-dev): bump @fortawesome/fontawesome-free from 5.15.3 to 6.1.1 in /ui (#8846) - * [7af70ff39](https://github.com/argoproj/argo-workflows/commit/7af70ff3926e0400d2fe5260f0ea2eeb8bc9bf53) chore(deps-dev): bump glob from 7.1.6 to 8.0.3 in /ui (#8845) - * [67dab5d85](https://github.com/argoproj/argo-workflows/commit/67dab5d854a4b1be693571765eae3857559851c6) chore(deps): bump cron-parser from 2.18.0 to 4.4.0 in /ui (#8844) - * 
[e7d294214](https://github.com/argoproj/argo-workflows/commit/e7d2942148ed876717b24fcd2b8af7735e977cb0) chore(deps-dev): bump @babel/core from 7.12.10 to 7.18.2 in /ui (#8843) - * [f676ac59a](https://github.com/argoproj/argo-workflows/commit/f676ac59a0794791dc5bdfd74acd9764110f2d2a) chore(deps): bump google.golang.org/api from 0.80.0 to 0.81.0 (#8841) - * [d324faaf8](https://github.com/argoproj/argo-workflows/commit/d324faaf885d32e8666a70e1f20bae7e71db386e) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk from 2.2.2+incompatible to 2.2.4+incompatible (#8842) - * [40ab51766](https://github.com/argoproj/argo-workflows/commit/40ab51766aa7cb511dcc3533aeb917379e6037ad) Revert "chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui" (#8839) - * [cc9d14cf0](https://github.com/argoproj/argo-workflows/commit/cc9d14cf0d60812e177ebb447181df933199b722) feat: Use Pod Names v2 by default (#8748) - * [c0490ec04](https://github.com/argoproj/argo-workflows/commit/c0490ec04be88975c316ff6a9dc007861c8f9254) chore(deps-dev): bump webpack-cli from 3.3.11 to 4.9.2 in /ui (#8726) - * [bc4a80a8d](https://github.com/argoproj/argo-workflows/commit/bc4a80a8d63f869a7a607861374e0c206873f250) feat: remove size limit of 128kb for workflow templates. Fixes #8789 (#8796) - * [5c91d93af](https://github.com/argoproj/argo-workflows/commit/5c91d93afd07f207769a63730ec72e9a93b584ce) chore(deps-dev): bump @babel/preset-env from 7.12.11 to 7.18.0 in /ui (#8825) - * [d61bea949](https://github.com/argoproj/argo-workflows/commit/d61bea94947526e7ca886891152c565cc15abded) chore(deps): bump js-yaml and @types/js-yaml in /ui (#8823) - * [4688afcc5](https://github.com/argoproj/argo-workflows/commit/4688afcc51c50edc27eaba92c449bc4bce00a139) chore(deps-dev): bump webpack-dev-server from 3.11.3 to 4.9.0 in /ui (#8818) - * [14ac0392c](https://github.com/argoproj/argo-workflows/commit/14ac0392ce79bddbb9fc44c86fcf315ea1746235) chore(deps): bump cloud.google.com/go/storage from 1.22.0 to 1.22.1 (#8816) - * [3a21fb8a4](https://github.com/argoproj/argo-workflows/commit/3a21fb8a423047268a50fba22dcdd2b4d4029944) chore(deps-dev): bump tslint from 5.11.0 to 5.20.1 in /ui (#8822) - * [eca4bdc49](https://github.com/argoproj/argo-workflows/commit/eca4bdc493332eeaf626f454fb25f1ec5257864a) chore(deps-dev): bump copyfiles from 1.2.0 to 2.4.1 in /ui (#8821) - * [3416253be](https://github.com/argoproj/argo-workflows/commit/3416253be1047d5c6e6c0cb69defd92ee7eea5fe) chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui (#8820) - * [e9ea8ee69](https://github.com/argoproj/argo-workflows/commit/e9ea8ee698d8b0d173d0039eba66b2a017d650d3) chore(deps-dev): bump sass from 1.30.0 to 1.52.1 in /ui (#8817) - * [ac92a49d0](https://github.com/argoproj/argo-workflows/commit/ac92a49d0f253111bd14bd72699ca3ad8cbeee1d) chore(deps): bump google.golang.org/api from 0.79.0 to 0.80.0 (#8815) - * [1bd841853](https://github.com/argoproj/argo-workflows/commit/1bd841853633ebb71fc569b2975def90afb1a68d) docs(running-locally): update dependencies info (#8810) - * [bc0100346](https://github.com/argoproj/argo-workflows/commit/bc01003468186ddcb93d1d32e9a49a75046827e7) fix: Change to distroless. 
Fixes #8805 (#8806) - * [872826591](https://github.com/argoproj/argo-workflows/commit/8728265915fd7c18f05f32e32dc12de1ef3ca46b) Revert "chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /u… (#8804) - * [fbb8246cd](https://github.com/argoproj/argo-workflows/commit/fbb8246cdc44d218f70f0de677be0f4dfd0780cf) fix: set NODE_OPTIONS to no-experimental-fetch to prevent yarn start error (#8802) - * [39fbdb2a5](https://github.com/argoproj/argo-workflows/commit/39fbdb2a551482c5ae2860fd266695c0113cb7b7) fix: fix a command in the quick-start page (#8782) - * [961f731b7](https://github.com/argoproj/argo-workflows/commit/961f731b7e9cb60490dd763a394893154c0b3c60) fix: Omitted task result should also be valid (#8776) - * [67cdd5f97](https://github.com/argoproj/argo-workflows/commit/67cdd5f97a16041fd1ec32134158c71c07249e4d) chore(deps-dev): bump babel-loader from 8.2.2 to 8.2.5 in /ui (#8767) - * [fce407663](https://github.com/argoproj/argo-workflows/commit/fce40766351440375e6b2761cd6a304474764b9a) chore(deps-dev): bump babel-jest from 26.6.3 to 28.1.0 in /ui (#8774) - * [026298671](https://github.com/argoproj/argo-workflows/commit/02629867180367fb21a347c3a36cf2d52b63a2c3) chore(deps-dev): bump style-loader from 0.20.3 to 2.0.0 in /ui (#8775) - * [2e1fd11db](https://github.com/argoproj/argo-workflows/commit/2e1fd11db5bbb95ee9bcdbeaeab970fa92fc3588) chore(deps-dev): bump webpack from 4.35.0 to 4.46.0 in /ui (#8768) - * [00bda0b06](https://github.com/argoproj/argo-workflows/commit/00bda0b0690ea24fa52603f30eecb40fe8b5cdd7) chore(deps-dev): bump @types/prop-types from 15.5.4 to 15.7.5 in /ui (#8773) - * [28b494a67](https://github.com/argoproj/argo-workflows/commit/28b494a674e560a07e5a1c98576a94bbef111fc5) chore(deps-dev): bump @types/dagre from 0.7.44 to 0.7.47 in /ui (#8772) - * [b07a57694](https://github.com/argoproj/argo-workflows/commit/b07a576945e87915e529d718101319d2f83cd98a) chore(deps): bump react-monaco-editor from 0.47.0 to 0.48.0 in /ui (#8770) - * [2a0ac29d2](https://github.com/argoproj/argo-workflows/commit/2a0ac29d27466a247c3a4fee0429d95aa5b67338) chore(deps-dev): bump webpack-dev-server from 3.7.2 to 3.11.3 in /ui (#8769) - * [6b11707f5](https://github.com/argoproj/argo-workflows/commit/6b11707f50301a125eb8349193dd0be8659a4cdf) chore(deps): bump github.com/coreos/go-oidc/v3 from 3.1.0 to 3.2.0 (#8765) - * [d23693166](https://github.com/argoproj/argo-workflows/commit/d236931667a60266f87fbc446064ceebaf582996) chore(deps): bump github.com/prometheus/client_golang from 1.12.1 to 1.12.2 (#8763) - * [f6d84640f](https://github.com/argoproj/argo-workflows/commit/f6d84640fda435e08cc6a961763669b7572d0e69) fix: Skip TestExitHookWithExpression() completely (#8761) - * [178bbbc31](https://github.com/argoproj/argo-workflows/commit/178bbbc31c594f9ded4b8a66b0beecbb16cfa949) fix: Temporarily fix CI build. Fixes #8757. (#8758) - * [6b9dc2674](https://github.com/argoproj/argo-workflows/commit/6b9dc2674f2092b2198efb0979e5d7e42efffc30) feat: Add WebHDFS support for HTTP artifacts. Fixes #7540 (#8468) - * [354dee866](https://github.com/argoproj/argo-workflows/commit/354dee86616014bcb77afd170685242a18efd07c) fix: Exit lifecycle hook should respect expression. Fixes #8742 (#8744) - * [aa366db34](https://github.com/argoproj/argo-workflows/commit/aa366db345d794f0d330336d51eb2a88f14ebbe6) fix: remove list and watch on secrets. 
Fixes #8534 (#8555) - * [342abcd6d](https://github.com/argoproj/argo-workflows/commit/342abcd6d72b4cda64b01f30fa406b2f7b86ac6d) fix: mkdocs uses 4space indent for nested list (#8740) - * [567436640](https://github.com/argoproj/argo-workflows/commit/5674366404a09cee5f4e36e338a4292b057fe1b9) chore(deps-dev): bump typescript from 3.9.2 to 4.6.4 in /ui (#8719) - * [1f2417e30](https://github.com/argoproj/argo-workflows/commit/1f2417e30937399e96fd4dfcd3fcc2ed7333291a) feat: running locally through dev container (#8677) - * [515e0763a](https://github.com/argoproj/argo-workflows/commit/515e0763ad4b1bd9d2941fc5c141c52691fc3b12) fix: Simplify return logic in executeTmplLifeCycleHook (#8736) - * [b8f511309](https://github.com/argoproj/argo-workflows/commit/b8f511309adf6443445e6dbf55889538fd39eacc) fix: Template in Lifecycle hook should be optional (#8735) - * [98a97d6d9](https://github.com/argoproj/argo-workflows/commit/98a97d6d91c0d9d83430da20e11cea39a0a7919b) chore(deps-dev): bump ts-node from 4.1.0 to 9.1.1 in /ui (#8722) - * [e4d35f0ad](https://github.com/argoproj/argo-workflows/commit/e4d35f0ad3665d7d732a16b9e369f8658049bacd) chore(deps-dev): bump react-hot-loader from 3.1.3 to 4.13.0 in /ui (#8723) - * [b9ec444fc](https://github.com/argoproj/argo-workflows/commit/b9ec444fc4cf60ed876823b25a41f74a28698f0b) chore(deps-dev): bump copy-webpack-plugin from 4.5.2 to 5.1.2 in /ui (#8718) - * [43fb7106a](https://github.com/argoproj/argo-workflows/commit/43fb7106a83634b85a3b934e22a05246e76f7d15) chore(deps-dev): bump tslint-plugin-prettier from 2.1.0 to 2.3.0 in /ui (#8716) - * [c0cd1f855](https://github.com/argoproj/argo-workflows/commit/c0cd1f855a5ef89d0f7a0d49f8e11781735cfa86) feat: ui, Dependabot auto dependency update (#8706) - * [b3bf327a0](https://github.com/argoproj/argo-workflows/commit/b3bf327a021e4ab5cc329f83bdec8f533c87a4d6) fix: Fix the resursive example to call the coinflip template (#8696) - * [427c16072](https://github.com/argoproj/argo-workflows/commit/427c16072b6c9d677265c95f5fd84e6a37fcc848) feat: Increased default significant figures in formatDuration. Fixes #8650 (#8686) - * [7e2df8129](https://github.com/argoproj/argo-workflows/commit/7e2df81299f660089cf676f7622638156affedf5) chore(deps): bump google.golang.org/api from 0.78.0 to 0.79.0 (#8710) - * [9ddae875f](https://github.com/argoproj/argo-workflows/commit/9ddae875fdb49d3e852f935e3d8b52fae585bc5e) fix: Fixed podName in killing daemon pods. Fixes #8692 (#8708) - * [72d3f32e5](https://github.com/argoproj/argo-workflows/commit/72d3f32e5676207d1511c609b00d26df20a2607e) fix: update go-color path/version (#8707) - * [92b3ef27a](https://github.com/argoproj/argo-workflows/commit/92b3ef27af7a7e6b930045e95072a47c8745b1d3) fix: upgrade moment from 2.29.2 to 2.29.3 (#8679) - * [8d4ac38a1](https://github.com/argoproj/argo-workflows/commit/8d4ac38a158dc2b4708478f7e7db1f2dd488ffed) feat: ui, add node version constraint (#8678) - * [2cabddc9a](https://github.com/argoproj/argo-workflows/commit/2cabddc9a9241061d8b89cf671f1c548405f4cb0) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.24 to 7.0.26 (#8673) - * [859ebe99f](https://github.com/argoproj/argo-workflows/commit/859ebe99f760c6fb30870993359274a92cec2fb9) fix: Terminate, rather than delete, deadlined pods. 
Fixes #8545 (#8620) - * [dd565208e](https://github.com/argoproj/argo-workflows/commit/dd565208e236bc56230e75bedcc5082d171e6155) fix(git): add auth to fetch (#8664) - * [70f70209d](https://github.com/argoproj/argo-workflows/commit/70f70209d693d3933177a7de2cb6e421b763656f) fix: Handle omitted nodes in DAG enhanced depends logic. Fixes #8654 (#8672) - * [3fdf30d9f](https://github.com/argoproj/argo-workflows/commit/3fdf30d9f9181d74d81ca3184b53bbe661ecb845) fix: Enhance artifact visualization. Fixes #8619 (#8655) - * [16fef4e54](https://github.com/argoproj/argo-workflows/commit/16fef4e5498fac88dc80d33d653c99fec641150d) fix: enable `ARGO_REMOVE_PVC_PROTECTION_FINALIZER` by default. Fixes #8592 (#8661) - * [e4d57c6d5](https://github.com/argoproj/argo-workflows/commit/e4d57c6d560e025a336415aa840d2457eeca79f4) feat: `argo cp` to download artifacts. Fixes #695 (#8582) - * [e6e0c9bb3](https://github.com/argoproj/argo-workflows/commit/e6e0c9bb3b923a6d977875cbbd2744b8bacfce15) chore(deps): bump docker/login-action from 1 to 2 (#8642) - * [05781101d](https://github.com/argoproj/argo-workflows/commit/05781101dc94701aabd1bdbc2d3be4aa383b49f2) chore(deps): bump docker/setup-buildx-action from 1 to 2 (#8641) - * [6a4957135](https://github.com/argoproj/argo-workflows/commit/6a495713593f11514500998f6f69ce8f2e463975) chore(deps): bump docker/setup-qemu-action from 1 to 2 (#8640) - * [02370b51d](https://github.com/argoproj/argo-workflows/commit/02370b51d59bdd60b07c6c938737ed997807e4f2) feat: Track UI event #8402 (#8460) - * [64a2b28a5](https://github.com/argoproj/argo-workflows/commit/64a2b28a5fb51b50fe0e0a30185a8c3400d10548) fix: close http body. Fixes #8622 (#8624) - * [68a2cee6a](https://github.com/argoproj/argo-workflows/commit/68a2cee6a3373214803db009c7a6290954107c37) chore(deps): bump google.golang.org/api from 0.77.0 to 0.78.0 (#8602) - * [ed351ff08](https://github.com/argoproj/argo-workflows/commit/ed351ff084c4524ff4b2a45b53e539f91f5d423a) fix: ArtifactGC moved from Template to Artifact. Fixes #8556. (#8581) - * [87470e1c2](https://github.com/argoproj/argo-workflows/commit/87470e1c2bf703a9110e97bb755614ce8757fdcc) fix: Added artifact Content-Security-Policy (#8585) - * [61b80c90f](https://github.com/argoproj/argo-workflows/commit/61b80c90fd93aebff26df73fcddffa75732d10ec) Fix panic on executor plugin eventhandler (#8588) - * [974031570](https://github.com/argoproj/argo-workflows/commit/97403157054cb779b2005991fbb65c583aa3644c) fix: Polish artifact visualisation. Fixes #7743 (#8552) - * [98dd898be](https://github.com/argoproj/argo-workflows/commit/98dd898bef67e8523a0bf2ed942241dcb69eabe7) fix: Correct CSP. 
Fixes #8560 (#8579) - * [3d892d9b4](https://github.com/argoproj/argo-workflows/commit/3d892d9b481c5eefeb309b462b3f166a31335bc4) feat: New endpoint capable of serving directory listing or raw file, from non-archived or archived workflow (#8548) - * [71e2073b6](https://github.com/argoproj/argo-workflows/commit/71e2073b66b3b30b1eda658e88b7f6fd89469a92) chore(deps): bump lodash-es from 4.17.20 to 4.17.21 in /ui (#8577) - * [abf3c7411](https://github.com/argoproj/argo-workflows/commit/abf3c7411921dd422804c72b4f68dc2ab2731047) chore(deps): bump github.com/argoproj/pkg from 0.13.1 to 0.13.2 (#8571) - * [ffd5544c3](https://github.com/argoproj/argo-workflows/commit/ffd5544c31da026999b78197f55e6f4d2c8d7628) chore(deps): bump google.golang.org/api from 0.76.0 to 0.77.0 (#8572) - * [dc8fef3e5](https://github.com/argoproj/argo-workflows/commit/dc8fef3e5b1c0b833cc8568dbea23dbd1b310bdc) fix: Support memoization on plugin node. Fixes #8553 (#8554) - * [5b8638fcb](https://github.com/argoproj/argo-workflows/commit/5b8638fcb0f6ab0816f58f35a71f4f178ba9b7d9) fix: modified `SearchArtifact` to return `ArtifactSearchResults`. Fixes #8543 (#8557) - * [9398b0717](https://github.com/argoproj/argo-workflows/commit/9398b0717c14e15c78f6fe314ca9168d0104418d) feat: add more options to ArtifactSearchQuery. Fixes #8542. (#8549) - * [c781a5828](https://github.com/argoproj/argo-workflows/commit/c781a582821c4e08416eba9a3889eb2588596aa6) feat: Make artifacts discoverable in the DAG. Fixes #8494 (#8496) - * [d25b3fec4](https://github.com/argoproj/argo-workflows/commit/d25b3fec49377ea4be6a63d815a2b609636ef607) feat: Improve artifact server response codes. Fixes #8516 (#8524) - * [65b7437f7](https://github.com/argoproj/argo-workflows/commit/65b7437f7b26e19581650c0c2078f9dd8c89a73f) chore(deps): bump github.com/argoproj/pkg from 0.13.0 to 0.13.1 (#8537) - * [ecd91b1c4](https://github.com/argoproj/argo-workflows/commit/ecd91b1c4215a2ab8742f7c43eaade98a1d47eba) fix: added json tag to ArtifactGCStrategies (#8523) - * [f223bb8a3](https://github.com/argoproj/argo-workflows/commit/f223bb8a3c277e96a19e08f30f27ad70c0c425d3) fix: ArtifactGCOnWorkflowDeletion typo quick fix (#8519) - * [b4202b338](https://github.com/argoproj/argo-workflows/commit/b4202b338b5f97552fb730e4d07743c365d6f5ec) feat: Do not return cause of internal server error. Fixes #8514 (#8522) - * [d7bcaa756](https://github.com/argoproj/argo-workflows/commit/d7bcaa7569ac15d85eb293a72a1a98779275bd6e) feat: add finalizer for artifact GC (#8513) - * [c3ae56565](https://github.com/argoproj/argo-workflows/commit/c3ae56565bbe05c9809c5ad1192fcfc3ae717114) fix: Do not log container not found (#8509) - * [9a1345323](https://github.com/argoproj/argo-workflows/commit/9a1345323bb4727ba4fa769363b671213c02ded7) feat: Implement Workflow.SearchArtifacts(). Fixes #8473 (#8517) - * [30d9f8d77](https://github.com/argoproj/argo-workflows/commit/30d9f8d77caa69467f2b388b045fe9c3f8d05cb8) feat: Add correct CSP/XFO to served artifacts. Fixing #8492 (#8511) - * [d3f8db341](https://github.com/argoproj/argo-workflows/commit/d3f8db3417586b307401ecd5d172f9a1f97241db) feat: Save `containerSet` logs in artifact repository. 
Fixes #7897 (#8491) - * [6769ba720](https://github.com/argoproj/argo-workflows/commit/6769ba7209c1c8ffa6ecd5414d9694e743afe557) feat: add ArtifactGC to template spec (#8493) - * [19e763a3b](https://github.com/argoproj/argo-workflows/commit/19e763a3ba7ceaa890dc34310abeb4e7e4555641) chore(deps): bump google.golang.org/api from 0.75.0 to 0.76.0 (#8495) - * [6e9d42aed](https://github.com/argoproj/argo-workflows/commit/6e9d42aed1623e215a04c98cf1632f08f79a45cb) feat: add capability to choose params in suspend node.Fixes #8425 (#8472) - * [8685433e1](https://github.com/argoproj/argo-workflows/commit/8685433e1c183f1eb56add14c3e19c7b676314bb) feat: Added a delete function to the artifacts storage. Fixes #8470 (#8490) - * [9f5759b5b](https://github.com/argoproj/argo-workflows/commit/9f5759b5bd2a01d0f2930faa20ad5a769395eb99) feat: Enable git artifact clone of single branch (#8465) - * [7376e7cda](https://github.com/argoproj/argo-workflows/commit/7376e7cda4f72f0736fc128d15495acff71b987d) feat: Artifact streaming: enable artifacts to be streamed to users rather than loading the full file to disk first. Fixes #8396 (#8486) - * [06e9445ba](https://github.com/argoproj/argo-workflows/commit/06e9445ba71faba6f1132703762ec592a168ca9b) feat: add empty dir into wait container (#8390) - * [c61770622](https://github.com/argoproj/argo-workflows/commit/c6177062276cc39c3b21644ab1d6989cbcaf075c) fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478) - * [37a8a81df](https://github.com/argoproj/argo-workflows/commit/37a8a81df1d7ef3067596199f96974d31b200b88) feat: add ArtifactGC to workflow and template spec. Fixes #8471 (#8482) - * [ae803bba4](https://github.com/argoproj/argo-workflows/commit/ae803bba4f9b0c85f0d0471c22e44eb1c0f8f5f9) fix: Revert controller readiness changes. Fixes #8441 (#8454) - * [147ca4637](https://github.com/argoproj/argo-workflows/commit/147ca46376a4d86a09bde689d848396af6750b1e) fix: PodGC works with WorkflowTemplate. Fixes #8448 (#8452) - * [b7aeb6298](https://github.com/argoproj/argo-workflows/commit/b7aeb62982d91036edf5ba942eebeb4b22e30a3d) feat: Add darwin-arm64 binary build. Fixes #8450 (#8451) - * [8c0a957c3](https://github.com/argoproj/argo-workflows/commit/8c0a957c3ef0149f3f616a8baef2eb9a164436c1) fix: Fix bug in entrypoint lookup (#8453) - * [79508cc78](https://github.com/argoproj/argo-workflows/commit/79508cc78bd5b79762719c3b2fbe970981277e1f) chore(deps): bump google.golang.org/api from 0.74.0 to 0.75.0 (#8447) - * [24f9db628](https://github.com/argoproj/argo-workflows/commit/24f9db628090e9dfdfc7d657af80d96c176a47fd) chore(deps): bump github.com/argoproj/pkg from 0.11.0 to 0.12.0 (#8439) - * [e28fb0744](https://github.com/argoproj/argo-workflows/commit/e28fb0744209529cf0f7562c71f7f645db21ba1a) chore(deps): bump dependabot/fetch-metadata from 1.3.0 to 1.3.1 (#8438) - * [72bb11305](https://github.com/argoproj/argo-workflows/commit/72bb1130543a3cc81347fe4fcf3257d8b35cd478) chore(deps): bump github.com/argoproj-labs/argo-dataflow (#8440) - * [230c82652](https://github.com/argoproj/argo-workflows/commit/230c8265246d50a095cc3a697fcd437174731aa8) feat: added support for http as option for artifact upload. 
Fixes #785 (#8414) - * [4f067ab4b](https://github.com/argoproj/argo-workflows/commit/4f067ab4bcb9ae570b9af11b2abd64d592e1fbbc) chore(deps): bump github.com/prometheus/common from 0.33.0 to 0.34.0 (#8427) - * [a2fd0031e](https://github.com/argoproj/argo-workflows/commit/a2fd0031ef13b63fd65520c615043e2aff89dde8) chore(deps): bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#8426) - * [3d1ea426a](https://github.com/argoproj/argo-workflows/commit/3d1ea426a28c65c206752e957bb68a57ee8ed32e) fix: Remove binaries from Windows image. Fixes #8417 (#8420) - * [e71fdee07](https://github.com/argoproj/argo-workflows/commit/e71fdee07b8ccd7905752808bffb2283e170077a) Revert "feat: added support for http as an option for artifact upload. Fixes #785 (#8405)" - * [5845efbb9](https://github.com/argoproj/argo-workflows/commit/5845efbb94da8acfb218787846ea10c37fb2eebb) feat: Log result of HTTP requests & artifacts load/saves. Closes #8257 (#8394) - * [d22be825c](https://github.com/argoproj/argo-workflows/commit/d22be825cfb901f1ce59ba3744488cb8e144233b) feat: added support for http as an option for artifact upload. Fixes #785 (#8405) - * [4471b59a5](https://github.com/argoproj/argo-workflows/commit/4471b59a52873ca66d6834a06519407c858f5906) fix: open minio dashboard on different port in quick-start (#8407) - * [f467cc555](https://github.com/argoproj/argo-workflows/commit/f467cc5558bd22330eebfbc352ad4a7607f9fa4c) fix: Daemon step updated 'pod delete' while pod is running (#8399) - * [a648ccdcf](https://github.com/argoproj/argo-workflows/commit/a648ccdcfa3bb4cd5f5684faf921ab9fdab761de) fix: prevent backoff when retryStrategy.limit has been reached. Fixes #7588 (#8090) - * [136ebbc45](https://github.com/argoproj/argo-workflows/commit/136ebbc45b7cba346d7ba72f278624647a6b5a1c) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.23 to 7.0.24 (#8397) - * [73ea7c72c](https://github.com/argoproj/argo-workflows/commit/73ea7c72c99a073dbe3ec0a420e112945916fb94) feat!: Add entrypoint lookup. Fixes #8344 (#8345) - * [283f6b58f](https://github.com/argoproj/argo-workflows/commit/283f6b58f979db1747ca23753d0562a440f95908) fix: Add readiness check to controller. Fixes #8283 (#8285) - * [75b533b61](https://github.com/argoproj/argo-workflows/commit/75b533b61eebd00044f2682540f5de15d6be8fbb) chore(deps): bump github.com/spf13/viper from 1.10.1 to 1.11.0 (#8392) - * [b09b9bdfb](https://github.com/argoproj/argo-workflows/commit/b09b9bdfb132c3967b81718bbc3c6e37fb2a3a42) fix: Absolute submodules in git artifacts. Fixes #8377 (#8381) - * [d47081fb4](https://github.com/argoproj/argo-workflows/commit/d47081fb4664d3a26e802a5c3c36798108388f2f) fix: upgrade react-moment from 1.0.0 to 1.1.1 (#8389) - * [010e359e4](https://github.com/argoproj/argo-workflows/commit/010e359e4c29b1af5653c46112ad53ac9b2679be) fix: upgrade react-datepicker from 2.14.1 to 2.16.0 (#8388) - * [0c9d88b44](https://github.com/argoproj/argo-workflows/commit/0c9d88b4429ff59c656e7b78b2160a55b49976ce) fix: upgrade prop-types from 15.7.2 to 15.8.1 (#8387) - * [54fa39c89](https://github.com/argoproj/argo-workflows/commit/54fa39c897d9883cec841450808102d71bd46fa8) fix: Back-off UI retries. Fixes #5697 (#8333) - * [637d14c88](https://github.com/argoproj/argo-workflows/commit/637d14c88f7d12c1c0355d62c2d1d4b03c4934e1) fix: replace `podName` with `nodeId` in `_.primary.swagger.json` (#8385) - * [95323f87d](https://github.com/argoproj/argo-workflows/commit/95323f87d42c9cf878563bfcb11460171906684b) fix: removed error from artifact server 401 response. 
Fixes #8382 (#8383) - * [2d91646aa](https://github.com/argoproj/argo-workflows/commit/2d91646aafede0e5671b07b2ac6eb27a057455b1) fix: upgrade js-yaml from 3.13.1 to 3.14.1 (#8374) - * [54eaed060](https://github.com/argoproj/argo-workflows/commit/54eaed0604393106b4dde3e7d7e6ccb41a42de6b) fix: upgrade cron-parser from 2.16.3 to 2.18.0 (#8373) - * [e97b0e66b](https://github.com/argoproj/argo-workflows/commit/e97b0e66b89f131fe6a12f24c26efbb73e16ef2e) fix: Updating complated node status - * [627597b56](https://github.com/argoproj/argo-workflows/commit/627597b5616f4d22e88b89a6d7017a67b6a4143d) fix: Add auth for SDKs. Fixes #8230 (#8367) - * [55ecfeb7b](https://github.com/argoproj/argo-workflows/commit/55ecfeb7b0e300a5d5cc6027c9212365cdaf4a2b) chore(deps): bump github.com/go-openapi/jsonreference (#8363) - * [163be6d99](https://github.com/argoproj/argo-workflows/commit/163be6d99cc7ee262580196fbfd2cb9e9d7d8833) chore(deps): bump actions/download-artifact from 2 to 3 (#8360) - * [765bafb12](https://github.com/argoproj/argo-workflows/commit/765bafb12de25a7589aa1e2733786e0285290c22) chore(deps): bump actions/upload-artifact from 2 to 3 (#8361) - * [eafa10de8](https://github.com/argoproj/argo-workflows/commit/eafa10de80d31bbcf1ec030d20ecfe879ab2d171) chore(deps): bump actions/setup-go from 2 to 3 (#8362) - * [e9de085d6](https://github.com/argoproj/argo-workflows/commit/e9de085d65a94d4189a54566d99c7177c1a7d735) fix: Erratum in docs. Fixes #8342 (#8359) - * [a3d1d07e1](https://github.com/argoproj/argo-workflows/commit/a3d1d07e1cbd19039771c11aa202bd8fd68198e7) fix: upgrade react-chartjs-2 from 2.10.0 to 2.11.2 (#8357) - * [b199cb947](https://github.com/argoproj/argo-workflows/commit/b199cb9474f7b1a3303a12858a2545aa85484d28) fix: upgrade history from 4.7.2 to 4.10.1 (#8356) - * [e40521556](https://github.com/argoproj/argo-workflows/commit/e4052155679a43cf083daf0c1b3fd5d45a5fbe24) fix: upgrade multiple dependencies with Snyk (#8355) - * [8c893bd13](https://github.com/argoproj/argo-workflows/commit/8c893bd13998b7dee09d0dd0c7a292b22509ca20) fix: upgrade com.google.code.gson:gson from 2.8.9 to 2.9.0 (#8354) - * [ee3765643](https://github.com/argoproj/argo-workflows/commit/ee3765643632fa6d8dbfb528a395cbb28608e2e8) feat: add message column to `kubectl get wf` and `argo list`. Fixes #8307 (#8353) - * [ae3881525](https://github.com/argoproj/argo-workflows/commit/ae3881525ce19a029a4798ff294e1b0c982e3268) fix: examples/README.md: overriten => overridden (#8351) - * [242d53596](https://github.com/argoproj/argo-workflows/commit/242d53596a5cf23b4470c2294204030ce11b01c4) fix: Fix response type for artifact service OpenAPI and SDKs. 
Fixes #7781 (#8332) - * [ab21eed52](https://github.com/argoproj/argo-workflows/commit/ab21eed527d15fa2c10272f740bff7c7963891c7) fix: upgrade io.swagger:swagger-annotations from 1.6.2 to 1.6.5 (#8335) - * [f708528fb](https://github.com/argoproj/argo-workflows/commit/f708528fbdfb9adecd8a66df866820eaab9a69ea) fix: upgrade react-monaco-editor from 0.36.0 to 0.47.0 (#8339) - * [3c35bd2f5](https://github.com/argoproj/argo-workflows/commit/3c35bd2f55dfdf641882cb5f9085b0b14f6d4d93) fix: upgrade cronstrue from 1.109.0 to 1.125.0 (#8338) - * [7ee17ddb7](https://github.com/argoproj/argo-workflows/commit/7ee17ddb7804e3f2beae87a8f532b1c0e6d1e520) fix: upgrade com.squareup.okhttp3:logging-interceptor from 4.9.1 to 4.9.3 (#8336) - * [68229e37e](https://github.com/argoproj/argo-workflows/commit/68229e37e295e3861cb7f6621ee3b9c7aabf8d67) added new-line to USERS.md (#8340) - * [94472c0ba](https://github.com/argoproj/argo-workflows/commit/94472c0bad4ed92ac06efb8c28563eba7b5bd1ab) chore(deps): bump cloud.google.com/go/storage from 1.20.0 to 1.22.0 (#8341) - * [aa9ff17d5](https://github.com/argoproj/argo-workflows/commit/aa9ff17d5feaa79aa26d9dc9cf9f67533f886b1c) fix: Remove path traversal CWE-23 (#8331) - * [14a9a1dc5](https://github.com/argoproj/argo-workflows/commit/14a9a1dc57f0d83231a19e76095ebdd4711f2594) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#8328) - * [58052c2b7](https://github.com/argoproj/argo-workflows/commit/58052c2b7b72daa928f8d427055be01cf896ff3e) fix: sdks/java/pom.xml to reduce vulnerabilities (#8327) - * [153540fdd](https://github.com/argoproj/argo-workflows/commit/153540fdd0e3b6f00050550abed67cae16299cbe) feat: Remove binaries from argoexec image. Fixes #7486 (#8292) - * [af8077423](https://github.com/argoproj/argo-workflows/commit/af807742343cb1a76926f6a1251466b9af988a47) feat: Always Show Workflow Parameters (#7809) - * [62e0a8ce4](https://github.com/argoproj/argo-workflows/commit/62e0a8ce4e74d2e19f3a9c0fb5e52bd58a6b944b) feat: Remove the PNS executor. Fixes #7804 (#8296) - * [0cdd2b40a](https://github.com/argoproj/argo-workflows/commit/0cdd2b40a8ee2d31476f8078eaedaa16c6827a76) fix: update docker version to address CVE-2022-24921 (#8312) - * [9c901456a](https://github.com/argoproj/argo-workflows/commit/9c901456a44501f11afc2bb1e856f0d0828fd13f) fix: Default value is ignored when loading params from configmap. Fixes #8262 (#8271) - * [9ab0e959a](https://github.com/argoproj/argo-workflows/commit/9ab0e959ac497433bcee2bb9c8d5710f87f1e3ea) fix: reduce number of workflows displayed in UI by default. Fixes #8297 (#8303) - * [13bc01362](https://github.com/argoproj/argo-workflows/commit/13bc013622c3b681bbd3c334dce0eea6870fcfde) fix: fix: git artifact will be checked out even if local file matches name of tracking branch (#8287) - * [65dc0882c](https://github.com/argoproj/argo-workflows/commit/65dc0882c9bb4496f1c4b2e0deb730e775724c82) feat: Fail on invalid config. (#8295) - * [5ac0e314d](https://github.com/argoproj/argo-workflows/commit/5ac0e314da80667e8b3b355c55cf9e1ab9b57b34) fix: `taskresults` owned by pod rather than workflow. (#8284) - * [996655f4f](https://github.com/argoproj/argo-workflows/commit/996655f4f3f03a30bcb82a1bb03f222fd100b8e0) fix: Snyk security recommendations (Golang). Fixes #8288 - * [221d99827](https://github.com/argoproj/argo-workflows/commit/221d9982713ca30c060955bb35b48af3143c3754) fix: Snyk security recommendations (Node). 
Fixes #8288 - * [b55dead05](https://github.com/argoproj/argo-workflows/commit/b55dead055139d1de33c464beed2b5ef596f5c8e) Revert "build: Enable governance bot. Fixes #8256 (#8259)" (#8294) - * [e50ec699c](https://github.com/argoproj/argo-workflows/commit/e50ec699cb33a7b84b0cb3c5b99396fe5365facd) chore(deps): bump google.golang.org/api from 0.73.0 to 0.74.0 (#8281) - * [954a3ee7e](https://github.com/argoproj/argo-workflows/commit/954a3ee7e7cc4f02074c07f7add971ca2be3291e) fix: install.yaml missing crb subject ns (#8280) - * [a3c326fdf](https://github.com/argoproj/argo-workflows/commit/a3c326fdf0d2133d5e78ef71854499f576e7e530) Remove hardcoded namespace in kustomize file #8250 (#8266) - * [b198b334d](https://github.com/argoproj/argo-workflows/commit/b198b334dfdb8e77d2ee51cd05b0716a29ab9169) fix: improve error message when the controller is set `templateReferencing: Secure` (#8277) - * [5598b8c7f](https://github.com/argoproj/argo-workflows/commit/5598b8c7fb5d17015e5c941e09953a74d8931436) feat: add resubmit and retry buttons for archived workflows. Fixes #7908 and #7911 (#8272) - * [6975607fa](https://github.com/argoproj/argo-workflows/commit/6975607fa33bf39e752b9cefcb8cb707a46bc6d4) chore(deps): bump github.com/prometheus/common from 0.32.1 to 0.33.0 (#8274) - * [78f01f2b9](https://github.com/argoproj/argo-workflows/commit/78f01f2b9f24a89db15a119885dfe8eb6420c70d) fix: patch workflow status to workflow (#8265) - * [f48998c07](https://github.com/argoproj/argo-workflows/commit/f48998c070c248688d996e5c8a4fec7601f5ab53) feat: Add a link in the UI for WorkflowTemplate. Fixes #4760 (#8208) - * [f02d4b72a](https://github.com/argoproj/argo-workflows/commit/f02d4b72adea9fbd23880c70871f92d66dc183c7) chore(deps): bump github.com/argoproj-labs/argo-dataflow (#8264) - * [48202fe99](https://github.com/argoproj/argo-workflows/commit/48202fe9976ff39731cf73c03578081a10146596) chore(deps): bump dependabot/fetch-metadata from 1.1.1 to 1.3.0 (#8263) - * [f00ec49d6](https://github.com/argoproj/argo-workflows/commit/f00ec49d695bdad108000abcdfd0f82f6af9ca6c) feat!: Refactor/simplify configuration code (#8235) - * [c1f72b662](https://github.com/argoproj/argo-workflows/commit/c1f72b66282012e712e28a715c08dddb1a556c16) feat: add archive retry command to argo CLI. Fixes #7907 (#8229) - * [7a07805b1](https://github.com/argoproj/argo-workflows/commit/7a07805b183d598847bb9323f1009d7e8bbc1ac6) fix: Update argo-server manifests to have read-only root file-system (#8210) - * [0d4b4dc34](https://github.com/argoproj/argo-workflows/commit/0d4b4dc34127a27f7ca6e5c41197f3aaacc79cb8) fix: Panic in Workflow Retry (#8243) - * [61f0decd8](https://github.com/argoproj/argo-workflows/commit/61f0decd873a6a422c3a7159d6023170637338ff) fix: Hook with wftemplateRef (#8242) - * [e232340cc](https://github.com/argoproj/argo-workflows/commit/e232340cc5191c5904afe87f03c80545bb10e430) fix: grep pattern (#8238) - * [1d373c41a](https://github.com/argoproj/argo-workflows/commit/1d373c41afbebcf8de55114582693bcbdc59b342) fix: submodule cloning via git. 
Fixes #7469 (#8225) - * [6ee1b03f9](https://github.com/argoproj/argo-workflows/commit/6ee1b03f9e83c1e129b45a6bc9292a99add6b36e) fix: do not panic when termination-log is not writeable (#8221) - * [cae38894f](https://github.com/argoproj/argo-workflows/commit/cae38894f96b0d33cde54ef9cdee3cda53692a8d) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk (#8232) - * [e0e45503e](https://github.com/argoproj/argo-workflows/commit/e0e45503e6704b27e3e9ef0ff4a98169f3b072fa) chore(deps): bump peter-evans/create-pull-request from 3 to 4 (#8216) - * [8c77e89fc](https://github.com/argoproj/argo-workflows/commit/8c77e89fc185ff640e1073692dfc7c043037440a) feat: add archive resubmit command to argo CLI. Fixes #7910 (#8166) - * [d8aa46731](https://github.com/argoproj/argo-workflows/commit/d8aa46731c74730ccca1a40187109a63a675618b) fix: Support `--parameters-file` where ARGO_SERVER specified. Fixes #8160 (#8213) - * [d33d391a4](https://github.com/argoproj/argo-workflows/commit/d33d391a4c06c136b6a0964a51c75850323684e6) feat: Add support to auto-mount service account tokens for plugins. (#8176) - * [8a1fbb86e](https://github.com/argoproj/argo-workflows/commit/8a1fbb86e7c83bf14990805166d04d5cb4479ea3) fix: removed deprecated k8sapi executor. Fixes #7802 (#8205) - * [4d5079822](https://github.com/argoproj/argo-workflows/commit/4d5079822da17fd644a99a9e4b27259864ae8c36) chore(deps): bump actions/cache from 2 to 3 (#8206) - * [12cd8bcaa](https://github.com/argoproj/argo-workflows/commit/12cd8bcaa75381b5a9fa65aff03ac13aec706375) fix: requeue not delete the considererd Task flag (#8194) - * [e2b288318](https://github.com/argoproj/argo-workflows/commit/e2b288318b15fa3e3cdc38c3dc7e66774920be8d) fix: Use `latest` image tag when version is `untagged`. Fixes #8188 (#8191) - * [6d6d23d81](https://github.com/argoproj/argo-workflows/commit/6d6d23d8110165331d924e97b01d5e26214c72db) fix: task worker requeue wrong task. Fixes #8139 (#8186) - * [41fd07aa4](https://github.com/argoproj/argo-workflows/commit/41fd07aa4f8462d70ad3c2c0481d5e09ae97b612) fix: Update `workflowtaskresult` code have own reconciliation loop. (#8135) - * [051c7b8d2](https://github.com/argoproj/argo-workflows/commit/051c7b8d2baf50b55e8076a1e09e7340551c04c1) fix: pkg/errors is no longer maintained (#7440) - * [fbb43b242](https://github.com/argoproj/argo-workflows/commit/fbb43b2429e45346221a119583aac11df4b5f880) fix: workflow.duration' is not available as a real time metric (#8181) - * [0e707cdf6](https://github.com/argoproj/argo-workflows/commit/0e707cdf69f891c7c7483e2244f5ea930d31b1c5) fix: Authentication for plugins. Fixes #8144 (#8147) - * [d4b1afe6f](https://github.com/argoproj/argo-workflows/commit/d4b1afe6f68afc3061a924186fa09556290ec3e1) feat: add retry API for archived workflows. Fixes #7906 (#7988) - * [e7008eada](https://github.com/argoproj/argo-workflows/commit/e7008eada7a885d80952b5184562a29508323c2a) fix: Correctly order emissary combined output. Fixes #8159 (#8175) - * [9101c4939](https://github.com/argoproj/argo-workflows/commit/9101c49396fe95d62ef3040cd4d330fde9f35554) fix: Add instance ID to `workflowtaskresults` (#8150) - * [2b5e4a1d2](https://github.com/argoproj/argo-workflows/commit/2b5e4a1d2df7877d9b7b7fbedd7136a125a39c8d) feat: Use pinned executor version. 
(#8165) - * [715f6ced6](https://github.com/argoproj/argo-workflows/commit/715f6ced6f42c0b7b5994bf8d16c561f48025fe8) fix: add /etc/mime.types mapping table (#8171) - * [6d6e08aa8](https://github.com/argoproj/argo-workflows/commit/6d6e08aa826c406a912387ac438ec20428c7623d) fix: Limit workflows to 128KB and return a friendly error message (#8169) - * [057c3346f](https://github.com/argoproj/argo-workflows/commit/057c3346f9f792cf10888320c4297b09f3c11e2e) feat: add TLS config option to HTTP template. Fixes #7390 (#7929) - * [013fa2578](https://github.com/argoproj/argo-workflows/commit/013fa2578bc5cace4de754daef04448b30faae32) chore(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#8163) - * [ad341c4af](https://github.com/argoproj/argo-workflows/commit/ad341c4af1645c191a5736d91d78a19acc7b2fa7) chore(deps): bump google.golang.org/api from 0.72.0 to 0.73.0 (#8162) - * [5efc9fc99](https://github.com/argoproj/argo-workflows/commit/5efc9fc995ac898672a575b514f8bfc83b220c4c) feat: add mysql options (#8157) - * [cda5737c3](https://github.com/argoproj/argo-workflows/commit/cda5737c37e3ab7c381869d7d820de71285f55a5) chore(deps): bump google.golang.org/api from 0.71.0 to 0.72.0 (#8156) - * [be2dd19a0](https://github.com/argoproj/argo-workflows/commit/be2dd19a0718577348823f1f68b82dbef8d95959) Update USERS.md (#8132) - * [af26ff7ed](https://github.com/argoproj/argo-workflows/commit/af26ff7ed54d4fe508edac34f82fe155f2d54a9d) fix: Remove need for `get pods` from Emissary (#8133) - * [537dd3be6](https://github.com/argoproj/argo-workflows/commit/537dd3be6bf93be37e06d768d9a610038eafb361) feat: Change pod clean-up to use informer. (#8136) - * [1d71fb3c4](https://github.com/argoproj/argo-workflows/commit/1d71fb3c4ebdb2891435ed12257743331ff34436) chore(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 (#8131) - * [972a4e989](https://github.com/argoproj/argo-workflows/commit/972a4e98987296a844a28dce31162d59732e6532) fix(plugins): UX improvements (#8122) - * [437b37647](https://github.com/argoproj/argo-workflows/commit/437b3764783b48a304034cc4291472c6e490689b) feat: add resubmit API for archived workflows. Fixes #7909 (#8079) - * [707cf8321](https://github.com/argoproj/argo-workflows/commit/707cf8321ccaf98b4596695fdbfdb04faf9a9487) update kustomize/kubectl installation (#8095) - * [48348247f](https://github.com/argoproj/argo-workflows/commit/48348247f0a0fd949871a9f982d7ee70c39509a1) chore(deps): bump google.golang.org/api from 0.70.0 to 0.71.0 (#8108) - * [765333dc9](https://github.com/argoproj/argo-workflows/commit/765333dc95575608fdf87328c7548c5e349b557d) fix(executor): Retry kubectl on internal transient error (#8092) - * [4d4890454](https://github.com/argoproj/argo-workflows/commit/4d4890454e454acbc86cef039bb6905c63f79e73) fix: Fix the TestStopBehavior flackiness (#8096) - * [6855f4c51](https://github.com/argoproj/argo-workflows/commit/6855f4c51b5bd667599f072ae5ddde48967006f1) fix: pod deleted due to delayed cleanup. 
Fixes #8022 (#8061) - -### Contributors - - * Aatman - * Adam Eri - * Alex Collins - * BOOK - * Basanth Jenu H B - * Brian Loss - * Cash Williams - * Clemens Lange - * Dakota Lillie - * Dana Pieluszczak - * Dillen Padhiar - * Doğukan - * Ezequiel Muns - * Felix Seidel - * Fernando Luís da Silva - * Gaurav Gupta - * Grzegorz Bielski - * Hao Xin - * Iain Lane - * Isitha Subasinghe - * Iván Sánchez - * JasonZhu - * Jessie Teng - * Juan Luis Cano Rodríguez - * Julie Vogelman - * Kesavan - * LoricAndre - * Manik Sidana - * Marc Abramowitz - * Mark Shields - * Markus Lippert - * Michael Goodness - * Michael Weibel - * Mike Tougeron - * Ming Yu Shi - * Miroslav Boussarov - * Noam Gal - * Philippe Richard - * Rohan Kumar - * Sanjay Tiwari - * Saravanan Balasubramanian - * Shubham Nazare - * Snyk bot - * Soumya Ghosh Dastidar - * Stephanie Palis - * Swarnim Pratap Singh - * Takumi Sue - * Tianchu Zhao - * Timo Pagel - * Tristan Colgate-McFarlane - * Tuan - * Vignesh - * William Van Hevelingen - * Wu Jayway - * Yuan Tang - * alexdittmann - * dependabot[bot] - * hadesy - * ibuder - * kennytrytek - * lijie - * mihirpandya-greenops - * momom-i - * shirou - * smile-luobin - * tatsuya-ogawa - * tculp - * ybyang - * İnanç Dokurel - -## v3.3.9 (2022-08-09) - - * [5db53aa0c](https://github.com/argoproj/argo-workflows/commit/5db53aa0ca54e51ca69053e1d3272e37064559d7) Revert "fix: Correct kill command. Fixes #8687 (#8908)" - * [b7b37d5aa](https://github.com/argoproj/argo-workflows/commit/b7b37d5aa2229c09365735fab165b4876c30aa4a) fix: Skip TestRunAsNonRootWithOutputParams - * [e4dca01f1](https://github.com/argoproj/argo-workflows/commit/e4dca01f1a76cefb7cae944ba0c4e54bc0aec427) fix: SignalsSuite test - * [151432f9b](https://github.com/argoproj/argo-workflows/commit/151432f9b754981959e149202d5f4b0617064595) fix: add containerRuntimeExecutor: emissary in ci - * [a3d6a58a7](https://github.com/argoproj/argo-workflows/commit/a3d6a58a71e1603077a4b39c4368d11847d500fb) feat: refactoring e2e test timeouts to support multiple environments. (#8925) - * [f9e2dd21c](https://github.com/argoproj/argo-workflows/commit/f9e2dd21cb09ac90b639be0f97f07da373240202) fix: lint - * [ef3fb421f](https://github.com/argoproj/argo-workflows/commit/ef3fb421f02f96195046ba327beca7b08753530b) fix: Correct kill command. Fixes #8687 (#8908) - * [e85c815a1](https://github.com/argoproj/argo-workflows/commit/e85c815a10fb59cb95cfdf6d2a171cea7c6aec47) fix: set NODE_OPTIONS to no-experimental-fetch to prevent yarn start error (#8802) - * [a19c94bb6](https://github.com/argoproj/argo-workflows/commit/a19c94bb6639540f309883ff0f41b14dd557324b) fix: Omitted task result should also be valid (#8776) - * [15f9d5227](https://github.com/argoproj/argo-workflows/commit/15f9d52270af4bca44553755d095d2dd8badfa14) fix: Fixed podName in killing daemon pods. 
Fixes #8692 (#8708) - * [6ec0ca088](https://github.com/argoproj/argo-workflows/commit/6ec0ca0883cf4e2222176ab413b3318017a30796) fix: open minio dashboard on different port in quick-start (#8407) - * [d874c1a87](https://github.com/argoproj/argo-workflows/commit/d874c1a87b65b300b2a4c93032bd2970d6f91d8f) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#8328) - * [481137c25](https://github.com/argoproj/argo-workflows/commit/481137c259b05c6a5b3c0e3adab1649c2b512364) fix: sdks/java/pom.xml to reduce vulnerabilities (#8327) - * [f54fb5c24](https://github.com/argoproj/argo-workflows/commit/f54fb5c24dd52a64da6d5aad5972a6554e386769) fix: grep pattern (#8238) - * [73334cae9](https://github.com/argoproj/argo-workflows/commit/73334cae9fbaef96b63889e16a3a2f78c725995e) fix: removed deprecated k8sapi executor. Fixes #7802 (#8205) - * [9c9efa67f](https://github.com/argoproj/argo-workflows/commit/9c9efa67f38620eeb08d1a9d2bb612bf14bf33de) fix: retryStrategy.Limit is now read properly for backoff strategy. Fixes #9170. (#9213) - * [69b5f1d79](https://github.com/argoproj/argo-workflows/commit/69b5f1d7945247a9e219b53f12fb8b3eec6e5e52) fix: Add missing Go module entries - -### Contributors - - * Alex Collins - * Dillen Padhiar - * Grzegorz Bielski - * Julie Vogelman - * Kesavan - * Rohan Kumar - * Saravanan Balasubramanian - * Snyk bot - * Takumi Sue - * Yuan Tang - -## v3.3.8 (2022-06-23) - - * [621b0d1a8](https://github.com/argoproj/argo-workflows/commit/621b0d1a8e09634666ebe403ee7b8fc29db1dc4e) fix: check for nil, and add logging to expose root cause of panic in Issue 8968 (#9010) - * [b7c218c0f](https://github.com/argoproj/argo-workflows/commit/b7c218c0f7b3ea0035dc44ccc9e8416f30429d16) feat: log workflow size before hydrating/dehydrating. Fixes #8976 (#8988) - -### Contributors - - * Dillen Padhiar - * Julie Vogelman - -## v3.3.7 (2022-06-20) - - * [479763c04](https://github.com/argoproj/argo-workflows/commit/479763c04036db98cd1e9a7a4fc0cc932affb8bf) fix: Skip TestExitHookWithExpression() completely (#8761) - * [a1ba42140](https://github.com/argoproj/argo-workflows/commit/a1ba42140154e757b024fe29c61fc7043c741cee) fix: Template in Lifecycle hook should be optional (#8735) - * [f10d6238d](https://github.com/argoproj/argo-workflows/commit/f10d6238d83b410a461d1860d0bb3c7ae4d74383) fix: Simplify return logic in executeTmplLifeCycleHook (#8736) - * [f2ace043b](https://github.com/argoproj/argo-workflows/commit/f2ace043bb7d050e8d539a781486c9f932bca931) fix: Exit lifecycle hook should respect expression. Fixes #8742 (#8744) - * [8c0b43569](https://github.com/argoproj/argo-workflows/commit/8c0b43569bb3e9c9ace21afcdd89d2cec862939c) fix: long code blocks overflow in ui. Fixes #8916 (#8947) - * [1d26628b8](https://github.com/argoproj/argo-workflows/commit/1d26628b8bc5f5a4d90d7a31b6f8185f280a4538) fix: sync cluster Workflow Template Informer before it's used (#8961) - * [4d9f8f7c8](https://github.com/argoproj/argo-workflows/commit/4d9f8f7c832ff888c11a41dad7a755ef594552c7) fix: Workflow Duration metric shouldn't increase after workflow complete (#8989) - * [72e0c6f00](https://github.com/argoproj/argo-workflows/commit/72e0c6f006120f901f02ea3a6bf8b3e7f639eb48) fix: add nil check for retryStrategy.Limit in deadline check. 
Fixes #8990 (#8991) - -### Contributors - - * Dakota Lillie - * Dillen Padhiar - * Julie Vogelman - * Saravanan Balasubramanian - * Yuan Tang - -## v3.3.6 (2022-05-25) - - * [2b428be80](https://github.com/argoproj/argo-workflows/commit/2b428be8001a9d5d232dbd52d7e902812107eb28) fix: Handle omitted nodes in DAG enhanced depends logic. Fixes #8654 (#8672) - * [7889af614](https://github.com/argoproj/argo-workflows/commit/7889af614c354f4716752942891cbca0a0889df0) fix: close http body. Fixes #8622 (#8624) - * [622c3d594](https://github.com/argoproj/argo-workflows/commit/622c3d59467a2d0449717ab866bd29bbd0469795) fix: Do not log container not found (#8509) - * [7091d8003](https://github.com/argoproj/argo-workflows/commit/7091d800360ad940ec605378324909823911d853) fix: pkg/errors is no longer maintained (#7440) - * [3f4c79fa5](https://github.com/argoproj/argo-workflows/commit/3f4c79fa5f54edcb50b6003178af85c70b5a8a1f) feat: remove size limit of 128kb for workflow templates. Fixes #8789 (#8796) - -### Contributors - - * Alex Collins - * Dillen Padhiar - * Stephanie Palis - * Yuan Tang - * lijie - -## v3.3.5 (2022-05-03) - - * [93cb050e3](https://github.com/argoproj/argo-workflows/commit/93cb050e3933638f0dbe2cdd69630e133b3ad52a) Revert "fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478)" - * [29f3ad844](https://github.com/argoproj/argo-workflows/commit/29f3ad8446ac5f07abda0f6844f3a31a7d50eb23) fix: Added artifact Content-Security-Policy (#8585) - * [a40d27cd7](https://github.com/argoproj/argo-workflows/commit/a40d27cd7535f6d36d5fb8d10cea0226b784fa65) fix: Support memoization on plugin node. Fixes #8553 (#8554) - * [f2b075c29](https://github.com/argoproj/argo-workflows/commit/f2b075c29ee97c95cfebb453b18c0ce5f16a5f04) fix: Pod `OOMKilled` should fail workflow. Fixes #8456 (#8478) - * [ba8c60022](https://github.com/argoproj/argo-workflows/commit/ba8c600224b7147d1832de1bea694fd376570ae9) fix: prevent backoff when retryStrategy.limit has been reached. Fixes #7588 (#8090) - * [c17f8c71d](https://github.com/argoproj/argo-workflows/commit/c17f8c71d40d4e34ef0a87dbc95eda005a57dc39) fix: update docker version to address CVE-2022-24921 (#8312) - * [9d0b7aa56](https://github.com/argoproj/argo-workflows/commit/9d0b7aa56cf065bf70c2cfb43f71ea9f92b5f964) fix: Default value is ignored when loading params from configmap. Fixes #8262 (#8271) - * [beab5b6ef](https://github.com/argoproj/argo-workflows/commit/beab5b6ef40a187e90ff23294bb1d9e2db9cb90a) fix: install.yaml missing crb subject ns (#8280) - * [b0d8be2ef](https://github.com/argoproj/argo-workflows/commit/b0d8be2ef3d3c1c96b15aeda572fcd1596fca9f1) fix: requeue not delete the considererd Task flag (#8194) - -### Contributors - - * Alex Collins - * Cash Williams - * Rohan Kumar - * Soumya Ghosh Dastidar - * Wu Jayway - * Yuan Tang - * ybyang - -## v3.3.4 (2022-04-29) - - * [02fb874f5](https://github.com/argoproj/argo-workflows/commit/02fb874f5deb3fc3e16f033c6f60b10e03504d00) feat: add capability to choose params in suspend node.Fixes #8425 (#8472) - * [32b1b3a3d](https://github.com/argoproj/argo-workflows/commit/32b1b3a3d505dea1d42fdeb0104444ca4f5e5795) feat: Add support to auto-mount service account tokens for plugins. (#8176) - -### Contributors - - * Alex Collins - * Basanth Jenu H B - -## v3.3.3 (2022-04-25) - - * [9c08aedc8](https://github.com/argoproj/argo-workflows/commit/9c08aedc880026161d394207acbac0f64db29a53) fix: Revert controller readiness changes. 
Fixes #8441 (#8454) - * [9854dd3fc](https://github.com/argoproj/argo-workflows/commit/9854dd3fccccd34bf3e4f110412dbd063f3316c2) fix: PodGC works with WorkflowTemplate. Fixes #8448 (#8452) - -### Contributors - - * Alex Collins - -## v3.3.2 (2022-04-20) - - * [35492a170](https://github.com/argoproj/argo-workflows/commit/35492a1700a0f279694cac874b6d9c07a08265d1) fix: Remove binaries from Windows image. Fixes #8417 (#8420) - * [bfc3b6cad](https://github.com/argoproj/argo-workflows/commit/bfc3b6cad02c0a38141201d7f77e14e3f0e637a4) fix: Skip TestRunAsNonRootWithOutputParams - * [1c34f9801](https://github.com/argoproj/argo-workflows/commit/1c34f9801b502d1566064726145ce5d68124b213) fix: go.sum - * [be35b54b0](https://github.com/argoproj/argo-workflows/commit/be35b54b00e44339f8dcb63d0411bc80f8983764) fix: create cache lint - * [017a31518](https://github.com/argoproj/argo-workflows/commit/017a3151837ac05cca1b2425a8395d547d86ed09) fix: create cache lint - * [20d601b3d](https://github.com/argoproj/argo-workflows/commit/20d601b3dd2ebef102a1a610e4dbef6924f842ff) fix: create cache lint - * [d8f28586f](https://github.com/argoproj/argo-workflows/commit/d8f28586f82b1bdb9e43446bd1792b3b01b2928a) fix: empty push - * [f41d94e91](https://github.com/argoproj/argo-workflows/commit/f41d94e91648961dfdc6e8536768012569dcd28f) fix: codegen - * [ce195dd52](https://github.com/argoproj/argo-workflows/commit/ce195dd521e195df4edd96bcd27fd950f23ff611) fix: Add auth for SDKs. Fixes #8230 (#8367) - * [00c960619](https://github.com/argoproj/argo-workflows/commit/00c9606197c30c138714b27ca5624dd0272c662d) fix: unittest - * [a0148c1b3](https://github.com/argoproj/argo-workflows/commit/a0148c1b32fef820a0cde5a5fed1975abedb7f82) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.23 to 7.0.24 (#8397) - * [5207d287b](https://github.com/argoproj/argo-workflows/commit/5207d287b5657d9049edd1b67c2b681a13c40420) fix: codegen - * [e68e06c34](https://github.com/argoproj/argo-workflows/commit/e68e06c3453453d70a76c08b1a6cb00635b2d941) fix: Daemon step updated 'pod delete' while pod is running (#8399) - * [b9f8b3587](https://github.com/argoproj/argo-workflows/commit/b9f8b3587345eda47edfaebb7bc18ea1193d430b) fix: Add readiness check to controller. Fixes #8283 (#8285) - * [ed26dc0a0](https://github.com/argoproj/argo-workflows/commit/ed26dc0a09bc38ac2366124621ea98918b95b34a) fix: Absolute submodules in git artifacts. Fixes #8377 (#8381) - * [6f77c0af0](https://github.com/argoproj/argo-workflows/commit/6f77c0af03545611dfef0222bcf5f5f76f30f4d4) fix: Back-off UI retries. Fixes #5697 (#8333) - * [8d5c2f2a3](https://github.com/argoproj/argo-workflows/commit/8d5c2f2a39033972e1f389029f5c08290aa19ccd) fix: replace `podName` with `nodeId` in `_.primary.swagger.json` (#8385) - * [a327edd5a](https://github.com/argoproj/argo-workflows/commit/a327edd5a5c5e7aff4c64131f1a9c3d9e5d9d3eb) fix: removed error from artifact server 401 response. Fixes #8382 (#8383) - * [502cf6d88](https://github.com/argoproj/argo-workflows/commit/502cf6d882ac51fd80950c2f25f90e491b3f13f6) fix: Updating complated node status - * [0a0956864](https://github.com/argoproj/argo-workflows/commit/0a09568648199fcc5a8997e4f5eed55c40bfa974) fix: Fix response type for artifact service OpenAPI and SDKs. 
Fixes #7781 (#8332) - * [a3bce2aaf](https://github.com/argoproj/argo-workflows/commit/a3bce2aaf94b07a73c3a7a4c9205872be7dc360c) fix: patch workflow status to workflow (#8265) - * [c5174fbee](https://github.com/argoproj/argo-workflows/commit/c5174fbeec69aa0ea4dbad8b239b7e46c76e5873) fix: Update argo-server manifests to have read-only root file-system (#8210) - * [ba795e656](https://github.com/argoproj/argo-workflows/commit/ba795e6562902d66adadd15554f791bc85b779a8) fix: Panic in Workflow Retry (#8243) - * [c95de6bb2](https://github.com/argoproj/argo-workflows/commit/c95de6bb25b8d7294f8f48490fccb2ba95d96f9b) fix: Hook with wftemplateRef (#8242) - * [187c21fa7](https://github.com/argoproj/argo-workflows/commit/187c21fa7b45d87c55dd71f247e439f6c9b776b3) fix: submodule cloning via git. Fixes #7469 (#8225) - * [289d44b9b](https://github.com/argoproj/argo-workflows/commit/289d44b9b0234baf24f1384a0b6743ca10bfb060) fix: do not panic when termination-log is not writeable (#8221) - * [c10ba38a8](https://github.com/argoproj/argo-workflows/commit/c10ba38a86eb2ba4e70812b172a02bea901073f1) fix: Support `--parameters-file` where ARGO_SERVER specified. Fixes #8160 (#8213) - * [239781109](https://github.com/argoproj/argo-workflows/commit/239781109e62e405a6596e88c706df21cf152a6e) fix: Use `latest` image tag when version is `untagged`. Fixes #8188 (#8191) - * [7d00fa9d9](https://github.com/argoproj/argo-workflows/commit/7d00fa9d94427e5b30bea3c3bd7fecd673b95870) fix: task worker requeue wrong task. Fixes #8139 (#8186) - * [ed6907f1c](https://github.com/argoproj/argo-workflows/commit/ed6907f1cafb1cd53a877c1bdebbf0497ab53278) fix: Authentication for plugins. Fixes #8144 (#8147) - * [5ff9bc9aa](https://github.com/argoproj/argo-workflows/commit/5ff9bc9aaba80db7833d513321bb6ae2d305f1f9) fix: Correctly order emissary combined output. Fixes #8159 (#8175) - * [918c27311](https://github.com/argoproj/argo-workflows/commit/918c273113ed14349c8df87d727a5b8070d301a1) fix: Add instance ID to `workflowtaskresults` (#8150) - * [af0cfab8f](https://github.com/argoproj/argo-workflows/commit/af0cfab8f3bd5b62ebe967381fed0bccbd7c7ada) fix: Update `workflowtaskresult` code have own reconciliation loop. (#8135) - * [3a425ec5a](https://github.com/argoproj/argo-workflows/commit/3a425ec5a1010e9b9ac2aac054095e5e9d240693) fix: Authentication for plugins. Fixes #8144 (#8147) - * [cdd1633e4](https://github.com/argoproj/argo-workflows/commit/cdd1633e428d8596467e7673d0d6d5c50ade41af) fix: Correctly order emissary combined output. Fixes #8159 (#8175) - * [22c203fc4](https://github.com/argoproj/argo-workflows/commit/22c203fc44a005e4207fff5b8ce7f4854ed0bf78) fix: Add instance ID to `workflowtaskresults` (#8150) - * [79a9a5b6f](https://github.com/argoproj/argo-workflows/commit/79a9a5b6fcca7953e740a5e171d3bc7f08953854) fix: improve error message when the controller is set `templateReferencing: Secure` (#8277) - * [7e880216a](https://github.com/argoproj/argo-workflows/commit/7e880216a1bf384d15d836877d170bbeea19814d) fix: `taskresults` owned by pod rather than workflow. (#8284) - * [347583132](https://github.com/argoproj/argo-workflows/commit/347583132916fd2f87b3885381fe86281ea3ec33) fix: fix: git artifact will be checked out even if local file matches name of tracking branch (#8287) - * [aa460b9ad](https://github.com/argoproj/argo-workflows/commit/aa460b9adc40ed4854dc373d0d755e6d36b633f8) fix: reduce number of workflows displayed in UI by default. 

### Contributors

 * Aatman
 * Alex Collins
 * Dillen Padhiar
 * Markus Lippert
 * Michael Weibel
 * Rohan Kumar
 * Saravanan Balasubramanian
 * Takumi Sue
 * Tristan Colgate-McFarlane
 * Wu Jayway
 * dependabot[bot]

## v3.3.1 (2022-03-18)

 * [76ff748d4](https://github.com/argoproj/argo-workflows/commit/76ff748d41c67e1a38ace1352ca3bab8d7ec8a39) feat: add TLS config option to HTTP template. Fixes #7390 (#7929)
 * [4c61c8df2](https://github.com/argoproj/argo-workflows/commit/4c61c8df2a3fcbe7abbc04dba34f59d270fe15f3) fix(executor): Retry kubectl on internal transient error (#8092)
 * [47b78d4c4](https://github.com/argoproj/argo-workflows/commit/47b78d4c473c5e6e6301181bff298f32456288bd) fix(plugins): UX improvements (#8122)
 * [ad7d9058e](https://github.com/argoproj/argo-workflows/commit/ad7d9058ed025481051c8545f26954f87463526f) fix: Authentication for plugins. Fixes #8144 (#8147)
 * [5b14e15c2](https://github.com/argoproj/argo-workflows/commit/5b14e15c216995ca72fa5c7fc174913506fbdcd9) feat: add TLS config option to HTTP template. Fixes #7390 (#7929)
 * [4e543f268](https://github.com/argoproj/argo-workflows/commit/4e543f268246afd2dcfc309f3d29d3c052ebeef4) fix(plugins): UX improvements (#8122)
 * [845a244c7](https://github.com/argoproj/argo-workflows/commit/845a244c71129aa843d06a26d89aeec6da6c57d7) fix(executor): Retry kubectl on internal transient error (#8092)
 * [ea36c337d](https://github.com/argoproj/argo-workflows/commit/ea36c337d8805534c3f358d1b44b2f1e50c8141a) fix: 'workflow.duration' is not available as a real time metric (#8181)
 * [d10a7310c](https://github.com/argoproj/argo-workflows/commit/d10a7310c08273209b01c55d325e77407ee5f75c) fix: Correctly order emissary combined output. Fixes #8159 (#8175)
 * [442096bf2](https://github.com/argoproj/argo-workflows/commit/442096bf2e893e5034fd0120889244ad6a50387c) fix: Add instance ID to `workflowtaskresults` (#8150)
 * [2b87f860d](https://github.com/argoproj/argo-workflows/commit/2b87f860d1dc4007c799337f02101ead89297a11) fix: add /etc/mime.types mapping table (#8171)
 * [26471c8ee](https://github.com/argoproj/argo-workflows/commit/26471c8ee2895a275ff3a180e6b92545e7c2dfee) fix: Limit workflows to 128KB and return a friendly error message (#8169)
 * [dfca6f1e5](https://github.com/argoproj/argo-workflows/commit/dfca6f1e57eea85e1994a8e39ac56421a1cb466d) fix: Remove need for `get pods` from Emissary (#8133)
 * [049d3d11f](https://github.com/argoproj/argo-workflows/commit/049d3d11f3d1e10a4b1b1edddea60030abb80e0b) fix: Fix the TestStopBehavior flakiness (#8096)
 * [0cec27390](https://github.com/argoproj/argo-workflows/commit/0cec27390b55bace1c66da8cf7a24194b4ee0c09) fix: pod deleted due to delayed cleanup. Fixes #8022 (#8061)

### Contributors

 * Alex Collins
 * Felix Seidel
 * Ming Yu Shi
 * Rohan Kumar
 * Saravanan Balasubramanian
 * Vignesh
 * William Van Hevelingen
 * Wu Jayway

## v3.3.0 (2022-03-14)


### Contributors


## v3.3.0-rc10 (2022-03-07)

 * [e6b3ab548](https://github.com/argoproj/argo-workflows/commit/e6b3ab548d1518630954205c6e2ef0f18e74dcf9) fix: Use EvalBool instead of explicit casting (#8094)
 * [6640689e3](https://github.com/argoproj/argo-workflows/commit/6640689e36918d3c24b2af8317d0fdadba834770) fix: e2e TestStopBehavior (#8082)

### Contributors

 * Saravanan Balasubramanian
 * Simon Behar

## v3.3.0-rc9 (2022-03-04)

 * [4decbea99](https://github.com/argoproj/argo-workflows/commit/4decbea991e49313624a3dc71eb9aadb906e82c8) fix: test
 * [e2c53e6b9](https://github.com/argoproj/argo-workflows/commit/e2c53e6b9a3194353874b9c22e61696ca228cd24) fix: lint
 * [5d8651d5c](https://github.com/argoproj/argo-workflows/commit/5d8651d5cc65cede4f186dd9d99c5f1b644d5f56) fix: e2e
 * [4a2b2bd02](https://github.com/argoproj/argo-workflows/commit/4a2b2bd02b3a62daf61987502077877bbdb4bcca) fix: Make workflow.status available to template level (#8066)
 * [baa51ae5d](https://github.com/argoproj/argo-workflows/commit/baa51ae5d74b53b8e54ef8d895eae36b9b50375b) feat: Expand `mainContainer` config to support all fields. Fixes #7962 (#8062)
 * [cedfb1d9a](https://github.com/argoproj/argo-workflows/commit/cedfb1d9ab7a7cc58c9032dd40509dc34666b3e9) fix: Stop the workflow if activeDeadlineSeconds has been patched (#8065)
 * [662a7295b](https://github.com/argoproj/argo-workflows/commit/662a7295b2e263f001b94820ebde483fcf7f038d) feat: Replace `patch pod` with `create workflowtaskresult`. Fixes #3961 (#8000)
 * [9aa04a149](https://github.com/argoproj/argo-workflows/commit/9aa04a1493c01782ed51b01c733ca6993608ea5b) feat: Remove plugin Kube API access by default. (#8028)
 * [cc80219db](https://github.com/argoproj/argo-workflows/commit/cc80219db6fd2be25088593f54c0d55aec4fe1e7) chore(deps): bump actions/checkout from 2 to 3 (#8049)
 * [f9c7ab58e](https://github.com/argoproj/argo-workflows/commit/f9c7ab58e20fda8922fa00e9d468bda89031887a) fix: directory traversal vulnerability (#7187)
 * [931cbbded](https://github.com/argoproj/argo-workflows/commit/931cbbded2d770e451895cc906ebe8e489ff92a6) fix(executor): handle podlog in deadlineExceed termination. Fixes #7092 #7081 (#7093)
 * [8eb862ee5](https://github.com/argoproj/argo-workflows/commit/8eb862ee57815817e437368d0680b824ded2cda4) feat: fix naming (#8045)
 * [b7a525be4](https://github.com/argoproj/argo-workflows/commit/b7a525be4014e3bdd28124c8736c25a007049ae7) feat!: Remove deprecated config flags. Fixes #7971 (#8009)
 * [46f901311](https://github.com/argoproj/argo-workflows/commit/46f901311a1fbbdc041a3a15e78ed70c2b889849) feat: Add company AKRA GmbH (#8036)
 * [7bf377df7](https://github.com/argoproj/argo-workflows/commit/7bf377df7fe998491ada5023be49521d3a44aba6) Yubo added to users (#8040)
 * [fe105a5f0](https://github.com/argoproj/argo-workflows/commit/fe105a5f095b80c7adc945f3f33ae5bec9bae016) chore(deps): bump actions/setup-python from 2.3.2 to 3 (#8034)
 * [fe8ac30b0](https://github.com/argoproj/argo-workflows/commit/fe8ac30b0760f61b679a605569c197670461ad65) fix: Support for custom HTTP headers. Fixes #7985 (#8004)

### Contributors

 * Alex Collins
 * Anurag Pathak
 * Niklas Hansson
 * Saravanan Balasubramanian
 * Tianchu Zhao
 * Todor Todorov
 * Wojciech Pietrzak
 * dependabot[bot]
 * descrepes
 * kennytrytek

## v3.3.0-rc8 (2022-02-28)

 * [9655a8348](https://github.com/argoproj/argo-workflows/commit/9655a834800c0936dbdc1045b49f587a92d454f6) fix: panic on synchronization if workflow has mutex and semaphore (#8025)
 * [957330301](https://github.com/argoproj/argo-workflows/commit/957330301e0b29309ae9b08a376b012a639e1dd5) fix: Fix/client go/releaseoncancel. Fixes #7613 (#8020)
 * [c5c3b3134](https://github.com/argoproj/argo-workflows/commit/c5c3b31344650be516a6c00da88511b06f38f1b8) fix!: Document `workflowtaskset` breaking change. Fixes #8013 (#8015)
 * [56dc11cef](https://github.com/argoproj/argo-workflows/commit/56dc11cef56a0b690222116d52976de9a8418e55) feat: fix path for plugin example (#8014)
 * [06d4bf76f](https://github.com/argoproj/argo-workflows/commit/06d4bf76fc2f8ececf2b25a0ba5a81f844445b0f) fix: Reduce agent permissions. Fixes #7986 (#7987)

### Contributors

 * Alex Collins
 * Niklas Hansson
 * Saravanan Balasubramanian
 * Shyukri Shyukriev

## v3.3.0-rc7 (2022-02-25)

 * [20f7516f9](https://github.com/argoproj/argo-workflows/commit/20f7516f916fb2c656ed3bf9d1d7bee18d136d53) fix: Re-factor `assessNodeStatus`. Fixes #7996 (#7998)
 * [c5a618516](https://github.com/argoproj/argo-workflows/commit/c5a618516820d70c7302d5b4750b68b8c270bc92) chore(deps): bump actions/setup-node from 2.5.1 to 3 (#8001)
 * [f0fb0d56d](https://github.com/argoproj/argo-workflows/commit/f0fb0d56d3f896ef74e39c2e391de2c4a30a1a52) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.15 to 7.0.23 (#8003)
 * [7e34ac513](https://github.com/argoproj/argo-workflows/commit/7e34ac5138551f0ebe0ca13ebfb4ad1fc8553ef1) feat: Support `workflow.parameters` in workflow meta-data. Fixes #3434 (#7711)
 * [aea6c3912](https://github.com/argoproj/argo-workflows/commit/aea6c391256ece81b1d81a1d3cfe59088fa91f8d) chore(deps): bump github.com/gorilla/websocket from 1.4.2 to 1.5.0 (#7991)
 * [89d7cc39d](https://github.com/argoproj/argo-workflows/commit/89d7cc39df386507b59c4858968ee06b33168faa) chore(deps): bump github.com/tidwall/gjson from 1.13.0 to 1.14.0 (#7992)
 * [7c0e28901](https://github.com/argoproj/argo-workflows/commit/7c0e2890154ee187a8682c8fa6532952d73ef02c) fix: Generate SDKs (#7989)
 * [980f2feb7](https://github.com/argoproj/argo-workflows/commit/980f2feb7b887b23513f1fc0717321bfdf134506) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.2.0 to 2.3.1 (#7979)
 * [5e45cd95a](https://github.com/argoproj/argo-workflows/commit/5e45cd95a084ec444dfc4c30b27f83ba8503b8e7) chore(deps): bump github.com/antonmedv/expr from 1.8.9 to 1.9.0 (#7967)
 * [857768949](https://github.com/argoproj/argo-workflows/commit/8577689491b4d7375dde01faeab4c12eef2ba076) feat: Reduce agent pod permissions. Fixes #7914 (#7915)
 * [d57fd0ff4](https://github.com/argoproj/argo-workflows/commit/d57fd0ff409d9f5fa238e0b726c83e0c366012ab) fix: Report container, plugin and HTTP progress. Fixes #7918 (#7960)
 * [412ff1c41](https://github.com/argoproj/argo-workflows/commit/412ff1c41196cb602aa7bb98a39e8ec90c08ada5) feat(controller): skip resolve artifact when `when` evaluates to false on withSequence (#7950)

### Contributors

 * Alex Collins
 * Tianchu Zhao
 * dependabot[bot]

## v3.3.0-rc6 (2022-02-21)


### Contributors


## v3.3.0-rc5 (2022-02-21)

 * [79fc4a9be](https://github.com/argoproj/argo-workflows/commit/79fc4a9bea8d76905d314ac41df7018b556a91d6) chore(deps): bump upper.io/db.v3 (#7939)
 * [ad312674a](https://github.com/argoproj/argo-workflows/commit/ad312674a0bbe617d199f4497e79b3e0fb6d64a8) fix: Fix broken Windows build (#7933)
 * [5b6bfb6d3](https://github.com/argoproj/argo-workflows/commit/5b6bfb6d334914d8a8722f4d78b4794a92520757) fix: Fix `rowserrcheck` lint errors (#7924)
 * [848effce0](https://github.com/argoproj/argo-workflows/commit/848effce0c61978de9da4da93d25a9f78ef1a0a8) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk (#7919)
 * [044389b55](https://github.com/argoproj/argo-workflows/commit/044389b55990cb4d13fda279fed48f9bfd3d1112) chore(deps): bump github.com/aliyun/aliyun-oss-go-sdk (#7901)
 * [ce00cd8ed](https://github.com/argoproj/argo-workflows/commit/ce00cd8edae68ad8aa5ed6003b574be903a5c346) feat: Support insecureSkipVerify for HTTP templates. Fixes #7790 (#7885)
 * [11890b4cc](https://github.com/argoproj/argo-workflows/commit/11890b4cc14405902ee336e9197dd153df27c36b) feat: Update new version modal for v3.3. Fixes #7639 (#7707)
 * [3524615b8](https://github.com/argoproj/argo-workflows/commit/3524615b89bd6da041413b88025cddeed8a704ad) fix: Add license to python sdk. Fixes #7881 (#7883)
 * [80e7a27bf](https://github.com/argoproj/argo-workflows/commit/80e7a27bf08431204994bf848afdf2d5af8a94c1) fix: Increase padding between elements in workflow template creator. Fixes #7309 (#7420)
 * [7776a1113](https://github.com/argoproj/argo-workflows/commit/7776a11131195a580618962f8ec4c0d23fe59cee) Add nil-check in LintWorkflow (#7769)
 * [c0c24d24e](https://github.com/argoproj/argo-workflows/commit/c0c24d24e8ac5a2fd69def064dd9f0ed2bcf0326) fix: trim spaces while parsing realtime metrics value. Fixes #7819 (#7856)
 * [dc82f3f42](https://github.com/argoproj/argo-workflows/commit/dc82f3f428e3b8f17a7ea9121919b6270d1967f7) chore(deps): bump github.com/prometheus/client_golang (#7880)
 * [bb8d2858d](https://github.com/argoproj/argo-workflows/commit/bb8d2858da78bf3eb0022688e34020668bbc08a9) fix: workflow-node-info long attribute message cannot be wrapped in the ui (#7876)
 * [808c561f1](https://github.com/argoproj/argo-workflows/commit/808c561f1c4a56668c32caa69be5b0441d372610) feat: add container-set retry strategy. Fixes #7290 (#7377)
 * [31cc8bf98](https://github.com/argoproj/argo-workflows/commit/31cc8bf98864c15192845ee6f2349bd0099a71ae) fix(cli): fix typo in argo cron error messages (#7875)
 * [87cb15591](https://github.com/argoproj/argo-workflows/commit/87cb1559107ec88dd418229b38113d70ba2a8580) fix: added priorityclass to workflow-controller. Fixes #7733 (#7859)
 * [69c5bc79f](https://github.com/argoproj/argo-workflows/commit/69c5bc79f38e4aa7f4526111900904ac56e13d54) fix: Fix go-jose dep. Fixes #7814 (#7874)
 * [28412ef7c](https://github.com/argoproj/argo-workflows/commit/28412ef7c37b1e1b2be0d60c46c5327f682a6a00) fix: Add env to argo-server deployment manifest. Fixes #7285 (#7852)
 * [fce82d572](https://github.com/argoproj/argo-workflows/commit/fce82d5727b89cfe49e8e3568fff40725bd43734) feat: remove pod workers. Fixes #4090 (#7837)
 * [938fde967](https://github.com/argoproj/argo-workflows/commit/938fde9673cf7aabe04587e63a28a3aa34ea049e) fix(ui): unauthorized login screen redirection to token creation docs (#7846)
 * [1d7a17714](https://github.com/argoproj/argo-workflows/commit/1d7a17714fda0d8331ce11c765f0c95797c75afe) chore(deps): bump github.com/soheilhy/cmux from 0.1.4 to 0.1.5 (#7848)
 * [1113f70fa](https://github.com/argoproj/argo-workflows/commit/1113f70fa0152fef5955a295bd5df50242fe9a67) fix: submitting Workflow from WorkflowTemplate will set correct serviceAccount and securityContext. Fixes #7726 (#7805)

### Contributors

 * AdamKorcz
 * Alex Collins
 * Baz Chalk
 * Dillen Padhiar
 * Doğukan Tuna
 * Isitha Subasinghe
 * Jin Dong
 * Ken Kaizu
 * Lukasz Stolcman
 * Markus Lippert
 * Niklas Hansson
 * Oleg
 * Rohan Kumar
 * Tianchu Zhao
 * Vrukshali Torawane
 * dependabot[bot]

## v3.3.0-rc4 (2022-02-08)

 * [27977070c](https://github.com/argoproj/argo-workflows/commit/27977070c75e9369e16dd15025893047a95f85a5) chore(deps): bump github.com/go-openapi/spec from 0.20.2 to 0.20.4 (#7817)
 * [1a1cc9a9b](https://github.com/argoproj/argo-workflows/commit/1a1cc9a9bc3dfca245c34ab9ecdeed7c52578ed5) feat: Surface container and template name in emissary error message. Fixes #7780 (#7807)
 * [fb73d0194](https://github.com/argoproj/argo-workflows/commit/fb73d01940b6d1673c3fbc9238fbd26c88aba3b7) feat: make submit workflow parameter form as textarea to input multi line string easily (#7768)
 * [7e96339a8](https://github.com/argoproj/argo-workflows/commit/7e96339a8c8990f68a444ef4f33d5469a8e64a31) chore(deps): bump actions/setup-python from 2.3.1 to 2.3.2 (#7775)
 * [932466540](https://github.com/argoproj/argo-workflows/commit/932466540a109550b98714f41a5c6e1d3fc13158) fix: Use v1 pod name if no template name or ref. Fixes #7595 and #7749 (#7605)
 * [e9b873ae3](https://github.com/argoproj/argo-workflows/commit/e9b873ae3067431ef7cbcfa6744c57a19adaa9f5) fix: Missed workflow should not trigger if Forbidden Concurrency with no StartingDeadlineSeconds (#7746)
 * [e12827b8b](https://github.com/argoproj/argo-workflows/commit/e12827b8b0ecb11425399608b1feee2ad739575d) feat: add claims.Email into gatekeeper audit log entry (#7748)
 * [74d1bbef7](https://github.com/argoproj/argo-workflows/commit/74d1bbef7ba33466366623c82343289ace41f01a) chore(deps): bump cloud.google.com/go/storage from 1.19.0 to 1.20.0 (#7747)

### Contributors

 * Alex Collins
 * J.P. Zivalich
 * Ken Kaizu
 * Saravanan Balasubramanian
 * dependabot[bot]

## v3.3.0-rc3 (2022-02-03)

 * [70715ecc8](https://github.com/argoproj/argo-workflows/commit/70715ecc8a8d29c5800cc7176923344939038cc6) fix: artifacts.(*ArtifactServer).GetInputArtifactByUID ensure valid request path (#7730)
 * [1277f0579](https://github.com/argoproj/argo-workflows/commit/1277f05796cdf8c50e933ccdf8d665b6bf8d184c) chore(deps): bump gopkg.in/square/go-jose.v2 from 2.5.1 to 2.6.0 (#7740)
 * [7e6f2c0d7](https://github.com/argoproj/argo-workflows/commit/7e6f2c0d7bf493ee302737fd2a4e650b9bc136fc) chore(deps): bump github.com/valyala/fasttemplate from 1.1.0 to 1.2.1 (#7727)
 * [877d65697](https://github.com/argoproj/argo-workflows/commit/877d6569754be94f032e1c48d1f7226a83adfbec) chore(deps): bump cloud.google.com/go/storage from 1.10.0 to 1.19.0 (#7714)
 * [05fc4a795](https://github.com/argoproj/argo-workflows/commit/05fc4a7957f16a37ef018bd715b904ab33ce716b) chore(deps): bump peaceiris/actions-gh-pages from 2.5.0 to 2.9.0 (#7713)
 * [bf3b58b98](https://github.com/argoproj/argo-workflows/commit/bf3b58b98ac62870b779ac4aad734130ee5473b2) fix: ContainerSet termination during pending Pod #7635 (#7681)
 * [f6c9a6aa7](https://github.com/argoproj/argo-workflows/commit/f6c9a6aa7734263f478b9cef2bcb570d882f135c) fix: Pod "START TIME"/ "END TIME" tooltip shows time in UTC and local timezone Fixes #7488 (#7694)
 * [e2e046f6f](https://github.com/argoproj/argo-workflows/commit/e2e046f6fded6581f153598100d3ccf9bb661912) fix: Fix argo lint panic when missing param value in DAG task. Fixes #7701 (#7706)
 * [72817f2b8](https://github.com/argoproj/argo-workflows/commit/72817f2b89c60f30d5dc73fc256ae0399e57737e) feat: Add variable substitution on ConfigMapKeySelector. Fixes #7061 (#7542)
 * [0f4c48473](https://github.com/argoproj/argo-workflows/commit/0f4c48473c7281671e84d96392f89ec35f38fb42) chore(deps): bump gopkg.in/go-playground/webhooks.v5 (#7704)

### Contributors

 * Denis Melnik
 * Paco Guzmán
 * Tino Schröter
 * Yago Riveiro
 * Yuan Tang
 * dependabot[bot]

## v3.3.0-rc2 (2022-01-29)

 * [753509394](https://github.com/argoproj/argo-workflows/commit/75350939442d26f35afc57ebe183280dc3d158ac) fix: Handle release candidate versions in Python SDK version. Fixes #7692 (#7693)

### Contributors

 * Yuan Tang

## v3.3.0-rc1 (2022-01-28)

 * [45730a9cd](https://github.com/argoproj/argo-workflows/commit/45730a9cdeb588d0e52b1ac87b6e0ca391a95a81) feat: lifecycle hook (#7582)
 * [4664aeac4](https://github.com/argoproj/argo-workflows/commit/4664aeac4ffa208114b8483e6300c39b537b402d) chore(deps): bump google.golang.org/grpc from v1.38.0 to v1.41.1 (#7658)
 * [ecf2ceced](https://github.com/argoproj/argo-workflows/commit/ecf2cecedcf8fd3f70a846372e85c471b6512aca) chore(deps): bump github.com/grpc-ecosystem/go-grpc-middleware (#7679)
 * [67c278cd1](https://github.com/argoproj/argo-workflows/commit/67c278cd1312d695d9925f64f24957c1449219cc) fix: Support terminating with `templateRef`. Fixes #7214 (#7657)
 * [1159afc3c](https://github.com/argoproj/argo-workflows/commit/1159afc3c082c62f6142fad35ba461250717a8bb) fix: Match cli display pod names with k8s. Fixes #7646 (#7653)
 * [6a97a6161](https://github.com/argoproj/argo-workflows/commit/6a97a616177e96fb80e43bd1f98eac595f0f0a7d) fix: Retry with DAG. Fixes #7617 (#7652)
 * [559153417](https://github.com/argoproj/argo-workflows/commit/559153417db5a1291bb1077dc61ee8e6eb787c41) chore(deps): bump github.com/prometheus/common from 0.26.0 to 0.32.1 (#7660)
 * [a20150c45](https://github.com/argoproj/argo-workflows/commit/a20150c458c45456e40ef73d91f0fa1561b85a1e) fix: insecureSkipVerify needed. Fixes #7632 (#7651)
 * [3089a750c](https://github.com/argoproj/argo-workflows/commit/3089a750cd632801d5c2a994d4544ecc918588f2) chore(deps): bump actions/setup-node from 1 to 2.5.1 (#7644)
 * [0137e1980](https://github.com/argoproj/argo-workflows/commit/0137e1980f2952e40c1d11d5bf53e18fe0f3914c) fix: error when path length != 6 (#7648)
 * [b7cd2f5a9](https://github.com/argoproj/argo-workflows/commit/b7cd2f5a93effaa6473001da87dc30eaf9814822) feat: add overridable default input artifacts #2026 (#7647)
 * [17342bacc](https://github.com/argoproj/argo-workflows/commit/17342bacc991c1eb9cce5639c857936d3ab8c5c9) chore(deps): bump peaceiris/actions-gh-pages from 2.5.0 to 3.8.0 (#7642)
 * [24f677a59](https://github.com/argoproj/argo-workflows/commit/24f677a5941eac8eebc0e025e909f58b26a93ce1) chore(deps): bump actions/setup-python from 1 to 2.3.1 (#7643)
 * [6f60703db](https://github.com/argoproj/argo-workflows/commit/6f60703dbfb586607a491c8bebc8425029853c84) fix: Fix non-standard git username support. Fixes #7593 (#7634)
 * [0ce9e70ef](https://github.com/argoproj/argo-workflows/commit/0ce9e70ef72274d69c4bfb5a6c83d1fdefa9038a) fix: SSO to handle multiple authorization cookies such as from wildca… (#7607)
 * [3614db690](https://github.com/argoproj/argo-workflows/commit/3614db690aea3e0c4e5221fa1b2c851ca70e6b18) feat: adding support for getting tls certificates from kubernetes secret (e.g. (#7621)
 * [596f94c90](https://github.com/argoproj/argo-workflows/commit/596f94c900ebbe41930472364e2b2298220e9ca7) feat: customize nav bar background color (#7387)
 * [774bf47ee](https://github.com/argoproj/argo-workflows/commit/774bf47ee678ef31d27669f7d309dee1dd84340c) feat: Template executor plugin. (#7256)
 * [d2e98d6b4](https://github.com/argoproj/argo-workflows/commit/d2e98d6b45e01ec7d7b614f22291e008faedcf01) fix: Support artifact ref from tmpl in UI. Fixes #7587 (#7591)
 * [c6be0fe77](https://github.com/argoproj/argo-workflows/commit/c6be0fe774e736059dd53e5cf80f2a99c4a3c569) feat(ui): Show first-time UX. Fixes #7160 (#7170)
 * [2e343eb7f](https://github.com/argoproj/argo-workflows/commit/2e343eb7f1328c8ec242116d38bb7e651703ea26) fix: Upgrade prismjs to v1.26 to fix security scan. Fixes #7599 (#7601)
 * [f9fa0e303](https://github.com/argoproj/argo-workflows/commit/f9fa0e303da39accd3e1268361df4f70dc6e391e) fix: Support inputs for inline DAG template. Fixes #7432 (#7439)
 * [bc27ada85](https://github.com/argoproj/argo-workflows/commit/bc27ada852c57ebf7a3f87e2eaf161cc72ad7198) fix: Fix inconsistent ordering of workflows with the list command. Fixes #7581 (#7594)
 * [af257c178](https://github.com/argoproj/argo-workflows/commit/af257c178b78f0a7cae6af38e15b20bfcf3dba6a) feat: Support templateRef in LifecycleHook. Fixes #7558 (#7570)
 * [f1fe3bee4](https://github.com/argoproj/argo-workflows/commit/f1fe3bee498ac7eb895af6f89a0eba5095410467) fix: hanging wait container on save artifact to GCS bucket artifactRepository (#7536)
 * [a94b846e6](https://github.com/argoproj/argo-workflows/commit/a94b846e67382252831d44624c2f4b1708f7a30c) fix: fix nil pointer about Outputs.ExitCode. Fixes #7458 (#7459)
 * [e395a5b03](https://github.com/argoproj/argo-workflows/commit/e395a5b0381560d59aba928ea31f5cd4e7c04665) Update workflow-restrictions.md (#7508)
 * [b056de384](https://github.com/argoproj/argo-workflows/commit/b056de3847db2e654f761ce15309ac7629ea1dc9) Add new line to render bullets properly. (#7579)
 * [4b83de9b5](https://github.com/argoproj/argo-workflows/commit/4b83de9b527e59bc29746a824efbe97daa47e504) fix: More informative error message when artefact repository is not configured. Fixes #7372 (#7498)
 * [2ab7dfebe](https://github.com/argoproj/argo-workflows/commit/2ab7dfebe13c20a158d5def3f1932fdbc54041d4) fix: update old buildkit version in buildkit-template.yaml (#7512)
 * [c172d1dce](https://github.com/argoproj/argo-workflows/commit/c172d1dcef3e787d49a6fe637922de733a054a84) fix: show invalid cron schedule error on cron status ui (#7441)
 * [fbf4751f4](https://github.com/argoproj/argo-workflows/commit/fbf4751f45052750024901f6a2ba56b65587d701) fix: resolve resourcesDuration (#7299)
 * [033ed978e](https://github.com/argoproj/argo-workflows/commit/033ed978e2d5ec05c862259a92d3ec35e0bfd1d9) fix(controller): fix pod stuck in running when using podSpecPatch and emissary (#7407)
 * [ebdde3392](https://github.com/argoproj/argo-workflows/commit/ebdde3392b0c50b248dfbb8b175ef8acff265ed1) fix: Fix supplied global workflow parameters (#7573)
 * [eb1c3e0b4](https://github.com/argoproj/argo-workflows/commit/eb1c3e0b40f74ca1a52ef0f7fd7a7cb79ae2987f) feat: Adds timezone to argo cron list output (#7557) (#7559)
 * [dbb1bcfbd](https://github.com/argoproj/argo-workflows/commit/dbb1bcfbd4de3295163900509fc624fb7d363b10) fix: add priority field to submitopts (#7572)
 * [bc1f304a9](https://github.com/argoproj/argo-workflows/commit/bc1f304a93149131452687162801e865c7decc14) fix: Fix type assertion bug (#7556)
 * [970a503c5](https://github.com/argoproj/argo-workflows/commit/970a503c561a0cdb30a7b1ce2ed8d34b1728e61f) fix: nil-pointer in util.ApplySubmitOpts (#7529)
 * [18821c57f](https://github.com/argoproj/argo-workflows/commit/18821c57fbea7c86abc3a347155e1ce0cde92ea0) fix: handle source file is empty for script template (#7467)
 * [78e74ebe5](https://github.com/argoproj/argo-workflows/commit/78e74ebe5025a6164f1bd23bfd2cfced8ae2689e) chore(build): add windows .exe extension (#7535)
 * [b476c4af5](https://github.com/argoproj/argo-workflows/commit/b476c4af505b6f24161a3818c358f6f6b012f87e) fix: Make dev version of the Python SDK PEP440 compatible (#7525)
 * [26c1224b0](https://github.com/argoproj/argo-workflows/commit/26c1224b0d8b0786ef1a75a58e49914810d3e115) fix: transient errors for s3 artifacts: Fixes #7349 (#7352)
 * [3371e7268](https://github.com/argoproj/argo-workflows/commit/3371e7268c1ed5207d840285133a0d2f0417bbb9) fix: http template doesn't update progress. Fixes #7239 (#7450)
 * [4b006d5f8](https://github.com/argoproj/argo-workflows/commit/4b006d5f8eb338f91f1b77a813dc8a09d972c131) fix: Global param value incorrectly overridden when loading from configmaps (#7515)
 * [0f206d670](https://github.com/argoproj/argo-workflows/commit/0f206d670eb38c6b02c9015b30b04ff0396289c8) fix: only aggregates output from successful nodes (#7517)
 * [318927ed6](https://github.com/argoproj/argo-workflows/commit/318927ed6356d10c73fe775790b7765ea17480d4) fix: out of range in MustUnmarshal (#7485)
 * [d3ecdf11c](https://github.com/argoproj/argo-workflows/commit/d3ecdf11c145be97c1c1e4ac4d20d5d543ae53ca) feat: add workflow.labels and workflow.annotations as JSON string. Fixes: #7289 (#7396)
 * [4f9e299b7](https://github.com/argoproj/argo-workflows/commit/4f9e299b7f7d8d7084ac0def2a6902b26d2b9b5e) fix: shutdown workqueues to avoid goroutine leaks (#7493)
 * [dd77dc993](https://github.com/argoproj/argo-workflows/commit/dd77dc9937bdd9ab97c837e7f3f88ef5ecc2cae3) fix: submitting cluster workflow template on namespaced install returns error (#7437)
 * [e4b0f6576](https://github.com/argoproj/argo-workflows/commit/e4b0f65762225962d40e0d8cade8467435876470) feat: Add Python SDK versioning script (#7429)
 * [d99796b2f](https://github.com/argoproj/argo-workflows/commit/d99796b2f7e8c9fb895205461cc2a461f0cd643d) fix: Disable SDK release from master branch (#7419)
 * [dbda60fc5](https://github.com/argoproj/argo-workflows/commit/dbda60fc5c72c02729d98b4e5ff08f89a6bf428c) feat: Python SDK publish (#7363)
 * [79d50fc27](https://github.com/argoproj/argo-workflows/commit/79d50fc278d1d5e1dc8fbc27285c28b360426ce4) fix: Correct default emissary bug. Fixes #7224 (#7412)
 * [014bac90f](https://github.com/argoproj/argo-workflows/commit/014bac90ff0c62212ebae23d6dd9a1ed8c7d3a8c) fix: added check for initContainer name in workflow template (#7411)
 * [81afc8a7b](https://github.com/argoproj/argo-workflows/commit/81afc8a7b482aa9b95e010e02f9ef48dea7d7161) feat: List UID with 'argo archive list' (#7384)
 * [8d552fbf6](https://github.com/argoproj/argo-workflows/commit/8d552fbf6b3752025955b233a9462b34098cedf1) feat: added retention controller. Fixes #5369 (#6854)
 * [932040848](https://github.com/argoproj/argo-workflows/commit/932040848404d42a007b19bfaea685d4b505c2ef) fix: Skip missed executions if CronWorkflow schedule is changed. Fixes #7182 (#7353)
 * [79a95f223](https://github.com/argoproj/argo-workflows/commit/79a95f223396ecab408d831781ab2d38d1fa6de0) feat: Add SuccessCondition to HTTP template (#7303)
 * [aba6599f5](https://github.com/argoproj/argo-workflows/commit/aba6599f5759e57882172c8bc74cc63a2a809148) feat: Adjust name of generated Python SDK (#7328)
 * [78dd747c6](https://github.com/argoproj/argo-workflows/commit/78dd747c600541c7ae2e71b473c0652fdd105c66) fix: Propagate errors in task worker and don't return (#7357)
 * [8bd7f3971](https://github.com/argoproj/argo-workflows/commit/8bd7f3971e87d86ecd0c1887d49511b325207ab8) fix: argument of PodName function (fixes #7315) (#7316)
 * [6423b6995](https://github.com/argoproj/argo-workflows/commit/6423b6995f06188c11eddb3ad23ae6631c2bf025) feat: support workflow template parameter description (#7309) (#7346)
 * [1a3b87bdf](https://github.com/argoproj/argo-workflows/commit/1a3b87bdf8edba02ba5e5aed20f3942be1d6f46c) fix: improve error message for ListArchivedWorkflows (#7345)
 * [77d87da3b](https://github.com/argoproj/argo-workflows/commit/77d87da3be49ee344090f3ee99498853fdb30ba2) fix: Use and enforce structured logging. Fixes #7243 (#7324)
 * [3e727fa38](https://github.com/argoproj/argo-workflows/commit/3e727fa3878adf4133bde56a5fd18e3c50249279) feat: submit workflow make button disable after clicking (#7340)
 * [cb8c06369](https://github.com/argoproj/argo-workflows/commit/cb8c06369fec5e499770f5ea1109c862eb213e3b) fix: cannot access HTTP template's outputs (#7200)
 * [e0d5abcff](https://github.com/argoproj/argo-workflows/commit/e0d5abcffc9e2d7423454995974a2e91aab6ca24) fix: Use DEFAULT_REQUEUE_TIME for Agent. Fixes #7269 (#7296)
 * [242360a4f](https://github.com/argoproj/argo-workflows/commit/242360a4f26a378269aadcbaabca6a8fd6c618bf) fix(ui): Fix events error. Fixes #7320 (#7321)
 * [cf78ff6d7](https://github.com/argoproj/argo-workflows/commit/cf78ff6d76b09c4002edbc28048c67335bd1d00f) fix: Validate the type of configmap before loading parameters. Fixes #7312 (#7314)
 * [08254f547](https://github.com/argoproj/argo-workflows/commit/08254f547cad5f2e862bca2dd0f8fe52661f1314) fix: Handle the panic in operate function (#7262)
 * [d4aa9d1a6](https://github.com/argoproj/argo-workflows/commit/d4aa9d1a6f308a59ec95bd0f0d6221fe899a6e06) feat(controller): Support GC for memoization caches (#6850)
 * [77f520900](https://github.com/argoproj/argo-workflows/commit/77f520900bd79c7403aa81cd9e88dea0ba84c675) feat: Add `PodPriorityClassName` to `SubmitOpts`. Fixes #7059 (#7274)
 * [88cbea332](https://github.com/argoproj/argo-workflows/commit/88cbea3325d7414a1ea60d2bcde3e71e9f5dfd7b) fix: pod name shown in log when pod deletion (#7301)
 * [6c47c91e2](https://github.com/argoproj/argo-workflows/commit/6c47c91e29396df111d5b14867ab8de4befa1153) fix: Use default value for empty env vars (#7297)
 * [c2b3e8e93](https://github.com/argoproj/argo-workflows/commit/c2b3e8e93a307842db623c99a7643d3974cee6af) feat: Allow remove of PVC protection finalizer. Fixes #6629 (#7260)
 * [160bdc61e](https://github.com/argoproj/argo-workflows/commit/160bdc61e9eaa6e488c9871093504587cb585ab5) feat: Allow parallel HTTP requests (#7113)
 * [e0455772a](https://github.com/argoproj/argo-workflows/commit/e0455772a2164093c16f95480a2d21d4ae34a069) fix: Fix `argo auth token`. Fixes #7175 (#7186)
 * [0ea855479](https://github.com/argoproj/argo-workflows/commit/0ea85547984583d4919b8139ffd0dc3d2bdaf05e) fix: improve feedback when submitting a workflow from the CLI w/o a serviceaccount specified (#7246)
 * [3d47a5d29](https://github.com/argoproj/argo-workflows/commit/3d47a5d29dee66775e6fa871dee1b6ca1ae6acda) feat(emissary executor): Add step to allow users to pause template before and after execution. Fixes #6841 (#6868)
 * [1d715a05c](https://github.com/argoproj/argo-workflows/commit/1d715a05c09f1696f693fe8cd3d2e16a05c6368c) fix: refactor/fix pod GC. Fixes #7159 (#7176)
 * [389f7f486](https://github.com/argoproj/argo-workflows/commit/389f7f4861653609dd6337b370350bedbe00e5c8) feat(ui): add pagination to workflow-templates (#7163)
 * [09987a6dd](https://github.com/argoproj/argo-workflows/commit/09987a6dd03c1119fa286ed55cc97a2f4e588e09) feat: add CreatorUsername label when user is signed in via SSO. Fixes… (#7109)
 * [f34715475](https://github.com/argoproj/argo-workflows/commit/f34715475b2c71aeba15e7311f3ef723f394fbbf) fix: add gh ecdsa and ed25519 to known hosts (#7226)
 * [eb9a42897](https://github.com/argoproj/argo-workflows/commit/eb9a4289729c0d91bfa45cb5895e5bef61ce483e) fix: Fix ANSI color sequences escaping (#7211)
 * [e8a2f3778](https://github.com/argoproj/argo-workflows/commit/e8a2f37784f57c289024f0c5061fde8ec248314e) feat(ui): Support log viewing for user supplied init containers (#7212)
 * [1453edca7](https://github.com/argoproj/argo-workflows/commit/1453edca7c510df5b3cfacb8cf1f99a2b9635b1a) fix: Do not patch empty progress. fixes #7184 (#7204)
 * [34e5b5477](https://github.com/argoproj/argo-workflows/commit/34e5b54779b25416d7dbd41d78e0effa523c1a21) fix: ci sleep command syntax for macOS 12 (#7203)
 * [57d894cb9](https://github.com/argoproj/argo-workflows/commit/57d894cb9a59ae294978af2ae106cae269446107) docs(cli): Move --memoized flag from argo resubmit out of experimental (#7197)
 * [17fb9d813](https://github.com/argoproj/argo-workflows/commit/17fb9d813d4d0fb15b0e8652caa52e1078f9bfeb) fix: allow wf templates without parameter values (Fixes #6044) (#7124)
 * [225a5a33a](https://github.com/argoproj/argo-workflows/commit/225a5a33afb0010346d10b65f459626eed8cd124) fix(test): Make TestMonitorProgress Faster (#7185)
 * [19cff114a](https://github.com/argoproj/argo-workflows/commit/19cff114a20008a8d5460fd5c0508f43e38bcb11) chore(controller): s/retryStrategy.when/retryStrategy.expression/ (#7180)
 * [52321e2ce](https://github.com/argoproj/argo-workflows/commit/52321e2ce4cb7077f38fca489059c06ec36732c4) feat(controller): Add default container annotation to workflow pod. Fixes: #5643 (#7127)
 * [0482964d9](https://github.com/argoproj/argo-workflows/commit/0482964d9bc09585fd908ed5f912fd8c72f399ff) fix(ui): Correctly show zero-state when CRDs not installed. Fixes #7001 (#7169)
 * [a6ce659f8](https://github.com/argoproj/argo-workflows/commit/a6ce659f80b3753fb05bbc3057e3b9795e17d211) feat!: Remove the hidden flag `verify` from `argo submit` (#7158)
 * [f9e554d26](https://github.com/argoproj/argo-workflows/commit/f9e554d268fd9dbaf0e07f8a10a8ac03097250ce) fix: Relative submodules in git artifacts. Fixes #7141 (#7162)
 * [22af73650](https://github.com/argoproj/argo-workflows/commit/22af7365049a34603cd109e2bcfa51eeee5e1393) fix: Reorder CI checks so required checks run first (#7142)
 * [ded64317f](https://github.com/argoproj/argo-workflows/commit/ded64317f21fa137cfb48c2d009571d0ada8ac50) docs(ui): document wftemplate enum dropdown. Fixes #6824 (#7114)
 * [bd3be1152](https://github.com/argoproj/argo-workflows/commit/bd3be115299708dc4f97f3559e6f57f38c0c0d48) fix: Return error when YAML submission is invalid (#7135)
 * [7886a2b09](https://github.com/argoproj/argo-workflows/commit/7886a2b090d4a31e1cacbc6cff4a8cb18914763c) feat: self reporting workflow progress (#6714)
 * [877752428](https://github.com/argoproj/argo-workflows/commit/8777524281bb70e177c3e7f9d530d3cce6505864) feat: Add FAQ link to unknown pod watch error. Fixes #6886 (#6953)
 * [209ff9d9b](https://github.com/argoproj/argo-workflows/commit/209ff9d9bd094e1c230be509d2444ae36b4ff04e) fix: Respect template.HTTP.timeoutSeconds (#7136)
 * [02165aaeb](https://github.com/argoproj/argo-workflows/commit/02165aaeb83754ee15c635b3707b119a88ec43bd) fix(controller): default volume/mount to emissary (#7125)
 * [475d8d54f](https://github.com/argoproj/argo-workflows/commit/475d8d54f0756e147775c28874de0859804e875c) feat: Adds SSO control via individual namespaces. Fixes #6916 (#6990)
 * [af32f5799](https://github.com/argoproj/argo-workflows/commit/af32f57995dac8dbfd5ffe1a6477beb3004e254b) Revert "chore: only run API if needed"
 * [3d597269e](https://github.com/argoproj/argo-workflows/commit/3d597269e48215080e3318019f1d95ee01d7dacd) fix: typo in node-field-selector.md (#7116)
 * [e716aad73](https://github.com/argoproj/argo-workflows/commit/e716aad73072fbea8ed25306634002301909fa93) refactor: Fixing typo WriteTeriminateMessage #6999 (#7043)
 * [ca87f2906](https://github.com/argoproj/argo-workflows/commit/ca87f2906995b8fecb796d94299f54f6dfbd6a41) fix: Daemon step in running state, but dependents don't start (#7107)
 * [5eab921eb](https://github.com/argoproj/argo-workflows/commit/5eab921eb0f537f1102bbdd6c38b4e52740a88a9) feat: Add workflow logs selector support. Fixes #6910 (#7067)
 * [1e8715954](https://github.com/argoproj/argo-workflows/commit/1e871595414d05e2b250bfa3577cf23b9ab7fa38) fix: Add pod name format annotation. Fixes #6962 and #6989 (#6982)
 * [93c11a24f](https://github.com/argoproj/argo-workflows/commit/93c11a24ff06049c2197149acd787f702e5c1f9b) feat: Add TLS to Metrics and Telemetry servers (#7041)
 * [c5de76b6a](https://github.com/argoproj/argo-workflows/commit/c5de76b6a2d7b13c6ac7bc798e5c7615bf015de1) fix: Format issue on WorkflowEventBinding parameters. Fixes #7042 (#7087)
 * [64fce4a82](https://github.com/argoproj/argo-workflows/commit/64fce4a827692cb67284d800ad92f1af37f654fc) fix: Ensure HTTP reconciliation occurs for onExit nodes (#7084)
 * [d6a62c3e2](https://github.com/argoproj/argo-workflows/commit/d6a62c3e26d49ab752851be288bcd503386e8ff6) fix: Ensure HTTP templates have children assigned (#7082)
 * [2bbba15cf](https://github.com/argoproj/argo-workflows/commit/2bbba15cf53395e0f4f729fd86f74355827b6d76) feat: Bring Python client to core (#7025)
 * [46767b86b](https://github.com/argoproj/argo-workflows/commit/46767b86bc29cd8cb1df08fdcc0b5bb351c243f3) fix(ui): Correct HTTP connection in pipeline view (#7077)
 * [201ba5525](https://github.com/argoproj/argo-workflows/commit/201ba552557b9edc5908c5224471fec4823b3302) fix: add outputs.parameters scope to script/containerSet templates. Fixes #6439 (#7045)
 * [60f2ae95e](https://github.com/argoproj/argo-workflows/commit/60f2ae95e954e4af35cd93b12f554fbaf6ca1e41) feat: Add user's email in the server gatekeeper logs (#7062)
 * [31bf57b64](https://github.com/argoproj/argo-workflows/commit/31bf57b643be995860ec77b942c2b587faa0b4ff) fix: Unit test TestNewOperation order of pipeline execution may be different to order of submit (#7069)
 * [18c953df6](https://github.com/argoproj/argo-workflows/commit/18c953df670ab3be6b064a028acdb96c19d0fce2) docs(cli): fix cron delete flag description (#7058)
 * [4734cbc44](https://github.com/argoproj/argo-workflows/commit/4734cbc44dedeb2c7e5984aab5dc9b0c846ff491) fix: Precedence of ContainerRuntimeExecutor and ContainerRuntimeExecutors (#7056)
 * [56ee94147](https://github.com/argoproj/argo-workflows/commit/56ee94147c1d65b03097b453e090e4930d8da591) feat: Bring Java client into core. (#7026)
 * [65ff89ac8](https://github.com/argoproj/argo-workflows/commit/65ff89ac81a8350fb5c34043146fcb1ec4ffbf23) fix: Memoize for Step and DAG level (#7028)
 * [8d7ca73b0](https://github.com/argoproj/argo-workflows/commit/8d7ca73b04438a17105312a07263fb6e5417f76e) feat: Upgrade to Golang 1.17 (#7029)
 * [0baa4a203](https://github.com/argoproj/argo-workflows/commit/0baa4a2039b981e1ca118a04ceb6ac6439a82d0d) fix: Support RFC3339 in creationTimeStamp. Fixes #6906 (#7044)
 * [25e1939e2](https://github.com/argoproj/argo-workflows/commit/25e1939e25551cd15d89bd47e4232c8073b40a9c) feat(ui): add label/state filter to cronworkflow. Fixes #7034 (#7035)
 * [0758eab11](https://github.com/argoproj/argo-workflows/commit/0758eab11decb8a1e741abef3e0ec08c48a69ab8) feat(server): Sync dispatch of webhook events by default. Fixes #6981 and #6732 (#6995)
 * [ba472e131](https://github.com/argoproj/argo-workflows/commit/ba472e1319d1a393107947aa6d5980906d1cb711) fix: Minor corrections to Swagger/JSON schema (#7027)
 * [182b696df](https://github.com/argoproj/argo-workflows/commit/182b696df6652981e490af47deb321cb1bd741ff) feat: add unknown pod watch error explanation to FAQ.md (#6988)
 * [3f0a531aa](https://github.com/argoproj/argo-workflows/commit/3f0a531aa14142a5f4f749093b23f690c98eb41e) fix(controller): use correct pod.name in retry/podspecpatch scenario. Fixes #7007 (#7008)
 * [6a674e7cb](https://github.com/argoproj/argo-workflows/commit/6a674e7cb2e70259efe377db4235b3bc2dbdb9b0) feat(ui): wider stroke width for selected node (#7000)
 * [7f5262338](https://github.com/argoproj/argo-workflows/commit/7f526233824c5065c7a9ee63dac59f168f04f95d) fix(ui): labels in report/archive should be sorted (#7009)
 * [50813daaf](https://github.com/argoproj/argo-workflows/commit/50813daaf5b718d143af84f0f5847273114734da) fix(controller): fix bugs in processing retry node output parameters. Fixes #6948 (#6956)
 * [86ddda592](https://github.com/argoproj/argo-workflows/commit/86ddda592c4f432f629775908bc9b737ab920cde) fix: Restore default pod name version to v1 (#6998)
 * [0446f521d](https://github.com/argoproj/argo-workflows/commit/0446f521d045b542734ee11fafea99daa2ee3105) fix(artifact)!: default https to any URL missing a scheme. Fixes #6973 (#6985)
 * [cfdebf64e](https://github.com/argoproj/argo-workflows/commit/cfdebf64eed8b87bf0f84f4284323e72f6d14cbb) fix(typo): correct typo in event-dispatch error log (#6688)
 * [2a15853ec](https://github.com/argoproj/argo-workflows/commit/2a15853ec32701dd2dbccea2cc735d8334da1680) fix: OAuth2 callback with self-signed Root CA. Fixes #6793 (#6978)
 * [6384e5f21](https://github.com/argoproj/argo-workflows/commit/6384e5f2104c3df69070c33da636599d413f7d6c) feat: fix workflow configmap argument cannot be referenced as local variable. Fixes #6869 (#6898)
 * [72356abad](https://github.com/argoproj/argo-workflows/commit/72356abad157b26905be9251c654413b5eb9e6c7) fix: Allow self-signed Root CA for SSO. Fixes #6793 (#6961)
 * [e1fe5b58a](https://github.com/argoproj/argo-workflows/commit/e1fe5b58a22e3bbac01e1328998591b37c29b1ad) feat(ui): add label filter to template workflow (#6955)
 * [c705294c9](https://github.com/argoproj/argo-workflows/commit/c705294c9813b496b2de5c2ecd6f578d86a329b6) fix: response on canceled workflow action (#6859) (#6967)
 * [cf9a6cdd0](https://github.com/argoproj/argo-workflows/commit/cf9a6cdd098901873ac584db649b694041530eb2) fix: Unreachable code in util/tls/tls.go. Fixes #6950 (#6960)
 * [6e1f2505a](https://github.com/argoproj/argo-workflows/commit/6e1f2505a18e427d3a39fadafad2fd83f6eff521) fix: multi-steps workflow (#6957)
 * [1239ba8ef](https://github.com/argoproj/argo-workflows/commit/1239ba8ef06d31ead8234f090881de892819fbfb) chore(ui): Move pod name functions and add tests. Fixes #6946 (#6954)
 * [452433989](https://github.com/argoproj/argo-workflows/commit/4524339892ae3e98bf6a5c9f11c5e2f41622f06c) fix(docs): fix data transformation example (#6901)
 * [73d60108b](https://github.com/argoproj/argo-workflows/commit/73d60108b74341baf162580c11323624ba3936b5) fix(executor): add test for non-root user creating a script (#6905)
 * [79d03a920](https://github.com/argoproj/argo-workflows/commit/79d03a9203d85d270017b5f0104fbf88879c6cdc) fix: Skip empty withParam tasks. Fixes #6834 (#6912)
 * [b0d1f6583](https://github.com/argoproj/argo-workflows/commit/b0d1f658388ebd4ab2c1f26a87d66282304fa391) feat(executor): default executor to emissary. Fixes #6785 (#6882)
 * [67fe87ba9](https://github.com/argoproj/argo-workflows/commit/67fe87ba9f3b8dbcb0f330a7ef593403d8909061) fix(ui): Change pod names to new format. Fixes #6865 (#6925)
 * [1bcfa1aa5](https://github.com/argoproj/argo-workflows/commit/1bcfa1aa5dcb90559772be2a32512ba17d72c4ed) fix: BASE_HREF ignore (#6926)
 * [41515d65c](https://github.com/argoproj/argo-workflows/commit/41515d65c2cc3ac1f492942e21fd33c4e31acdb1) fix(controller): Fix getPodByNode, TestGetPodByNode. Fixes #6458 (#6897)
 * [5a7708c2c](https://github.com/argoproj/argo-workflows/commit/5a7708c2c449544905bbed474f9edc21e9fcf3e7) fix: do not delete expr tag tmpl values. Fixes #6909 (#6921)
 * [2fd4b8aad](https://github.com/argoproj/argo-workflows/commit/2fd4b8aad161f0510fa5318de8f56724ec915e2a) feat(ui): label autocomplete for report tab (#6881)
 * [c5b1533d3](https://github.com/argoproj/argo-workflows/commit/c5b1533d34c37d94defe98742a357c8e6b992db8) feat(ui): resume on selected node. Fixes #5763 (#6885)
 * [ef6aad617](https://github.com/argoproj/argo-workflows/commit/ef6aad6171c4ed165078e9569364d7d7c54b434f) fix: Parameter with Value and Default (#6887)
 * [4d38404df](https://github.com/argoproj/argo-workflows/commit/4d38404dfe2d6b941fece60c56db21a3b6f70c4b) fix: Resource requests on init/wait containers. Fixes #6809 (#6879)
 * [cca4792c5](https://github.com/argoproj/argo-workflows/commit/cca4792c5adfd44340238122f7fe4e6010a96676) fix(ui): fixed width button (#6883)
 * [b54809771](https://github.com/argoproj/argo-workflows/commit/b54809771b871b9425c476999100b0c72a4900aa) feat(server): archivedWf add namePrefix search. Fixes #6743 (#6801)
 * [689ad6818](https://github.com/argoproj/argo-workflows/commit/689ad68182d9f2dc1479dc5f1398ff646cef4357) feat: add autocomplete for labels for archived workflow (#6776)
 * [c962bb189](https://github.com/argoproj/argo-workflows/commit/c962bb189b491bcd8d2c4bedb75f778ca1301305) fix: upgrade sprig to v3.2.2 (#6876)

### Contributors

 * AdamKorcz
 * Alex Collins
 * Andy
 * Arthur Sudre
 * BOOK
 * Basanth Jenu H B
 * Benny Cornelissen
 * Bob Haddleton
 * Denis Melnik
 * Dillen Padhiar
 * Dimas Yudha P
 * Dominik Deren
 * FengyunPan2
 * Flaviu Vadan
 * Gammal-Skalbagge
 * Guillaume Fillon
 * Hong Wang
 * Isitha Subasinghe
 * Iven
 * J.P. Zivalich
 * Jonathan
 * Joshua Carp
 * Joyce Piscos
 * Julien Duchesne
 * Ken Kaizu
 * Kyle Hanks
 * Markus Lippert
 * Mathew Wicks
 * Micah Beeman
 * Michael Weibel
 * Miroslav Tomasik
 * NextNiclas
 * Nico Mandery
 * Nicoló Lino
 * Niklas Hansson
 * Nityananda Gohain
 * Peixuan Ding
 * Peter Evers
 * Rob Herley
 * Roel van den Berg
 * SalvadorC
 * Saravanan Balasubramanian
 * Simon Behar
 * Takumi Sue
 * Tianchu Zhao
 * Ting Yuan
 * Tom Meadows
 * Valér Orlovský
 * William Van Hevelingen
 * Yuan (Bob) Gong
 * Yuan Tang
 * Zadkiel
 * Ziv Levi
 * cod-r
 * dependabot[bot]
 * jhoenger
 * jwjs36987
 * kennytrytek
 * khyer
 * kostas-theo
 * momom-i
 * smile-luobin
 * toohsk
 * ybyang
 * zorulo
 * 大雄

## v3.2.11 (2022-05-03)

 * [8faf269a7](https://github.com/argoproj/argo-workflows/commit/8faf269a795c0c9cc251152f9e4db4cd49234e52) fix: Remove binaries from Windows image. Fixes #8417 (#8420)

### Contributors

 * Markus Lippert

## v3.2.10 (2022-05-03)

 * [877216e21](https://github.com/argoproj/argo-workflows/commit/877216e2159f07bfb27aa1991aa249bc2e9a250c) fix: Added artifact Content-Security-Policy (#8585)

### Contributors

 * Alex Collins

## v3.2.9 (2022-03-02)

 * [ce91d7b1d](https://github.com/argoproj/argo-workflows/commit/ce91d7b1d0115d5c73f6472dca03ddf5cc2c98f4) fix(controller): fix pod stuck in running when using podSpecPatch and emissary (#7407)
 * [f9268c9a7](https://github.com/argoproj/argo-workflows/commit/f9268c9a7fca807d7759348ea623e85c67b552b0) fix: e2e
 * [f581d1920](https://github.com/argoproj/argo-workflows/commit/f581d1920fe9e29dc0318fe628eb5a6982d66d93) fix: panic on synchronization if workflow has mutex and semaphore (#8025)
 * [192c6b6a4](https://github.com/argoproj/argo-workflows/commit/192c6b6a4a785fa310b782a4e62e59427ece3bd1) fix: Fix broken Windows build (#7933)

### Contributors

 * Markus Lippert
 * Saravanan Balasubramanian
 * Yuan (Bob) Gong

## v3.2.8 (2022-02-04)

 * [8de5416ac](https://github.com/argoproj/argo-workflows/commit/8de5416ac6b8f5640a8603e374d99a18a04b5c8d) fix: Missed workflow should not trigger if Forbidden Concurrency with no StartingDeadlineSeconds (#7746)

### Contributors

 * Saravanan Balasubramanian

## v3.2.7 (2022-01-27)

 * [342e44a28](https://github.com/argoproj/argo-workflows/commit/342e44a28e09a5b062745aa8cbea72339b1217b9) fix: Match cli display pod names with k8s. Fixes #7646 (#7653)
 * [3429b1617](https://github.com/argoproj/argo-workflows/commit/3429b161783ae6d68ebd580c8c02590c6795abac) fix: Retry with DAG. Fixes #7617 (#7652)
 * [7a3b766d4](https://github.com/argoproj/argo-workflows/commit/7a3b766d4a8df693c7fcff867423d56f5658801e) fix: Support artifact ref from tmpl in UI. Fixes #7587 (#7591)
 * [e7a628cca](https://github.com/argoproj/argo-workflows/commit/e7a628ccadf50f8a907c4f22a7c8de8cede838a6) fix: Support inputs for inline DAG template. Fixes #7432 (#7439)
 * [3f889c484](https://github.com/argoproj/argo-workflows/commit/3f889c484fd50c4e1385c1b81c49d3d7904dc37c) fix: Fix inconsistent ordering of workflows with the list command. Fixes #7581 (#7594)
 * [77499bd38](https://github.com/argoproj/argo-workflows/commit/77499bd38308545a21d1e8f9a671b2d19001684d) fix: fix nil pointer about Outputs.ExitCode. Fixes #7458 (#7459)
 * [74ed83a28](https://github.com/argoproj/argo-workflows/commit/74ed83a287b72e45cd9c560d3278cec0c621ee27) fix: Global param value incorrectly overridden when loading from configmaps (#7515)
 * [db58583d2](https://github.com/argoproj/argo-workflows/commit/db58583d297d23bc40364150576ef17a86b2c914) fix: only aggregates output from successful nodes (#7517)
 * [38fdf4c44](https://github.com/argoproj/argo-workflows/commit/38fdf4c44d78f9c388ee5e0f71e7edf97f81f364) fix: out of range in MustUnmarshal (#7485)
 * [e69f2d790](https://github.com/argoproj/argo-workflows/commit/e69f2d7902d3e28e863d72cb81b0e65e55f8fb6e) fix: Support terminating with `templateRef`. Fixes #7214 (#7657)

### Contributors

 * AdamKorcz
 * Alex Collins
 * Dillen Padhiar
 * FengyunPan2
 * J.P. Zivalich
 * Peixuan Ding
 * Yuan Tang

## v3.2.6 (2021-12-17)

 * [2a9fb7067](https://github.com/argoproj/argo-workflows/commit/2a9fb706714744eff7f70dbf56703bcc67ea67e0) Revert "fix(controller): default volume/mount to emissary (#7125)"

### Contributors

 * Alex Collins

## v3.2.5 (2021-12-15)

 * [fc4c3d51e](https://github.com/argoproj/argo-workflows/commit/fc4c3d51e93858c2119124bbb3cb2ba1c35debcb) fix: lint
 * [09ac50b7d](https://github.com/argoproj/argo-workflows/commit/09ac50b7dc09a8f8497897254252760739363d0d) fix: lint
 * [c48269fe6](https://github.com/argoproj/argo-workflows/commit/c48269fe678ae74092afda498da2f897ba22d177) fix: codegen
 * [e653e4f2f](https://github.com/argoproj/argo-workflows/commit/e653e4f2f3652a95e8584488e657838f04d01f7e) fix: e2e test and codegen
 * [970bcc041](https://github.com/argoproj/argo-workflows/commit/970bcc04179a98cfcce31977aeb34fbf1a68ebaf) fix: e2e testcase
 * [fbb2edb03](https://github.com/argoproj/argo-workflows/commit/fbb2edb03494160c28a83d2a04546323e119caff) fix: unit test
 * [7933f9579](https://github.com/argoproj/argo-workflows/commit/7933f9579680de570f481004d734bd36ea0ca69e) fix: makefile and common variable
 * [0eec0f0d5](https://github.com/argoproj/argo-workflows/commit/0eec0f0d5495a0d5174e74e6cac87cc068eb5295) fix: added check for initContainer name in workflow template (#7411)
 * [7c2427005](https://github.com/argoproj/argo-workflows/commit/7c2427005cb69f351b081a6c546bda7978ae665f) fix: Skip missed executions if CronWorkflow schedule is changed. Fixes #7182 (#7353)
 * [48e7906d5](https://github.com/argoproj/argo-workflows/commit/48e7906d503831385261dcccd4e1c8695c895895) fix: argument of PodName function (fixes #7315) (#7316)
 * [3911d0915](https://github.com/argoproj/argo-workflows/commit/3911d091530fc743585c72c7366db3a9c7932bfd) fix: improve error message for ListArchivedWorkflows (#7345)
 * [5a472dd39](https://github.com/argoproj/argo-workflows/commit/5a472dd39faaf57a8b4f1e2d748d5167b66d07a0) fix: cannot access HTTP template's outputs (#7200)
 * [a85458e86](https://github.com/argoproj/argo-workflows/commit/a85458e86fa80f931f1a0a42230f843d26d84fad) fix(ui): Fix events error. Fixes #7320 (#7321)
 * [6bcedb18b](https://github.com/argoproj/argo-workflows/commit/6bcedb18be40005f8f81eedf923e890a33e9d11e) fix: Validate the type of configmap before loading parameters. Fixes #7312 (#7314)
 * [a142ac234](https://github.com/argoproj/argo-workflows/commit/a142ac234ee7a4e789ac626636837c00b296be23) fix: Handle the panic in operate function (#7262)
 * [34f3d13e7](https://github.com/argoproj/argo-workflows/commit/34f3d13e7e603198548937beb8df7e84f022b918) fix: pod name shown in log when pod deletion (#7301)
 * [06e5950b8](https://github.com/argoproj/argo-workflows/commit/06e5950b8f3fbafdfeb7d45a603caf03096f958e) fix: Use default value for empty env vars (#7297)
 * [2f96c464a](https://github.com/argoproj/argo-workflows/commit/2f96c464a3098b34dfd94c44cc629c881ea3d33f) fix: Fix `argo auth token`. Fixes #7175 (#7186)
 * [f8f93a6b1](https://github.com/argoproj/argo-workflows/commit/f8f93a6b16e4a1ec17060ef916ea6bd2e8cf80a4) fix: refactor/fix pod GC. Fixes #7159 (#7176)
 * [728a1ff67](https://github.com/argoproj/argo-workflows/commit/728a1ff67364986cdfe2146dc3179d9705ee26ad) fix: Relative submodules in git artifacts. Fixes #7141 (#7162)
 * [274c5f990](https://github.com/argoproj/argo-workflows/commit/274c5f990dd16b8f2523706549b07c40d60a3fab) fix: Reorder CI checks so required checks run first (#7142)
 * [49b3f0cb2](https://github.com/argoproj/argo-workflows/commit/49b3f0cb2733dec438d8340f439467b7661b8bc2) fix(controller): default volume/mount to emissary (#7125)
 * [f5f6899f5](https://github.com/argoproj/argo-workflows/commit/f5f6899f531126a18f5f42201156c995630fdf1b) fix: Add pod name format annotation. Fixes #6962 and #6989 (#6982)
 * [30e34ada8](https://github.com/argoproj/argo-workflows/commit/30e34ada8cab77c56e3917144a29b96fb070a06d) fix: prevent bad commit messages, fix broken builds (#7086)
 * [926108028](https://github.com/argoproj/argo-workflows/commit/926108028cea2e0ef305c24c86b9e685a0ac9c5e) fix: Format issue on WorkflowEventBinding parameters. Fixes #7042 (#7087)
 * [a0ac28893](https://github.com/argoproj/argo-workflows/commit/a0ac28893b63a73f6d875b4087fc04f420595815) fix: add outputs.parameters scope to script/containerSet templates. Fixes #6439 (#7045)
 * [cae69e62b](https://github.com/argoproj/argo-workflows/commit/cae69e62b37a6f8256a9cab53d793fc5102ebfe4) fix: Unit test TestNewOperation order of pipeline execution may be different to order of submit (#7069)
 * [fa0772fd9](https://github.com/argoproj/argo-workflows/commit/fa0772fd936364d915514da4ea1217c0e3639af1) docs(cli): fix cron delete flag description (#7058)
 * [94fe92f12](https://github.com/argoproj/argo-workflows/commit/94fe92f12a21af225c0d44fa7b20a6b335edaadf) fix: OAuth2 callback with self-signed Root CA. Fixes #6793 (#6978)
 * [fbb51ac20](https://github.com/argoproj/argo-workflows/commit/fbb51ac2002b896ea3320802b814adb4c3d0d5e4) fix: multi-steps workflow (#6957)
 * [6b7e074f1](https://github.com/argoproj/argo-workflows/commit/6b7e074f149085f9fc2da48656777301e87e8aae) fix(docs): fix data transformation example (#6901)
 * [24ffd36bf](https://github.com/argoproj/argo-workflows/commit/24ffd36bfc417fe382a1e015b0ec4d89b2a12280) fix: Allow self-signed Root CA for SSO. Fixes #6793 (#6961)

### Contributors

 * Alex Collins
 * Arthur Sudre
 * BOOK
 * Dillen Padhiar
 * Dominik Deren
 * J.P. Zivalich
 * Jonathan
 * NextNiclas
 * Peter Evers
 * Saravanan Balasubramanian
 * Simon Behar
 * Takumi Sue
 * Tianchu Zhao
 * Valér Orlovský
 * Yuan Tang
 * Ziv Levi

## v3.2.4 (2021-11-17)

 * [bf72557b5](https://github.com/argoproj/argo-workflows/commit/bf72557b53792cf23ce3ee4cbec779bb7e420ba8) fix: add gh ecdsa and ed25519 to known hosts (#7226)
 * [ee6939048](https://github.com/argoproj/argo-workflows/commit/ee6939048ab2b15103ece77b0d74afd6f0d3e691) fix: Fix ANSI color sequences escaping (#7211)
 * [02b4c31c4](https://github.com/argoproj/argo-workflows/commit/02b4c31c41e3b509188057d31735b1f3684488f5) fix: ci sleep command syntax for macOS 12 (#7203)
 * [e65d9d4a9](https://github.com/argoproj/argo-workflows/commit/e65d9d4a983670c70707d283573d06a68971f6b4) fix: allow wf templates without parameter values (Fixes #6044) (#7124)
 * [7ea35fa1f](https://github.com/argoproj/argo-workflows/commit/7ea35fa1fd0fa739f16b5978a52a521fafb90d4e) fix(ui): Correctly show zero-state when CRDs not installed. Fixes #7001 (#7169)
 * [bdcca4e17](https://github.com/argoproj/argo-workflows/commit/bdcca4e175ee71e402e567d857209f7ddce79d9a) fix: Return error when YAML submission is invalid (#7135)
 * [a4390dd9a](https://github.com/argoproj/argo-workflows/commit/a4390dd9a9bbd1280774fe10cf455d655a4ea873) fix: Respect template.HTTP.timeoutSeconds (#7136)
 * [c1553dfd7](https://github.com/argoproj/argo-workflows/commit/c1553dfd73e3734b6dbdb4fdb5828df1e6fff792) fix: typo in node-field-selector.md (#7116)
 * [508027b35](https://github.com/argoproj/argo-workflows/commit/508027b3521ef2b51293aa1dc58a911c753d148c) fix: Daemon step in running state, but dependents don't start (#7107)
 * [ccc8d839c](https://github.com/argoproj/argo-workflows/commit/ccc8d839c2da3c561bb7f5c078cd26c17ce9a9c5) fix: Ensure HTTP reconciliation occurs for onExit nodes (#7084)
 * [00f953286](https://github.com/argoproj/argo-workflows/commit/00f953286f4e3a120b5dff4dc1dbd32adf1c7237) fix: Ensure HTTP templates have children assigned (#7082)
 * [9b4dd1e83](https://github.com/argoproj/argo-workflows/commit/9b4dd1e83a3362b8f561e380566a7af3ab68ba8d) fix(ui): Correct HTTP connection in pipeline view (#7077)
 * [f43d8b01a](https://github.com/argoproj/argo-workflows/commit/f43d8b01a752829e5c6208215b767e3ab68c9dc2) fix: Memoize for Step and DAG level (#7028)
 * [7256dace6](https://github.com/argoproj/argo-workflows/commit/7256dace6c1bb6544f7a0e79220b993c32bc3daf) fix: Support RFC3339 in creationTimeStamp. Fixes #6906 (#7044)
 * [0837d0c6a](https://github.com/argoproj/argo-workflows/commit/0837d0c6afc06798820a8b41f0acad35aac11143) fix(controller): use correct pod.name in retry/podspecpatch scenario. Fixes #7007 (#7008)
 * [09d07111e](https://github.com/argoproj/argo-workflows/commit/09d07111e21ce9d01469315cc3a67ff10ed05617) fix(typo): correct typo in event-dispatch error log (#6688)
 * [26afd8ec9](https://github.com/argoproj/argo-workflows/commit/26afd8ec9db0cfc98a4cee9b7bcd3a211c2119c4) fix: OAuth2 callback with self-signed Root CA. Fixes #6793 (#6978)
 * [d9eafeee1](https://github.com/argoproj/argo-workflows/commit/d9eafeee1ce309726b32b3736086da1529487fa8) fix: Allow self-signed Root CA for SSO. Fixes #6793 (#6961)
 * [46f88f423](https://github.com/argoproj/argo-workflows/commit/46f88f4230b546863f83ccf56b94697e39ab0e11) fix: response on canceled workflow action (#6859) (#6967)
 * [32ecc4654](https://github.com/argoproj/argo-workflows/commit/32ecc4654cda8e84d6bb7a696675e14da8665747) fix: Unreachable code in util/tls/tls.go. Fixes #6950 (#6960)
Fixes #6950 (#6960) - * [2fbeb80f0](https://github.com/argoproj/argo-workflows/commit/2fbeb80f0c320805de72c42ea5b106ab31f560a8) fix(executor): add test for non-root user creating a script (#6905) - * [15e9ba84d](https://github.com/argoproj/argo-workflows/commit/15e9ba84d1b783fe26ed0e507b1d5a868b43ee0e) fix: Skip empty withParam tasks. Fixes #6834 (#6912) - * [d31860cd1](https://github.com/argoproj/argo-workflows/commit/d31860cd1d20c07ce28b0e7035fbf210019fa38a) fix: Parameter with Value and Default (#6887) - * [ba4ffdf8d](https://github.com/argoproj/argo-workflows/commit/ba4ffdf8d1948302942c9860a1d2fea8f8d6db8e) fix(ui): fixed width button (#6883) - -### Contributors - - * Alex Collins - * Bob Haddleton - * Guillaume Fillon - * Iven - * Kyle Hanks - * Mathew Wicks - * Miroslav Tomasik - * NextNiclas - * Rob Herley - * SalvadorC - * Saravanan Balasubramanian - * Simon Behar - * Tianchu Zhao - * Zadkiel - * Ziv Levi - * kennytrytek - -## v3.2.3 (2021-10-26) - - * [e5dc961b7](https://github.com/argoproj/argo-workflows/commit/e5dc961b7846efe0fe36ab3a0964180eaedd2672) fix: Precedence of ContainerRuntimeExecutor and ContainerRuntimeExecutors (#7056) - * [3f14c68e1](https://github.com/argoproj/argo-workflows/commit/3f14c68e166a6fbb9bc0044ead5ad4e5b424aab9) feat: Bring Java client into core. (#7026) - * [48e1aa974](https://github.com/argoproj/argo-workflows/commit/48e1aa9743b523abe6d60902e3aa8546edcd221b) fix: Minor corrections to Swagger/JSON schema (#7027) - * [10f5db67e](https://github.com/argoproj/argo-workflows/commit/10f5db67ec29c948dfa82d1f521352e0e7eb4bda) fix(controller): fix bugs in processing retry node output parameters. Fixes #6948 (#6956) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * smile-luobin - -## v3.2.2 (2021-10-21) - - * [8897fff15](https://github.com/argoproj/argo-workflows/commit/8897fff15776f31fbd7f65bbee4f93b2101110f7) fix: Restore default pod name version to v1 (#6998) - * [99d110985](https://github.com/argoproj/argo-workflows/commit/99d1109858ddcedfc9c5c85df53e1bd422887794) chore(ui): Move pod name functions and add tests. Fixes #6946 (#6954) - -### Contributors - - * J.P. Zivalich - -## v3.2.1 (2021-10-19) - - * [74182fb90](https://github.com/argoproj/argo-workflows/commit/74182fb9017e0f05c0fa6afd32196a1988423deb) lint - * [7cdbee05c](https://github.com/argoproj/argo-workflows/commit/7cdbee05c42e5d73e375bcd5d3db264fa6bc0d4b) fix(ui): Change pod names to new format. Fixes #6865 (#6925) - * [5df91b289](https://github.com/argoproj/argo-workflows/commit/5df91b289758e2f4953919621a207129a9418226) fix: BASE_HREF ignore (#6926) - * [d04aabf2c](https://github.com/argoproj/argo-workflows/commit/d04aabf2c3094db557c7edb1b342dcce54ada2c7) fix(controller): Fix getPodByNode, TestGetPodByNode. Fixes #6458 (#6897) - * [72446bf3b](https://github.com/argoproj/argo-workflows/commit/72446bf3bad0858a60e8269f5f476192071229e5) fix: do not delete expr tag tmpl values. Fixes #6909 (#6921) - * [2922a2a9d](https://github.com/argoproj/argo-workflows/commit/2922a2a9d8506ef2e84e2b1d3172168ae7ac6aeb) fix: Resource requests on init/wait containers. Fixes #6809 (#6879) - * [84623a4d6](https://github.com/argoproj/argo-workflows/commit/84623a4d687b962898bcc718bdd98682367586c1) fix: upgrade sprig to v3.2.2 (#6876) - -### Contributors - - * Alex Collins - * Hong Wang - * J.P. 
Zivalich - * Micah Beeman - * Saravanan Balasubramanian - * zorulo - -## v3.2.0 (2021-10-05) - - -### Contributors - - -## v3.2.0-rc6 (2021-10-05) - - * [994ff7454](https://github.com/argoproj/argo-workflows/commit/994ff7454b32730a50b13bcbf14196b1f6f404a6) fix(UI): use default params on template submit form (#6858) - * [47d713bbb](https://github.com/argoproj/argo-workflows/commit/47d713bbba9ac3a210c0b3c812f7e05522d8e7b4) fix: Add OIDC issuer alias. Fixes #6759 (#6831) - * [11a8c38bb](https://github.com/argoproj/argo-workflows/commit/11a8c38bbe77dcc5f85a60b4f7c298770a03aafc) fix(exec): Failed to load http artifact. Fixes #6825 (#6826) - * [147730d49](https://github.com/argoproj/argo-workflows/commit/147730d49090348e09027182dcd3339654993f41) fix(docs): cron backfill example (#6833) - * [4f4157bb9](https://github.com/argoproj/argo-workflows/commit/4f4157bb932fd277291851fb86ffcb9217c8522e) fix: add HTTP genre and sort (#6798) - -### Contributors - - * Raymond Wong - * Shea Sullivan - * Tianchu Zhao - * kennytrytek - * smile-luobin - -## v3.2.0-rc5 (2021-09-29) - - * [87a57328e](https://github.com/argoproj/argo-workflows/commit/87a57328e72d794b29481b7377c49fd58b2b9480) feat: implement WatchEvents for argoKubeWorkflowServiceClient. Fixes #6173 (#6816) - * [543366fab](https://github.com/argoproj/argo-workflows/commit/543366fab79ed79c36f172aba8a288ce73d6f675) fix(apiclient): remove default client in facade. Fixes #6733 (#6800) - * [2c3ac705a](https://github.com/argoproj/argo-workflows/commit/2c3ac705a20ae1cf38d0eb30b15826f2946857ac) fix: Missing duration metrics if controller restart (#6815) - * [a87e94b62](https://github.com/argoproj/argo-workflows/commit/a87e94b620784c93f13543de83cd784e20fad595) fix: Fix expression template random errors. Fixes #6673 (#6786) - * [254c73084](https://github.com/argoproj/argo-workflows/commit/254c73084da5f02a5edfea42d4671ae97703592f) fix: Fix bugs, unable to resolve tasks aggregated outputs in dag outputs. Fixes #6684 (#6692) - * [965309925](https://github.com/argoproj/argo-workflows/commit/96530992502bfd126fd7dcb0a704d3c36c166bd1) fix: remove windows UNC paths from wait/init containers. Fixes #6583 (#6704) - * [ffb0db711](https://github.com/argoproj/argo-workflows/commit/ffb0db711b611633e30a6586b716af02c37a9de6) fix: Missing duration metrics if controller restart (#6815) - * [81bfa21eb](https://github.com/argoproj/argo-workflows/commit/81bfa21eb56cdba45b871f9af577a9dc72aa69f2) feat(controller): add workflow level archivedLogs. Fixes #6663 (#6802) - * [6995d682d](https://github.com/argoproj/argo-workflows/commit/6995d682dabbaac7e44e97f9a18480723932a882) fix: update outdated links for cli (#6791) - * [b35aabe86](https://github.com/argoproj/argo-workflows/commit/b35aabe86be9fa5db80299cebcfb29c32be21047) fix(lint): checking error for viper command flag binding (#6788) - * [96c562649](https://github.com/argoproj/argo-workflows/commit/96c5626497df9eedad062df9b8aaaaeea3561407) feat: Add env vars config for argo-server and workflow-controller (#6767) - * [7a7171f46](https://github.com/argoproj/argo-workflows/commit/7a7171f464e5f2f71526c3cdb63e854e28fd3c01) fix: Fix expression template random errors. 
Fixes #6673 (#6786) - * [067576ed7](https://github.com/argoproj/argo-workflows/commit/067576ed72750158efd034078ab8102b72438798) fix(controller): fix template archivelocation-archivelog behaviour (#6754) - * [d747fc5ea](https://github.com/argoproj/argo-workflows/commit/d747fc5ea985ad39324282e8410ca6397e05832f) fix(ui): workflow event binding typo (#6782) - * [9dc33f617](https://github.com/argoproj/argo-workflows/commit/9dc33f6172a3bc1e0fc0e64d9ed56ed92981c349) fix: Fix bugs, unable to resolve tasks aggregated outputs in dag outputs. Fixes #6684 (#6692) - * [954292d50](https://github.com/argoproj/argo-workflows/commit/954292d500b1a63c1c467f0d404b38e8b372f22e) fix(controller): TestPodExists unit test, add delay to wait for informer getting pod info (#6783) - -### Contributors - - * Alex Collins - * Anish Dangi - * Niklas Hansson - * Philippe Richard - * Saravanan Balasubramanian - * Tianchu Zhao - * smile-luobin - * tooptoop4 - * ygelfand - -## v3.2.0-rc4 (2021-09-21) - - * [710e82366](https://github.com/argoproj/argo-workflows/commit/710e82366dc3b0b17f5bf52004d2f72622de7781) fix: fix a typo in example file dag-conditional-artifacts.yaml (#6775) - * [b82884600](https://github.com/argoproj/argo-workflows/commit/b8288460052125641ff1b4e1bcc4ee03ecfe319b) feat: upgrade Argo Dataflow to v0.0.104 (#6749) - * [1a76e6581](https://github.com/argoproj/argo-workflows/commit/1a76e6581dd079bdcfc76be545b3f7dd1ba48105) fix(controller): TestPodExists unit test (#6763) - * [6875479db](https://github.com/argoproj/argo-workflows/commit/6875479db8c466c443acbc15a3fe04d8d6a8b1d2) fix: Daemond status stuck with Running (#6742) - * [e5b131a33](https://github.com/argoproj/argo-workflows/commit/e5b131a333afac0ed3444b70e2fe846b86dc63e1) feat: Add template node to pod name. Fixes #1319 (#6712) - -### Contributors - - * Alex Collins - * J.P. Zivalich - * Saravanan Balasubramanian - * TCgogogo - * Tianchu Zhao - -## v3.2.0-rc3 (2021-09-14) - - * [69e438426](https://github.com/argoproj/argo-workflows/commit/69e438426e4d116e2c9a1716651af7ef14864f04) fix: correct minor typos in docs (#6722) - * [ae5398698](https://github.com/argoproj/argo-workflows/commit/ae5398698afd3676ba180874987bfc6c3563b9a6) fix(executor): allow emptyRepo artifact input. Fixes #5857 (#6711) - * [e57249e64](https://github.com/argoproj/argo-workflows/commit/e57249e647ec15c859e1035d451c65ae76cc27b6) fix: remove windows UNC paths from wait/init containers. 
Fixes #6583 (#6704) - * [0b3f62cbe](https://github.com/argoproj/argo-workflows/commit/0b3f62cbe747aa82cff1419cf26db6007d1d1079) fix: kill sidecar timeout issue (#6700) - * [cf14cad41](https://github.com/argoproj/argo-workflows/commit/cf14cad41e1a8428cae8382398ee778892e63198) feat(ui): logsViewer use archived log if node finish and archived (#6708) - * [3ba7d5fd6](https://github.com/argoproj/argo-workflows/commit/3ba7d5fd64f5bab7c96b6b4ff65e488f8faa570e) fix(ui): undefined cron timestamp (#6713) - * [4c9c92292](https://github.com/argoproj/argo-workflows/commit/4c9c922924be2a299995fc06efbaef15c7fb0f84) fix: panic in prepareMetricScope (#6720) - * [d1299ec80](https://github.com/argoproj/argo-workflows/commit/d1299ec8073789af8c9b6281770f9236013d5acf) fix(executor): handle hdfs optional artifact at retriving hdfs file stat (#6703) - * [11657fe16](https://github.com/argoproj/argo-workflows/commit/11657fe169e31319da431d77ed3355ab2848401d) feat: Provide context to NewAPIClient (#6667) - * [a1cc0f557](https://github.com/argoproj/argo-workflows/commit/a1cc0f557c08c1206df89e39d2c286f02a6675de) feat: archivewf add name filter. Fixes #5824 (#6706) - * [1e31eb856](https://github.com/argoproj/argo-workflows/commit/1e31eb85655d2118f2e3c3edaa8886f923de4f5b) fix(ui): report phase button alignment (#6707) - * [d45395b6f](https://github.com/argoproj/argo-workflows/commit/d45395b6f3b0cc40444e98af921b9e80284b74e8) fix: run Snyk on UI. Fixes #6604 (#6651) - * [2e174bd4c](https://github.com/argoproj/argo-workflows/commit/2e174bd4c585ccf72e34c8f72703a0950a67460c) fix(ui): button margin (#6699) - * [4b5d7ecfd](https://github.com/argoproj/argo-workflows/commit/4b5d7ecfd1087f22002bc63658dc5ad3fe30927f) fix(emissary): strip trailing slash from artifact src before creating… (#6696) - * [28c8dc7a9](https://github.com/argoproj/argo-workflows/commit/28c8dc7a9054fdf90fd7f98e03f86923dc6e6d2a) feat: Support loading parameter values from configmaps (#6662) - * [9c76cc34c](https://github.com/argoproj/argo-workflows/commit/9c76cc34c7591f0113dea4e35b58b902d8386544) fix(executor): Retry `kubectl` on transient error (#6472) - * [929351267](https://github.com/argoproj/argo-workflows/commit/9293512674c21a2494c704978990cf89eb5ad8c0) fix(cli): Added validatation for StartAt, FinishedAt and ID (#6661) - * [a147f178d](https://github.com/argoproj/argo-workflows/commit/a147f178d9ddbe139551bf5636f73fb1af2e61f8) fix(controller): Set finishedAt for workflow with Daemon steps (#6650) - * [5522d4b4c](https://github.com/argoproj/argo-workflows/commit/5522d4b4c6f3b2de68956998c877b2c596e158af) fix: Do not index complete workflow semaphores (#6639) - * [2ac3c48d3](https://github.com/argoproj/argo-workflows/commit/2ac3c48d33415b804067b07a13185b06d3b416bc) fix: `argo node set` panic: index out of range and correct docs (#6653) - -### Contributors - - * Alex Collins - * Anish Dangi - * Damian Czaja - * Elliot Maincourt - * Jesse Suen - * Joshua Carp - * Saravanan Balasubramanian - * Tianchu Zhao - * William Van Hevelingen - * Yuan Tang - * 大雄 - -## v3.2.0-rc2 (2021-09-01) - - * [6d46fd9f8](https://github.com/argoproj/argo-workflows/commit/6d46fd9f881a337b5b3d34d62e71d9b56ba05b1a) feat(controller): Add a shared index informer for ConfigMaps (#6644) - * [91abb47db](https://github.com/argoproj/argo-workflows/commit/91abb47db3c8ad20fac80914f1961842bc64a0b9) feat: Upgrade dataflow to v0.0.98 (#6637) - * [d8b90f2b8](https://github.com/argoproj/argo-workflows/commit/d8b90f2b89472f8dce9c134aeccd7cb70ee3b87b) fix: Fixed typo in clusterrole (#6626) - * 
[51307e11e](https://github.com/argoproj/argo-workflows/commit/51307e11ede253be6231dd007565fcc98ccc564b) fix: Upgrade Dataflow to v0.0.96 (#6622) - * [f1c188f3e](https://github.com/argoproj/argo-workflows/commit/f1c188f3eba61421a37dfcaea68e7e9f61f5842a) fix: Argo Workflow specs link to not go to raw content (#6624) - * [07e29263a](https://github.com/argoproj/argo-workflows/commit/07e29263a6254b9caf7a47e2761cba3e1d39c7b4) docs: Add slack exit handler example. Resolves #4152 (#6612) - * [29cf73548](https://github.com/argoproj/argo-workflows/commit/29cf73548d7246433cb1d835f25f34ab73389fe4) fix(controller): Initialize throttler during starting workflow-controller. Fixes: #6599 (#6608) - * [a394a91f5](https://github.com/argoproj/argo-workflows/commit/a394a91f59bc3086e0538265c0d9d399a43110c6) fix: manifests/quick-start/sso for running locally PROFILE=sso (#6503) - * [8678f007e](https://github.com/argoproj/argo-workflows/commit/8678f007e86ffa615e6ca90c52c7ca4d1e458b08) fix: Fix `gosec` warnings, disable pprof by default. Fixes #6594 (#6596) - * [3aac377e2](https://github.com/argoproj/argo-workflows/commit/3aac377e223f1a6bad05ec28404c89e435e47687) fix!: Enable authentication by default on Argo Server `/metrics` endpoint. Fixes #6592 (#6595) - * [656639666](https://github.com/argoproj/argo-workflows/commit/6566396666163198f2520c9a0790b01ada3863fd) fix(executor): Disambiguate PNS executor initialization log (#6582) - * [d6f5acb40](https://github.com/argoproj/argo-workflows/commit/d6f5acb407ddf2d6f7afbe3e380eda5a2908dcbd) fix: Fix unit test with missing createRunningPods() (#6585) - * [b0e050e54](https://github.com/argoproj/argo-workflows/commit/b0e050e54a96a1c46b279a37b7daf43b2942f791) feat: upgrade argo-dataflow to v0.0.90 (#6563) - * [30340c427](https://github.com/argoproj/argo-workflows/commit/30340c42785fcff1e864b2078c37139dc13bbfd7) fix(gcs): backoff bool should be false if error is transient (#6577) - * [1e34cec88](https://github.com/argoproj/argo-workflows/commit/1e34cec88e4fd1f65da923139efbf8fb38c97772) feat(artifact): Allow to set bucket logging for OSS artifact driver (#6554) - -### Contributors - - * Alex Collins - * Andrey Melnikov - * Antoine Dao - * J.P. Zivalich - * Saravanan Balasubramanian - * Tetsuya Shiota - * Yuan Tang - * smile-luobin - -## v3.2.0-rc1 (2021-08-19) - - * [3595ac59c](https://github.com/argoproj/argo-workflows/commit/3595ac59cefe63256bbac38bca27fb5cacee93f9) feat: Adding SSO support for Okta. 
Fixes #6165 (#6572) - * [f1cf7ee03](https://github.com/argoproj/argo-workflows/commit/f1cf7ee03c741ecdc9698123a3fae4e5ccafbd16) fix: Panic in getTemplateOutputsFromScope (#6560) - * [64fbf6955](https://github.com/argoproj/argo-workflows/commit/64fbf6955840b1bde28d36db106866da04047d4f) fix(executor/pns): panic of pidFileHandles concurrent writes (#6569) - * [ae7eeeb50](https://github.com/argoproj/argo-workflows/commit/ae7eeeb50dd0b7640913e7b30d1fe612c7e0ee4c) fix: Fix `x509: certificate signed by unknown authority` error (#6566) - * [205d233cd](https://github.com/argoproj/argo-workflows/commit/205d233cd8e85af24e451d6268af32e928aeb47c) fix(executor/docker): fix failed to wait for main container to complete: timed out waiting for the condition: container does not exist (#6561) - * [d41c03702](https://github.com/argoproj/argo-workflows/commit/d41c037027e062a149ce821dd377fb6b52269335) feat: S3 Encryption At Rest (#6549) - * [478d79469](https://github.com/argoproj/argo-workflows/commit/478d794693b3a965e3ba587da2c67e5e1effa418) fix: Generate TLS Certificates on startup and only keep in memory (#6540) - * [f711ce4d5](https://github.com/argoproj/argo-workflows/commit/f711ce4d5352b025f366f8e81ebbe9e457cc9054) fix: use golangci-lint v1.37.0 to support apple M1 (#6548) - * [37395d681](https://github.com/argoproj/argo-workflows/commit/37395d6818ba151213a1bb8338356cf553c2404a) fix: replace docker.io with quay.io to avoid the image pull limit (#6539) - * [a1a8d4421](https://github.com/argoproj/argo-workflows/commit/a1a8d4421e3b7e8c6bcd2677e7862ec6f3aed1cc) fix: argo-sever mistype (#6543) - * [a57b3ad9e](https://github.com/argoproj/argo-workflows/commit/a57b3ad9ed2afbcd3f22e912b252dd451d9c7ebc) feat: Show Argo Dataflow pipelines in the UI (#5742) - * [dc4f0a172](https://github.com/argoproj/argo-workflows/commit/dc4f0a172d6992cd34749d858bb0c402172c8eef) fix: use execWf when setting PodMetadata (#6512) - * [047ae4f5e](https://github.com/argoproj/argo-workflows/commit/047ae4f5e6d93e4e2c84d8af1f4df4d68a69bb75) docs(users): add arabesque (#6533) - * [903ce68ff](https://github.com/argoproj/argo-workflows/commit/903ce68ffa01400a7b57b2604091482a27ca64d4) fix: Fix the Status update for node with synchronization lock (#6525) - * [a38460342](https://github.com/argoproj/argo-workflows/commit/a38460342472b0515017d5a2ab2cbc6536b5592e) fix: Upgrade pkg to v0.10.1. Fixes #6521 (#6523) - * [0670f652c](https://github.com/argoproj/argo-workflows/commit/0670f652cd7ca5500aa77c682bb8b380bb4c79d3) fix(controller): fix tasket warning in Non-HTTP Template scanerio (#6518) - * [32970f4cd](https://github.com/argoproj/argo-workflows/commit/32970f4cd15923b62d750863c28270bc283071b6) fix: PROFILE=SSO to PROFILE=sso for case-sensitive filesystem (#6502) - * [3d5ac9b2b](https://github.com/argoproj/argo-workflows/commit/3d5ac9b2be71937e86eee1d71a4eefa294b27293) fix(controller): Fix panic in addParamToGlobalScope (#6485) - * [d1d96b0a6](https://github.com/argoproj/argo-workflows/commit/d1d96b0a6e8f045715b83a55f1aad056eb76bd96) feat(ui): use dl tag instead of p tag in user-info ui (#6505) - * [5b8f7977a](https://github.com/argoproj/argo-workflows/commit/5b8f7977a86a43061dca9ea916d32c02e23bd7f5) Add link to latest release in installation.md (#6509) - * [24bb1b77a](https://github.com/argoproj/argo-workflows/commit/24bb1b77a1b5cd2f78251aca26d007c7d75b8993) fix(executor/docker): re-revert -- fix random errors with message "No such container:path". 
Fixes #6352 (#6508) - * [e2e822dd5](https://github.com/argoproj/argo-workflows/commit/e2e822dd59e3ad62d978cdce0efa5ce7a4a273e2) fix: Remove client private key from client auth REST config (#6506) - * [a3fd704a1](https://github.com/argoproj/argo-workflows/commit/a3fd704a1715900f2144c0362e562f75f1524126) Revert "fix(executor/docker): fix random errors with message "No such container:path". Fixes #6352 (#6483)" - * [a105b137c](https://github.com/argoproj/argo-workflows/commit/a105b137c97e5aea852c6db6e77997ca3713cb08) fix(controller): Delete the PVCs in workflowtemplateRef (#6493) - * [3373dc512](https://github.com/argoproj/argo-workflows/commit/3373dc512804ae51d09ade02be53c597aead3c3f) feat: Annotate pod events with workflow name and UID (#6455) - * [e4a53d4bf](https://github.com/argoproj/argo-workflows/commit/e4a53d4bf021fd4dce1374bb7fd4320d733e57ba) fix(executor/docker): fix random errors with message "No such container:path". Fixes #6352 (#6483) - * [2a2ecc916](https://github.com/argoproj/argo-workflows/commit/2a2ecc916925642fd8cb1efd026588e6828f82e1) fix(controller): JSON-unmarshal marshaled expression template before evaluating (#6285) - * [ec9641531](https://github.com/argoproj/argo-workflows/commit/ec9641531c8283a4e6fcd684c8aecce92c6e14b7) feat(controller): Inline templates. Closes #5105 (#5749) - * [7ef0f4750](https://github.com/argoproj/argo-workflows/commit/7ef0f4750d7da4bb326fb0dab25f176db412993b) fix: Consider onExit children of Retry nodes (#6451) - * [7f2c58972](https://github.com/argoproj/argo-workflows/commit/7f2c58972177c5b7cfdfb6bc8d9ba4189a9f45d0) feat!: Upgrade to Golang 1.16. Fixes #5563 (#6471) - * [94244243c](https://github.com/argoproj/argo-workflows/commit/94244243ce07693317abdb250868d6a089111fa9) docs(users): add gitpod (#6466) - * [5fde8fa72](https://github.com/argoproj/argo-workflows/commit/5fde8fa72f2e5b0bcd7cfb048fd1eb9e24b6a950) fix: Exit template shouldn't fail with max parallelism reached (#6456) - * [c5d2461cf](https://github.com/argoproj/argo-workflows/commit/c5d2461cf5f9cd7569bc07c8a7cfde7e4c86e5a4) fix(controller): fix retry on different hosts (#6429) - * [0f6f36270](https://github.com/argoproj/argo-workflows/commit/0f6f362704e0c124a127438ced5df26e6c91a76b) fix(server): Fix nil pointer error when getting artifacts from a step without artifacts (#6465) - * [903415249](https://github.com/argoproj/argo-workflows/commit/90341524935287c7db30f34132c2a1aa4f1ea170) feat(server): Support OIDC custom claims for AuthN. Closes #5953 (#6444) - * [3e9d8373d](https://github.com/argoproj/argo-workflows/commit/3e9d8373d9165931aca1c1a3b65d81bba5a33720) fix(pods): set resources from script templates (#6450) - * [3abeb0120](https://github.com/argoproj/argo-workflows/commit/3abeb0120c80fcdf9b8b161178c296c6efccb63d) fix: Do not display clipboard if there is no text (#6452) - * [b16a0a098](https://github.com/argoproj/argo-workflows/commit/b16a0a09879413428fb93f196d4d4e63fe51e657) feat(controller): HTTP Template and Agent support feature (#5750) - * [dc043ce87](https://github.com/argoproj/argo-workflows/commit/dc043ce87b1c946d2ae4fe677862f31e18c758ff) feat(server): support changing MaxGRPCMessageSize using env variable (#6420) - * [51c15764d](https://github.com/argoproj/argo-workflows/commit/51c15764d52f87d8fc5a63e19cb1ad4d0b41a23e) fix(controller): Reinstate support for outputs.results for containers. 
Fixes #6428 (#6434) - * [40b08240d](https://github.com/argoproj/argo-workflows/commit/40b08240d7eed5ec19bef923201470b69096736f) fix: support archive.none for OSS directory artifacts (#6312) - * [7ec5b3ea9](https://github.com/argoproj/argo-workflows/commit/7ec5b3ea9e55618f1522dd7e50bbf54baad1ca39) fix(controller): Same workflow nodes are not executing parallel even semaphore locks available (#6418) - * [c29b275d5](https://github.com/argoproj/argo-workflows/commit/c29b275d56ef7f2dbf5822ee981f492c2ff61388) fix(controller): Randomly expr expression fail to resolve (#6410) - * [f1792f68c](https://github.com/argoproj/argo-workflows/commit/f1792f68cbf62b1bf6e584836bfe8fd35152d3a8) docs(executor): emissary executor also runs on GKE autopilot (#6430) - * [dd3c11252](https://github.com/argoproj/argo-workflows/commit/dd3c112523ea52a832c8df937dae37c43e2c86cd) fix(controller/cli): Resolve global artifacts created in nested workflows (#6422) - * [b17d1bf7b](https://github.com/argoproj/argo-workflows/commit/b17d1bf7b8db75fde30e0f808c2b57fddecf5b32) fix(emissary): throw argo error on file not exist (#6392) - * [946e4a4a6](https://github.com/argoproj/argo-workflows/commit/946e4a4a6254ff935df99095926905440263223a) fix(executor): Remove 15s guard for Docker executor. Fixes #6415 (#6427) - * [29ebc2a6a](https://github.com/argoproj/argo-workflows/commit/29ebc2a6ab40609784419191aef457ba83e8b062) fix(executor): remove unused import preventing compilation - * [cc701a1af](https://github.com/argoproj/argo-workflows/commit/cc701a1affdb4d29b4f48fdfb5dad719192597ec) feat(controller): opt-in to sending pod node events as pod (#6377) - * [959ce6b7f](https://github.com/argoproj/argo-workflows/commit/959ce6b7fe379e4bd79c565862b8bc03112dc154) feat(artifact): enable gcs ListObjects (#6409) - * [30e2518c2](https://github.com/argoproj/argo-workflows/commit/30e2518c2757d726a8164c6347235a88fd54c834) fix(executor/emissary): fix nonroot sidecars + input/output params & artifacts (#6403) - * [4da8fd940](https://github.com/argoproj/argo-workflows/commit/4da8fd94004d535bc79b2cbfa77f6c8683d0c547) fix(controller): Global parameter is not getting updated (#6401) - * [f2d24b1d9](https://github.com/argoproj/argo-workflows/commit/f2d24b1d9b7301fd9d1ffe2c9275caad25772bc1) fix(controller): Force main container name to be "main" as per v3.0. Fixes #6405 (#6408) - * [2df5f66a3](https://github.com/argoproj/argo-workflows/commit/2df5f66a33e197389ae906e6f7b8fb271f49c54c) fix(executor): fix GCS artifact retry (#6302) - * [092b4271b](https://github.com/argoproj/argo-workflows/commit/092b4271b9b57ce9dbff0d988b04ddbf9742425c) fix(controller): Mark workflows wait for semaphore as pending. 
Fixes #6351 (#6356) - * [453539690](https://github.com/argoproj/argo-workflows/commit/453539690e01827e97fd4921aaa425b2c864a3b1) fix(controller): allow initial duration to be 0 instead of current_time-0 (#6389) - * [b15a79cc3](https://github.com/argoproj/argo-workflows/commit/b15a79cc30509620fea703811f9a9c708f1b64d2) docs: Add 4intelligence (#6400) - * [f4b89dc8e](https://github.com/argoproj/argo-workflows/commit/f4b89dc8eebc280c5732ae06c2864bdaa1a30e87) fix: Server crash when opening timeline tab for big workflows (#6369) - * [99359a095](https://github.com/argoproj/argo-workflows/commit/99359a0950549515eed306c6839a181a2c356612) Revert "fix: examples/ci.yaml indent (#6328)" - * [66c441006](https://github.com/argoproj/argo-workflows/commit/66c441006e4d1b237de94c91d2f8eb7733ba88d0) fix(gcs): throw argo not found error if key not exist (#6393) - * [3f72fe506](https://github.com/argoproj/argo-workflows/commit/3f72fe506f6c10054692ce07f9b2eaf0f62830a7) fix: examples/ci.yaml indent (#6328) - * [9233a8de7](https://github.com/argoproj/argo-workflows/commit/9233a8de77911d1c22f3a10977a33b48eccb9e63) fix(controller): fix retry on transient errors when validating workflow spec (#6370) - * [488aec3ca](https://github.com/argoproj/argo-workflows/commit/488aec3cad640cd99e21a0c95898463a860a8c0e) fix(controller): allow workflow.duration to pass validator (#6376) - * [d6ec03238](https://github.com/argoproj/argo-workflows/commit/d6ec032388ab8d363faf4e6984b54950dd9abcad) feat(controller): Allow configurable host name label key when retrying different hosts (#6341) - * [bd5a8a99b](https://github.com/argoproj/argo-workflows/commit/bd5a8a99bc470c13a93894be9c0f7f23142a4a31) fix(fields): handle nexted fields when excluding (#6359) - * [cfab7db53](https://github.com/argoproj/argo-workflows/commit/cfab7db53c760ab4354562593b3a5e01e47c733d) feat(controller): sortDAGTasks supports sort by field Depends (#6307) - * [6e58b35c3](https://github.com/argoproj/argo-workflows/commit/6e58b35c34c70df11d7727519249fff46a23ab2b) fix(cli): Overridding name/generateName when creating CronWorkflows if specified (#6308) - * [5f0d6ab87](https://github.com/argoproj/argo-workflows/commit/5f0d6ab87e32fda900667cc592951c662cee8acc) docs(users): Add WooliesX (#6358) - * [b388c63d0](https://github.com/argoproj/argo-workflows/commit/b388c63d089cc8c302fdcdf81be3dcd9c12ab6f2) fix(crd): temp fix 34s timeout bug for k8s 1.20+ (#6350) - * [3db467e6b](https://github.com/argoproj/argo-workflows/commit/3db467e6b9bed209404c1a8a0152468ea832f06d) fix(cli): v3.1 Argo Auth Token (#6344) - * [d7c09778a](https://github.com/argoproj/argo-workflows/commit/d7c09778ab9e2c3ce88a2fc6de530832f3770698) fix(controller): Not updating StoredWorkflowSpec when WFT changed during workflow running (#6342) - * [7c38fb01b](https://github.com/argoproj/argo-workflows/commit/7c38fb01bb8862b6933603d73a5f300945f9b031) feat(controller): Differentiate CronWorkflow submission vs invalid spec error metrics (#6309) - * [85c9279a9](https://github.com/argoproj/argo-workflows/commit/85c9279a9019b400ee55d0471778eb3cc4fa20db) feat(controller): Store artifact repository in workflow status. Fixes #6255 (#6299) - * [d07d933be](https://github.com/argoproj/argo-workflows/commit/d07d933bec71675138a73ba53771c45c4f545801) require sso redirect url to be an argo url (#6211) - * [c2360c4c4](https://github.com/argoproj/argo-workflows/commit/c2360c4c47e073fde5df04d32fdb910dd8f7dd77) fix(cli): Only list needed fields. 
Fixes #6000 (#6298) - * [126701476](https://github.com/argoproj/argo-workflows/commit/126701476effdb9d71832c776d650a768428bbe1) docs(controller): add missing emissary executor (#6291) - * [c11584940](https://github.com/argoproj/argo-workflows/commit/c1158494033321ecff6e12ac1ac8a847a7d278bf) fix(executor): emissary - make /var/run/argo files readable from non-root users. Fixes #6238 (#6304) - * [c9246d3d4](https://github.com/argoproj/argo-workflows/commit/c9246d3d4c162e0f7fe76f2ee37c55bdbfa4b0c6) fix(executor): Tolerate docker re-creating containers. Fixes #6244 (#6252) - * [f78b759cf](https://github.com/argoproj/argo-workflows/commit/f78b759cfca07c47ae41990e1bbe031e862993f6) feat: Introduce when condition to retryStrategy (#6114) - * [05c901fd4](https://github.com/argoproj/argo-workflows/commit/05c901fd4f622aa9aa87b3eabfc87f0bec6dea30) fix(executor): emissary - make argoexec executable from non-root containers. Fixes #6238 (#6247) - * [73a36d8bf](https://github.com/argoproj/argo-workflows/commit/73a36d8bf4b45fd28f1cc80b39bf1bfe265cf6b7) feat: Add support for deletion delay when using PodGC (#6168) - * [19da54109](https://github.com/argoproj/argo-workflows/commit/19da5410943fe0b5f8d7f8b79c5db5d648b65d59) fix(conttroller): Always set finishedAt dote. Fixes #6135 (#6139) - * [92eb8b766](https://github.com/argoproj/argo-workflows/commit/92eb8b766b8501b697043fd1677150e1e565da49) fix: Reduce argoexec image size (#6197) - * [631b0bca5](https://github.com/argoproj/argo-workflows/commit/631b0bca5ed3e9e2436b541b2a270f12796961d1) feat(ui): Add copy to clipboard shortcut (#6217) - * [8d3627d3f](https://github.com/argoproj/argo-workflows/commit/8d3627d3fba46257d32d05be9fd0037ac11b0ab4) fix: Fix certain sibling tasks not connected to parent (#6193) - * [38f85482b](https://github.com/argoproj/argo-workflows/commit/38f85482ba30a187c243080c97904dfe8208e285) docs(executor): document k8s executor behaviour with program warnings (#6212) - * [4fd38facb](https://github.com/argoproj/argo-workflows/commit/4fd38facbfb66b06ab0205b04f6e1f1e9943eb6a) fix: Fix security issues related to file closing and paths (G307 & G304) (#6200) - * [cecc379ce](https://github.com/argoproj/argo-workflows/commit/cecc379ce23e708479e4253bbbf14f7907272c9c) refactor: Remove the need for pod annotations to be mounted as a volume (#6022) - * [0e94283ae](https://github.com/argoproj/argo-workflows/commit/0e94283aea641c6c927c9165900165a72022124f) fix(server): Fix issue with auto oauth redirect URL in callback and handle proxies (#6175) - * [0cc5a24c5](https://github.com/argoproj/argo-workflows/commit/0cc5a24c59309438e611223475cdb69c5e3aa01e) fix(controller): Wrong validate order when validate DAG task's argument (#6190) - * [9fe8c1085](https://github.com/argoproj/argo-workflows/commit/9fe8c10858a5a1f024abc812f2e3250f35d7f45e) fix(controller): dehydrate workflow before deleting offloaded node status (#6112) - * [510b4a816](https://github.com/argoproj/argo-workflows/commit/510b4a816dbb2d33f37510db1fd92b841c4d14d3) fix(controller): Allow retry on transient errors when validating workflow spec. Fixes #6163 (#6178) - * [4f847e099](https://github.com/argoproj/argo-workflows/commit/4f847e099ec2a2fef12e98af36b2e4995f8ba3e4) feat(server): Allow redirect_uri to be automatically resolved when using sso (#6167) - * [95ad561ae](https://github.com/argoproj/argo-workflows/commit/95ad561aec5ec360448267b09d8d2238c98012e0) feat(ui): Add checkbox to check all workflows in list. 
Fixes #6069 (#6158) - * [43f68f4aa](https://github.com/argoproj/argo-workflows/commit/43f68f4aa16ab696d26be6a33b8893418844d838) fix(ui): Fix event-flow scrolling. Fixes #6133 (#6147) - * [9f0cdbdd7](https://github.com/argoproj/argo-workflows/commit/9f0cdbdd78e8eb5b9001243c00cdff5915635401) fix(executor): Capture emissary main-logs. Fixes #6145 (#6146) - * [963bed34b](https://github.com/argoproj/argo-workflows/commit/963bed34bf2ac828384bbbda737e0d8a540bddbb) fix(ui): Fix-up local storage namespaces. Fixes #6109 (#6144) - * [80599325f](https://github.com/argoproj/argo-workflows/commit/80599325feab42bf473925aa9a28a805fc9e1e6e) fix(controller): Performance improvement for Sprig. Fixes #6135 (#6140) - * [868868ee2](https://github.com/argoproj/argo-workflows/commit/868868ee2eb836e9134bdb1f92e7dc2c458722ca) fix: Allow setting workflow input parameters in UI. Fixes #4234 (#5319) - * [357429635](https://github.com/argoproj/argo-workflows/commit/3574296358191edb583bf43d6459259c4156a1e6) build image output to docker (#6128) - * [b38fd1404](https://github.com/argoproj/argo-workflows/commit/b38fd14041e5e61618ea63975997d15704dac8f3) fix(executor): Check whether any errors within checkResourceState() are transient. Fixes #6118. (#6134) - * [db95dbfa1](https://github.com/argoproj/argo-workflows/commit/db95dbfa1edd4a31b1fbd6adbb8e47ca8f2ac428) add troubleshooting notes section for running-locally docs (#6132) - * [b5bd0242d](https://github.com/argoproj/argo-workflows/commit/b5bd0242dd30273161d0ae45bb9e82e85534a53b) Update events.md (#6119) - * [a497e82e0](https://github.com/argoproj/argo-workflows/commit/a497e82e0e6e7e17de20830cc8ea9d306d26d5ca) fix(executor): Fix docker not terminating. Fixes #6064 (#6083) - * [1d76c4815](https://github.com/argoproj/argo-workflows/commit/1d76c4815704e509d7aedc1a79224fbee65ae8ff) feat(manifests): add 'app' label to workflow-controller-metrics service (#6079) - * [1533dd467](https://github.com/argoproj/argo-workflows/commit/1533dd467fa8e0c08a2a5b5fe9d0a1b4dea15b89) fix(executor): Fix emissary kill. Fixes #6030 (#6084) - * [245764eab](https://github.com/argoproj/argo-workflows/commit/245764eab4f597d3bfcca75e86f9512d49792706) chore(executor): Adjust resource JSON object log to debug level (#6100) - * [00b56e543](https://github.com/argoproj/argo-workflows/commit/00b56e543092f2af24263ef83595b53c0bae9619) fix(executor): Fix `kubectl` permission error (#6091) - * [7dc6515ce](https://github.com/argoproj/argo-workflows/commit/7dc6515ce1ef76475ac7bd2a7a3c3cdbe795a13c) Point to latest stable release (#6092) - * [be63efe89](https://github.com/argoproj/argo-workflows/commit/be63efe8950e9ba3f15f1ad637e2b3863b85e093) feat(executor)!: Change `argoexec` base image to alpine. Closes #5720 (#6006) - * [937bbb9d9](https://github.com/argoproj/argo-workflows/commit/937bbb9d9a0afe3040afc3c6ac728f9c72759c6a) feat(executor): Configurable interval for wait container to check container statuses. Fixes #5943 (#6088) - * [c111b4294](https://github.com/argoproj/argo-workflows/commit/c111b42942e1edc4e32eb79e78ad86719f2d3f19) fix(executor): Improve artifact error messages. 
Fixes #6070 (#6086) - * [53bd960b6](https://github.com/argoproj/argo-workflows/commit/53bd960b6e87a3e77cb320e4b53f9f9d95934149) Update upgrading.md - * [493595a78](https://github.com/argoproj/argo-workflows/commit/493595a78258c13b9b0bfc86fd52bf729e8a9a8e) feat: Add TaskSet CRD and HTTP Template (#5628) - -### Contributors - - * Aaron Mell - * Alex Collins - * Alexander Matyushentsev - * Antoine Dao - * Antony Chazapis - * BOOK - * Daan Seynaeve - * David Collom - * Denis Bellotti - * Ed Marks - * Gage Orsburn - * Geoffrey Huntley - * Huan-Cheng Chang - * Joe McGovern - * KUNG HO BACK - * Kaito Ii - * Luces Huayhuaca - * Marcin Gucki - * Michael Crenshaw - * Miles Croxford - * Mohammad Ali - * Peixuan Ding - * Reijer Copier - * Saravanan Balasubramanian - * Sebastian Nyberg - * Simon Behar - * Stefan Sedich - * Tetsuya Shiota - * Thiago Bittencourt Gil - * Tianchu Zhao - * Tom Meadows - * William Van Hevelingen - * Windfarer - * Yuan (Bob) Gong - * Yuan Tang - * brgoode - * dpeer6 - * jibuji - * kennytrytek - * meijin - * wanghong230 - -## v3.1.15 (2021-11-17) - - * [a0d675692](https://github.com/argoproj/argo-workflows/commit/a0d6756922f7ba89f20b034dd265d0b1e393e70f) fix: add gh ecdsa and ed25519 to known hosts (#7226) - -### Contributors - - * Rob Herley - -## v3.1.14 (2021-10-19) - - * [f647435b6](https://github.com/argoproj/argo-workflows/commit/f647435b65d5c27e84ba2d2383f0158ec84e6369) fix: do not delete expr tag tmpl values. Fixes #6909 (#6921) - -### Contributors - - * Alex Collins - -## v3.1.13 (2021-09-28) - - * [78cd6918a](https://github.com/argoproj/argo-workflows/commit/78cd6918a8753a8448ed147b875588d56bd26252) fix: Missing duration metrics if controller restart (#6815) - * [1fe754ef1](https://github.com/argoproj/argo-workflows/commit/1fe754ef10bd95e3fe3485f67fa7e9c5523b1dea) fix: Fix expression template random errors. Fixes #6673 (#6786) - * [3a98174da](https://github.com/argoproj/argo-workflows/commit/3a98174dace34ffac7dd7626a253bbb1101df515) fix: Fix bugs, unable to resolve tasks aggregated outputs in dag outputs. Fixes #6684 (#6692) - * [6e93af099](https://github.com/argoproj/argo-workflows/commit/6e93af099d1c93d1d27fc86aba6d074d6d79cffc) fix: remove windows UNC paths from wait/init containers. 
Fixes #6583 (#6704) - -### Contributors - - * Alex Collins - * Anish Dangi - * Saravanan Balasubramanian - * smile-luobin - -## v3.1.12 (2021-09-16) - - * [e62b9a8dc](https://github.com/argoproj/argo-workflows/commit/e62b9a8dc8924e545d57d1f90f901fbb0b694e09) feat(ui): logsViewer use archived log if node finish and archived (#6708) - * [da5ce18cf](https://github.com/argoproj/argo-workflows/commit/da5ce18cf24103ca9418137229fc355a9dc725c9) fix: Daemond status stuck with Running (#6742) - -### Contributors - - * Saravanan Balasubramanian - * Tianchu Zhao - -## v3.1.11 (2021-09-13) - - * [665c08d29](https://github.com/argoproj/argo-workflows/commit/665c08d2906f1bb15fdd8c2f21e6877923e0394b) skippied flakytest - * [459a61170](https://github.com/argoproj/argo-workflows/commit/459a61170663729c912a9b387fd7fa5c8a147839) fix(executor): handle hdfs optional artifact at retriving hdfs file stat (#6703) - * [82e408297](https://github.com/argoproj/argo-workflows/commit/82e408297c65a2d64408d9f6fb01766192fcec42) fix: panic in prepareMetricScope (#6720) - * [808d897a8](https://github.com/argoproj/argo-workflows/commit/808d897a844b46487de65ce27ddeb2dad614f417) fix(ui): undefined cron timestamp (#6713) - -### Contributors - - * Saravanan Balasubramanian - * Tianchu Zhao - -## v3.1.10 (2021-09-10) - - * [2730a51a2](https://github.com/argoproj/argo-workflows/commit/2730a51a203d6b587db5fe43a0e3de018a35dbd8) fix: Fix `x509: certificate signed by unknown authority` error (#6566) - -### Contributors - - * Alex Collins - -## v3.1.9 (2021-09-03) - - * [e4f6bcb02](https://github.com/argoproj/argo-workflows/commit/e4f6bcb02f10bea5c76f2f91ff223b8a380b4557) fix the codegen - * [92153dcca](https://github.com/argoproj/argo-workflows/commit/92153dcca774bb3097f00b86b35edf966ead7de4) fixed test - * [117e85f47](https://github.com/argoproj/argo-workflows/commit/117e85f473fd6b4d9e7cebd4406503896f4d0639) fix(cli): Added validatation for StartAt, FinishedAt and ID (#6661) - * [01083d1d1](https://github.com/argoproj/argo-workflows/commit/01083d1d1f485b1ae1fb1e697090db0069e25e96) fix(controller): Set finishedAt for workflow with Daemon steps - * [926e43950](https://github.com/argoproj/argo-workflows/commit/926e439503f61766eea61c2eec079571d778a31e) fix: Do not index complete workflow semaphores (#6639) - * [a039a29ab](https://github.com/argoproj/argo-workflows/commit/a039a29ab27e6ce50ecaf345c3d826d90597523d) fix: `argo node set` panic: index out of range and correct docs (#6653) - * [8f8fc2bd9](https://github.com/argoproj/argo-workflows/commit/8f8fc2bd9e2904729bc75e71611673b70d55c2f6) fix(controller): Initialize throttler during starting workflow-controller. 
Fixes: #6599 (#6608) - * [940e993ff](https://github.com/argoproj/argo-workflows/commit/940e993ffccb737a45774f9fc623d5a548d57978) fix(gcs): backoff bool should be false if error is transient (#6577) - * [2af306a52](https://github.com/argoproj/argo-workflows/commit/2af306a52de80efd3b50bcbd6db144ddede851d1) fix(executor/pns): panic of pidFileHandles concurrent writes (#6569) - * [1019a13a6](https://github.com/argoproj/argo-workflows/commit/1019a13a6139d5867bb657ca8593fdb671bb3598) fix(executor/docker): fix failed to wait for main container to complete: timed out waiting for the condition: container does not exist (#6561) - * [563bb04c4](https://github.com/argoproj/argo-workflows/commit/563bb04c4f8d5d8e5bf83ecdf080926beb9e4bae) fix: Generate TLS Certificates on startup and only keep in memory (#6540) - * [36d2389f2](https://github.com/argoproj/argo-workflows/commit/36d2389f23dc832fe962025ad7b2a6cf6ed9bce3) fix: use execWf when setting PodMetadata (#6512) - -### Contributors - - * Alex Collins - * Antoine Dao - * David Collom - * Ed Marks - * Jesse Suen - * Saravanan Balasubramanian - * Windfarer - * Yuan (Bob) Gong - * smile-luobin - -## v3.1.8 (2021-08-18) - - * [0df0f3a98](https://github.com/argoproj/argo-workflows/commit/0df0f3a98fac4e2aa5bc02213fb0a2ccce9a682a) fix: Fix `x509: certificate signed by unknown authority` error (#6566) - -### Contributors - - * Alex Collins - -## v3.1.7 (2021-08-18) - - * [5463b5d4f](https://github.com/argoproj/argo-workflows/commit/5463b5d4feb626ac80def3c521bd20e6a96708c4) fix: Generate TLS Certificates on startup and only keep in memory (#6540) - -### Contributors - - * David Collom - -## v3.1.6 (2021-08-12) - - * [14e127857](https://github.com/argoproj/argo-workflows/commit/14e1278572b28d8b1854858ce7de355ce60199c9) ci-build.yaml-with-master-change - * [c0ac267ab](https://github.com/argoproj/argo-workflows/commit/c0ac267ab50ba8face0cc14eef0563dddd3f16f6) ci-build.yaml - * [c87ce923b](https://github.com/argoproj/argo-workflows/commit/c87ce923bfd6723f91213696c4ee3af5f210cdb8) Update ci-build.yaml - * [896bcbd7d](https://github.com/argoproj/argo-workflows/commit/896bcbd7d33348054833af20792f923eac728091) Update ci-build.yaml - * [cefddb273](https://github.com/argoproj/argo-workflows/commit/cefddb273d0edcd622a3df368a542cdf33df7f47) Update workflowpod_test.go - * [47720040a](https://github.com/argoproj/argo-workflows/commit/47720040afd142d5726f28757912e0589f4ea901) fixed codegen - * [501c1720a](https://github.com/argoproj/argo-workflows/commit/501c1720a2cf09907bf05a2641ad802e9d084c86) fix: use execWf when setting PodMetadata (#6512) - * [4458394a8](https://github.com/argoproj/argo-workflows/commit/4458394a8c1af8e7328d06cc417850e410f7dd72) fix: Fix the Status update for node with synchronization lock (#6525) - * [907effbfc](https://github.com/argoproj/argo-workflows/commit/907effbfcd4f3bf058fb0e5bbd6faea512401ea9) fix: Upgrade pkg to v0.10.1. Fixes #6521 (#6523) - * [46e2803f7](https://github.com/argoproj/argo-workflows/commit/46e2803f7e0a6d7fd3213d5f02d58fae9ee78880) fix(controller): Fix panic in addParamToGlobalScope (#6485) - * [e1149b61a](https://github.com/argoproj/argo-workflows/commit/e1149b61aca5fde7b63be2e8f5d9b0be148b5eee) fix(controller): JSON-unmarshal marshaled expression template before evaluating (#6285) - * [e6a3b0c76](https://github.com/argoproj/argo-workflows/commit/e6a3b0c764ae54985a7315e7dbf656e766ae33e8) fix(executor/docker): re-revert -- fix random errors with message "No such container:path". 
Fixes #6352 (#6508) - * [b37e81a98](https://github.com/argoproj/argo-workflows/commit/b37e81a98b7f7c8c11317edfc06950778cd482ad) fix: Remove client private key from client auth REST config (#6506) - * [cc51e71ce](https://github.com/argoproj/argo-workflows/commit/cc51e71ced57448839e98d44fe34780671f03066) fix(controller): JSON-unmarshal marshaled expression template before evaluating (#6285) - -### Contributors - - * Alex Collins - * Ed Marks - * Michael Crenshaw - * Saravanan Balasubramanian - * William Van Hevelingen - * Yuan (Bob) Gong - -## v3.1.5 (2021-08-03) - - * [3dbee0ec3](https://github.com/argoproj/argo-workflows/commit/3dbee0ec368f3ea8c31f49c8b1a4617cc32bcce9) fix(executor): emissary - make argoexec executable from non-root containers. Fixes #6238 (#6247) - -### Contributors - - * Yuan (Bob) Gong - -## v3.1.4 (2021-08-03) - - * [247776d66](https://github.com/argoproj/argo-workflows/commit/247776d66fa6bf988f861ba82f181e386a972626) removed unused import - * [89d662c39](https://github.com/argoproj/argo-workflows/commit/89d662c39e326977384683a255b7472839d957ee) fix: Exit template shouldn't fail with max parallelism reached (#6456) - * [4556ba27b](https://github.com/argoproj/argo-workflows/commit/4556ba27b81c2291353d93fd59a581e3a2a2bb21) fix(controller): fix retry on different hosts (#6429) - * [fc8260b6e](https://github.com/argoproj/argo-workflows/commit/fc8260b6e1f55d939f16bee682f73ba59774cbb9) fix(controller): fix retry on different hosts (#6429) - * [b489d03b4](https://github.com/argoproj/argo-workflows/commit/b489d03b417ecd89654bd6b524c6daf38675ec63) fix(server): Fix nil pointer error when getting artifacts from a step without artifacts (#6465) - * [4d99aac6e](https://github.com/argoproj/argo-workflows/commit/4d99aac6eb3b065eec2be215439dd5a77f337907) fix(pods): set resources from script templates (#6450) - * [3f594ca8d](https://github.com/argoproj/argo-workflows/commit/3f594ca8dd891149f1a07d123fd53097dc3b4438) fix(emissary): throw argo error on file not exist (#6392) - * [f4e20761f](https://github.com/argoproj/argo-workflows/commit/f4e20761f484ce3bf0b3610457193c0324cffa12) Update umask_windows.go - * [cc84fe94c](https://github.com/argoproj/argo-workflows/commit/cc84fe94cfb2df447bf8d1dbe28cc416b866b159) fix(executor): fix GCS artifact retry (#6302) - * [0b0f52788](https://github.com/argoproj/argo-workflows/commit/0b0f527881f5b0a48d8cf77c9e6a29fbeb27b4dc) fix(gcs): throw argo not found error if key not exist (#6393) - -### Contributors - - * Antoine Dao - * Marcin Gucki - * Peixuan Ding - * Saravanan Balasubramanian - * Tianchu Zhao - * Yuan (Bob) Gong - -## v3.1.3 (2021-07-27) - - * [9337abb00](https://github.com/argoproj/argo-workflows/commit/9337abb002d3c505ca45c5fd2e25447acd80a108) fix(controller): Reinstate support for outputs.results for containers. Fixes #6428 (#6434) - * [d2fc4dd62](https://github.com/argoproj/argo-workflows/commit/d2fc4dd62389b3b6726f12e68a86f3179cf957b2) fix(controller): Same workflow nodes are not executing parallel even semaphore locks available (#6418) - * [13c51d4b2](https://github.com/argoproj/argo-workflows/commit/13c51d4b2c1f2ed2e8b416953de2516b92a59da4) fix(controller): Randomly expr expression fail to resolve (#6410) - * [0e5dfe50b](https://github.com/argoproj/argo-workflows/commit/0e5dfe50b2737e1aa564a8684c1ddd08b95755bf) fix(executor): Remove 15s guard for Docker executor. 
Fixes #6415 (#6427) - * [4347acffc](https://github.com/argoproj/argo-workflows/commit/4347acffc94b50e6e665045f47b07ea0eedd1611) fix(executor): remove unused import preventing compilation - * [1eaa38199](https://github.com/argoproj/argo-workflows/commit/1eaa3819902aef028151e07deccdad2c7cf4fc0d) fix(executor/emissary): fix nonroot sidecars + input/output params & artifacts (#6403) - * [060b727ee](https://github.com/argoproj/argo-workflows/commit/060b727eeedd32102d918caad50557f9e0aa8cca) fix(controller): Global parameter is not getting updated (#6401) - * [adc17ff26](https://github.com/argoproj/argo-workflows/commit/adc17ff267f3b0951c0bedf0db3c9eab20af7f7c) fix(controller): Force main container name to be "main" as per v3.0. Fixes #6405 (#6408) - * [069816a0a](https://github.com/argoproj/argo-workflows/commit/069816a0aaf89590b98257e1e7360c925ee16ad1) fix(controller): Mark workflows wait for semaphore as pending. Fixes #6351 (#6356) - * [791c26b3c](https://github.com/argoproj/argo-workflows/commit/791c26b3cd6f56af90bfd3b69187921753d61d82) fix(controller): allow initial duration to be 0 instead of current_time-0 (#6389) - * [bd757e86c](https://github.com/argoproj/argo-workflows/commit/bd757e86c21ad9b52473ea8f1c6e3e6730694260) fix: Server crash when opening timeline tab for big workflows (#6369) - * [8b49e8c3a](https://github.com/argoproj/argo-workflows/commit/8b49e8c3a58a487eb9767569ad02ce2ac8a967eb) fix(controller): allow workflow.duration to pass validator (#6376) - * [24ff9450a](https://github.com/argoproj/argo-workflows/commit/24ff9450ad436eff34e383ce9dd625f4b29e3737) fix(fields): handle nexted fields when excluding (#6359) - * [a83ec79dd](https://github.com/argoproj/argo-workflows/commit/a83ec79dddec3c030526e58e9e06b3dc0604e21f) feat(controller): sortDAGTasks supports sort by field Depends (#6307) - * [8472227f5](https://github.com/argoproj/argo-workflows/commit/8472227f5a23435253ad6bfaf732318afdde1bf8) fix(crd): temp fix 34s timeout bug for k8s 1.20+ (#6350) - * [0522a68fc](https://github.com/argoproj/argo-workflows/commit/0522a68fc595a4d199e2bf57a0574ef9f12f875f) Revert "feat: added support for GRPC_MESSAGE_SIZE env var (#6258)" - * [49db7cd60](https://github.com/argoproj/argo-workflows/commit/49db7cd6038172c0d6c784882a253386c457695f) feat: added support for GRPC_MESSAGE_SIZE env var (#6258) - -### Contributors - - * Alex Collins - * Alexander Matyushentsev - * Antoine Dao - * BOOK - * Saravanan Balasubramanian - * Tianchu Zhao - * Yuan (Bob) Gong - * dpeer6 - -## v3.1.2 (2021-07-15) - - * [98721a96e](https://github.com/argoproj/argo-workflows/commit/98721a96eef8e4fe9a237b2105ba299a65eaea9a) fixed test - * [6041ffe22](https://github.com/argoproj/argo-workflows/commit/6041ffe228c8f79e6578e097a357dfebf768c78f) fix(controller): Not updating StoredWorkflowSpec when WFT changed during workflow running (#6342) - * [d14760182](https://github.com/argoproj/argo-workflows/commit/d14760182851c280b11d688b70a81f3fe014c52f) fix(cli): v3.1 Argo Auth Token (#6344) - * [ce5679c4b](https://github.com/argoproj/argo-workflows/commit/ce5679c4bd1040fa5d68eea24a4a82ef3844d43c) feat(controller): Store artifact repository in workflow status. Fixes - * [74581157f](https://github.com/argoproj/argo-workflows/commit/74581157f9fd8190027021dd5af409cd3e3e781f) fix(executor): Tolerate docker re-creating containers. Fixes #6244 (#6252) - * [cd208e27f](https://github.com/argoproj/argo-workflows/commit/cd208e27ff0e45f82262b18ebb65081ae5978761) fix(executor): emissary - make /var/run/argo files readable from non-root users. 
Fixes #6238 (#6304) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Yuan (Bob) Gong - -## v3.1.1 (2021-06-28) - - * [4d12bbfee](https://github.com/argoproj/argo-workflows/commit/4d12bbfee13faea6d2715c809fab40ae33a66074) fix(conttroller): Always set finishedAt dote. Fixes #6135 (#6139) - * [401a66188](https://github.com/argoproj/argo-workflows/commit/401a66188d25bef16078bba370fc26d1fbd56288) fix: Fix certain sibling tasks not connected to parent (#6193) - * [99b42eb1c](https://github.com/argoproj/argo-workflows/commit/99b42eb1c0902c7df6a3e2904dafd93b294c9e96) fix(controller): Wrong validate order when validate DAG task's argument (#6190) - * [18b2371e3](https://github.com/argoproj/argo-workflows/commit/18b2371e36f106062d1a2cc2e81ca37052b8296b) fix(controller): dehydrate workflow before deleting offloaded node status (#6112) - * [a58cbdc39](https://github.com/argoproj/argo-workflows/commit/a58cbdc3966188a1ea5d9207f99e289ee758804f) fix(controller): Allow retry on transient errors when validating workflow spec. Fixes #6163 (#6178) - -### Contributors - - * Alex Collins - * BOOK - * Reijer Copier - * Simon Behar - * Yuan Tang - -## v3.1.0 (2021-06-21) - - * [fad026e36](https://github.com/argoproj/argo-workflows/commit/fad026e367dd08b0217155c433f2f87c310506c5) fix(ui): Fix event-flow scrolling. Fixes #6133 (#6147) - * [422f5f231](https://github.com/argoproj/argo-workflows/commit/422f5f23176d5ef75e58c5c33b744cf2d9ac38ca) fix(executor): Capture emissary main-logs. Fixes #6145 (#6146) - * [e818b15cc](https://github.com/argoproj/argo-workflows/commit/e818b15ccfdd51b231cb0f9e8872cc673f196e61) fix(ui): Fix-up local storage namespaces. Fixes #6109 (#6144) - * [681e1e42a](https://github.com/argoproj/argo-workflows/commit/681e1e42aa1126d38bbc0cfe4bbd7b1664137c16) fix(controller): Performance improvement for Sprig. Fixes #6135 (#6140) - * [99139fea8](https://github.com/argoproj/argo-workflows/commit/99139fea8ff6325d02bb97a5966388aa37e3bd30) fix(executor): Check whether any errors within checkResourceState() are transient. Fixes #6118. (#6134) - -### Contributors - - * Alex Collins - * Yuan Tang - -## v3.1.0-rc14 (2021-06-10) - - * [d385e6107](https://github.com/argoproj/argo-workflows/commit/d385e6107ab8d4ea4826bd6972608f8fbc86fbe5) fix(executor): Fix docker not terminating. Fixes #6064 (#6083) - * [83da6deae](https://github.com/argoproj/argo-workflows/commit/83da6deae5eaaeca16e49edb584a0a46980239bb) feat(manifests): add 'app' label to workflow-controller-metrics service (#6079) - * [1c27b5f90](https://github.com/argoproj/argo-workflows/commit/1c27b5f90dea80b5dc7f088bef0dc908e8c19661) fix(executor): Fix emissary kill. Fixes #6030 (#6084) - -### Contributors - - * Alex Collins - * Daan Seynaeve - -## v3.1.0-rc13 (2021-06-08) - - * [5d4947ccf](https://github.com/argoproj/argo-workflows/commit/5d4947ccf3051a14aa7ca260ea16cdffffc20e6f) chore(executor): Adjust resource JSON object log to debug level (#6100) - * [0e37f6632](https://github.com/argoproj/argo-workflows/commit/0e37f6632576ffd5365c7f48d455bd9a9a0deefc) fix(executor): Improve artifact error messages. 
Fixes #6070 (#6086)
- * [4bb4d528e](https://github.com/argoproj/argo-workflows/commit/4bb4d528ee4decba0ac4d736ff1ba6302163fccf) fix(ui): Tweak workflow log viewer (#6074)
- * [f8f63e628](https://github.com/argoproj/argo-workflows/commit/f8f63e628674fcb6755e9ef50bea1d148ba49ac2) fix(controller): Handling panic in leaderelection (#6072)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Yuan Tang
-
-## v3.1.0-rc12 (2021-06-02)
-
- * [803855bc9](https://github.com/argoproj/argo-workflows/commit/803855bc9754b301603903ec7cb4cd9a2979a12b) fix(executor): Fix compatibility issue when selfLink is no longer populated for k8s>=1.21. Fixes #6045 (#6014)
- * [1f3493aba](https://github.com/argoproj/argo-workflows/commit/1f3493abaf18d27e701b9f14083dae35447d289e) feat(ui): Add text filter to logs. Fixes #6059 (#6061)
- * [eaeaec71f](https://github.com/argoproj/argo-workflows/commit/eaeaec71fd1fb2b0f2f217aada7f47036ace71dd) fix(controller): Only clean-up pod when both main and wait containers have terminated. Fixes #5981 (#6033)
-
-### Contributors
-
- * Alex Collins
- * Yuan Tang
-
-## v3.1.0-rc11 (2021-06-01)
-
- * [ee283ee6d](https://github.com/argoproj/argo-workflows/commit/ee283ee6d360650622fc778f38d94994b20796ab) fix(ui): Add editor nav and make taller (#6047)
- * [529c30dd5](https://github.com/argoproj/argo-workflows/commit/529c30dd53ba617a4fbea649fa3f901dd8066af6) fix(ui): Changed placing of chat/get help button. Fixes #5817 (#6016)
- * [e262b3afd](https://github.com/argoproj/argo-workflows/commit/e262b3afd7c8ab77ef14fb858a5795b73630485c) feat(controller): Add per-namespace parallelism limits. Closes #6037 (#6039)
-
-### Contributors
-
- * Alex Collins
- * Kasper Aaquist Johansen
-
-## v3.1.0-rc10 (2021-05-27)
-
- * [73539fadb](https://github.com/argoproj/argo-workflows/commit/73539fadbe81b644b912ef0ddddebb178c97cc94) feat(controller): Support rate-limiting pod creation. (#4892)
- * [e566c106b](https://github.com/argoproj/argo-workflows/commit/e566c106bbe9baf8ab3628a80235467bb867b57e) fix(server): Only hydrate nodes if they are needed. Fixes #6000 (#6004)
- * [d218ea717](https://github.com/argoproj/argo-workflows/commit/d218ea71776fa7d072bbeafa614b36eb34940023) fix(ui): typo (#6027)
-
-### Contributors
-
- * Alex Collins
- * Stephan van Maris
-
-## v3.1.0-rc9 (2021-05-26)
-
- * [bad615550](https://github.com/argoproj/argo-workflows/commit/bad61555093f59a647b20df75f83e1cf9687f7b5) fix(ui): Fix link for archived logs (#6019)
- * [3cfc96b7c](https://github.com/argoproj/argo-workflows/commit/3cfc96b7c3c90edec77be0841152dad4d9f18f52) revert: "fix(executor): Fix compatibility issue with k8s>=1.21 when s… (#6012)
- * [7e27044b7](https://github.com/argoproj/argo-workflows/commit/7e27044b71620dc7c7dd338eac873e0cff244e2d) fix(controller): Increase readiness timeout from 1s to 30s (#6007)
- * [79f5fa5f3](https://github.com/argoproj/argo-workflows/commit/79f5fa5f3e348fca5255d9c98b3fb186bc23cb3e) feat: Pass include script output as an environment variable (#5994)
- * [d7517cfca](https://github.com/argoproj/argo-workflows/commit/d7517cfcaf141fc06e19720996d7b43ddb3fa7b6) Mention that 'archive' does not support logs of pods (#6005)
- * [d7c5cf6c9](https://github.com/argoproj/argo-workflows/commit/d7c5cf6c95056a82ea94e37da925ed566991e548) fix(executor): Fix compatibility issue with k8s>=1.21 when selfLink is no longer populated (#5992)
- * [a2c6241ae](https://github.com/argoproj/argo-workflows/commit/a2c6241ae21e749a3c5865153755136ddd878d5c) fix(validate): Fix DAG validation on task names when depends/dependencies is not used. Fixes #5993 (#5998)
- * [a99d5b821](https://github.com/argoproj/argo-workflows/commit/a99d5b821bee5edb296f8af1c3badb503025f026) fix(controller): Fix sync manager panic. Fixes #5939 (#5991)
- * [80f8473a1](https://github.com/argoproj/argo-workflows/commit/80f8473a13482387b9f54f9288f4a982a210cdea) fix(executor): resource patch for non-json patches regression (#5951)
-
-### Contributors
-
- * Alex Collins
- * Antony Chazapis
- * Christophe Blin
- * Peixuan Ding
- * William Reed
- * Yuan Tang
-
-## v3.1.0-rc8 (2021-05-24)
-
- * [f3d95821f](https://github.com/argoproj/argo-workflows/commit/f3d95821faf8b87d416a2d6ee1334b9e45869c84) fix(controller): Listen on :6060 (#5988)
-
-### Contributors
-
- * Alex Collins
-
-## v3.1.0-rc7 (2021-05-24)
-
- * [d55a8dbb8](https://github.com/argoproj/argo-workflows/commit/d55a8dbb841a55db70b96568fdd9ef402548d567) feat(controller): Add liveness probe (#5875)
- * [46dcaea53](https://github.com/argoproj/argo-workflows/commit/46dcaea53d91b522dfd87b442ce949e3a4de7e76) fix(controller): Lock nodes in pod reconciliation. Fixes #5979 (#5982)
- * [60b6b5cf6](https://github.com/argoproj/argo-workflows/commit/60b6b5cf64adec380bc195aa87e4f0b12182fe16) fix(controller): Empty global output param crashes (#5931)
- * [453086f94](https://github.com/argoproj/argo-workflows/commit/453086f94c9b540205784bd2944541b1b43555bd) fix(ui): ensure that the artifacts property exists before inspecting it (#5977)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * dherman
-
-## v3.1.0-rc6 (2021-05-21)
-
- * [67a38e33e](https://github.com/argoproj/argo-workflows/commit/67a38e33ed1a4d33085c9f566bf64b8b15c8199e) feat: add disableSubmodules for git artifacts (#5910)
- * [7b54b182c](https://github.com/argoproj/argo-workflows/commit/7b54b182cfec367d876aead36ae03a1a16632527) small fixes of spelling mistakes (#5886)
- * [56b71d07d](https://github.com/argoproj/argo-workflows/commit/56b71d07d91a5aae05b087577f1b47c2acf745df) fix(controller): Revert cb9676e88857193b762b417f2c45b38e2e0967f9. Fixes #5852 (#5933)
-
-### Contributors
-
- * Alex Collins
- * Lars Kerick
- * Zach Aller
-
-## v3.1.0-rc5 (2021-05-17)
-
- * [e05f7cbe6](https://github.com/argoproj/argo-workflows/commit/e05f7cbe624ffada191344848d3b0b7fb9ba79ae) fix(controller): Suspend and Resume is not working in WorkflowTemplateRef scenario (#5802)
- * [8fde4e4f4](https://github.com/argoproj/argo-workflows/commit/8fde4e4f46f59a6af50e5cc432f632f6f5e774d9) fix(installation): Enable capacity to override namespace with Kustomize (#5907)
-
-### Contributors
-
- * Daverkex
- * Saravanan Balasubramanian
-
-## v3.1.0-rc4 (2021-05-14)
-
- * [128861c50](https://github.com/argoproj/argo-workflows/commit/128861c50f2b60daded5abb7d47524e124451371) feat: DAG/TASK Custom Metrics Example (#5894)
- * [0acaf3b40](https://github.com/argoproj/argo-workflows/commit/0acaf3b40b7704017842c81c0a9108fe4eee906e) Update configure-artifact-repository.md (#5909)
-
-### Contributors
-
- * Everton
- * Jerguš Lejko
-
-## v3.1.0-rc3 (2021-05-13)
-
- * [e71d33c54](https://github.com/argoproj/argo-workflows/commit/e71d33c54bd3657a4d63ae8bfa3d899b3339d0fb) fix(controller): Fix pod spec jumbling. Fixes #5897 (#5899)
- * [9a10bd475](https://github.com/argoproj/argo-workflows/commit/9a10bd475b273a1bc66025b89c8237a2263c840d) fix: workflow-controller: use parentId (#5831)
-
-### Contributors
-
- * Alex Collins
- * Jan Heylen
-
-## v3.1.0-rc2 (2021-05-12)
-
-
-### Contributors
-
-
-## v3.1.0-rc1 (2021-05-12)
-
- * [3fff791e4](https://github.com/argoproj/argo-workflows/commit/3fff791e4ef5b7e1de82ccb36cae327e8eb726f6) build!: Automatically add manifests to `v*` tags (#5880)
- * [2687e240c](https://github.com/argoproj/argo-workflows/commit/2687e240c536900a7119a9b988103f5a68234cc5) fix(controller): Fix active pods count in node pending status with pod deleted. (#5836)
- * [3428b832d](https://github.com/argoproj/argo-workflows/commit/3428b832d68e1cfb42f4210c3ab5ff4a99620d70) fix(controller): Error template ref exit handlers. Fixes #5835 (#5837)
- * [1a5393593](https://github.com/argoproj/argo-workflows/commit/1a5393593c9cc4b61734af63568a21e50b6c4f8c) fix(controller): Remove un-safe Sprig funcs. Fixes #5286 (#5850)
- * [c6825acca](https://github.com/argoproj/argo-workflows/commit/c6825acca43ffeb537f8e0d3b62c2addd0d49389) fix(executor): Enable PNS executor to better kill sidecars. Fixes #5779 (#5794)
- * [2b3396fad](https://github.com/argoproj/argo-workflows/commit/2b3396fad602013801f5c517567319f60bedb0bb) feat: configurable windows os version (#5816)
- * [d66954f5b](https://github.com/argoproj/argo-workflows/commit/d66954f5b9b09e030408483502b03aa29727039a) feat(controller): Add config for potential CPU hogs (#5853)
- * [7ec262a56](https://github.com/argoproj/argo-workflows/commit/7ec262a56b7e043aec5913fc9a9be8c6b0a9067d) feat(cli): Support input from device files for lint command (#5851)
- * [ab786ecba](https://github.com/argoproj/argo-workflows/commit/ab786ecba6eb3e9d3fa7a717ded42727b8b64df8) fix: Reset started time for each node to current when retrying workflow (#5801)
- * [e332be5ec](https://github.com/argoproj/argo-workflows/commit/e332be5ec2048c7a6491407b059339d4b2439a2e) fix(ui): don't show cluster workflows in namespaced mode. Closes #5841 (#5846)
- * [c59f59ad0](https://github.com/argoproj/argo-workflows/commit/c59f59ad0e7609cf8b87d6733f73efa9ccf44484) feat: Support Arguments in Exit Handler (#5455)
- * [5ff48bbc5](https://github.com/argoproj/argo-workflows/commit/5ff48bbc5c1b1a4589bdad9abacb7b64a37abfe1) feat: Allow to toggle GZip implementations in docker executor (#5698)
- * [86545f63e](https://github.com/argoproj/argo-workflows/commit/86545f63e48007684e229c6f35a7dac436d0c1a8) 5739 (#5797)
- * [461b0b3cd](https://github.com/argoproj/argo-workflows/commit/461b0b3cda111da1461c217d4a375c9e8a6fba50) fix(executor): Fix artifactory saving files. Fixes #5733 (#5775)
- * [507b92cf9](https://github.com/argoproj/argo-workflows/commit/507b92cf93337e18e3f64716081a797e0f60973e) feat(cli): resubmit workflows by label and field selector (#5807)
- * [bdd44c723](https://github.com/argoproj/argo-workflows/commit/bdd44c723a324d1c20bcc97f53022b586bfb8348) fix: Add note about hyphenated variables (#5805)
- * [b9a79e065](https://github.com/argoproj/argo-workflows/commit/b9a79e065bffb5f442e185767074d1b616ae2aa7) feat(cli): Retry workflows by label selector and field selector (#5795)
- * [8f2acee32](https://github.com/argoproj/argo-workflows/commit/8f2acee32e9921241a4e91eee2da4a9e8b5f3f44) fix: Node set updating global output parameter updates global. #5699 (#5700)
- * [076ff18a8](https://github.com/argoproj/argo-workflows/commit/076ff18a804bbd3b4aba67024ac73dae82c2f049) feat(controller): Add validation for ContainerSet (#5758)
- * [4b3a30f4e](https://github.com/argoproj/argo-workflows/commit/4b3a30f4e7e320538d256adb542715813a5a716d) fix: Reset workflow started time to current when retrying workflow. Fixes #5796 (#5798)
- * [4af011318](https://github.com/argoproj/argo-workflows/commit/4af01131889a48989db0c251b8d9711e19ca3325) fix: change log level to warn level (#5790)
- * [7e974dcda](https://github.com/argoproj/argo-workflows/commit/7e974dcda79049cbc931169e7134e113bcea5be8) fix(docs): Fix yaml snippet (#5788)
- * [4a55e6f0b](https://github.com/argoproj/argo-workflows/commit/4a55e6f0bce53e47066cef75f7aca6c10fd490d6) feat: Support bucket lifecycle for OSS artifact driver (#5731)
- * [3cdb22a1e](https://github.com/argoproj/argo-workflows/commit/3cdb22a1e18d02a91391c5282bba857ba3342ba6) feat: Emit WorkflowNodeRunning Event (#5531)
- * [66c770993](https://github.com/argoproj/argo-workflows/commit/66c7709937f84cd6c21d92b8e95871b83d808e72) upgrade github.com/gogo/protobuf (#5780)
- * [cb55cba07](https://github.com/argoproj/argo-workflows/commit/cb55cba07394cfaf44ae7180d950770c6880d0cb) fix(ui): Fix a UI dropdown flickering issue (#5772)
- * [60a64c825](https://github.com/argoproj/argo-workflows/commit/60a64c8254d406ff85e8f936d6c76da8d7a028e8) feat(cli): Stop workflows by label selector and field selector (#5752)
- * [05af5edfc](https://github.com/argoproj/argo-workflows/commit/05af5edfc6931e0ea53b0544de579b7ffd56ee86) fix(ui): Fix the UI crashing issue (#5751)
- * [407740046](https://github.com/argoproj/argo-workflows/commit/407740046f853e0cac485e410d276ce60a41f649) fix(ui): Remove the ability to change namespaces via the UI in Managed Namespace Mode. Closes #5577 (#5729)
- * [2a050348b](https://github.com/argoproj/argo-workflows/commit/2a050348b17274b3bf64ca0e4ca78f2142d6d62f) fix(ui): Fix workflow summary page unscrollable issue (#5743)
- * [500d93387](https://github.com/argoproj/argo-workflows/commit/500d93387c1593f3f2315ec633b9d363c7c21e44) fix(ui): Fix greediness in regex for auth token replacement (#5746)
- * [284adfe16](https://github.com/argoproj/argo-workflows/commit/284adfe16aeb11536a1c98f1956fdeb76dac4f1c) fix(server): Fix the issue where GetArtifact didn't look for input artifacts (#5705)
- * [511bbed2b](https://github.com/argoproj/argo-workflows/commit/511bbed2b35abad5144a99234f48f4dc03b3a97e) fix(ui): Fix workflow list table column width mismatch (#5736)
- * [0a1bff19d](https://github.com/argoproj/argo-workflows/commit/0a1bff19d066b0f1b839d8edeada819c0f08da57) chore(url): move edge case paths to /argo-workflows/ (#5730)
- * [2b8740943](https://github.com/argoproj/argo-workflows/commit/2b87409431bb778a4264296bea2fd4173d00651d) fix(executor): Remove unnecessary check on resource group (#5723)
- * [dba2c044e](https://github.com/argoproj/argo-workflows/commit/dba2c044e6d471f65dec868ff2453b90c088bd3e) fix: Only save memoization cache when node succeeded (#5711)
- * [8e9e6d676](https://github.com/argoproj/argo-workflows/commit/8e9e6d6760bc0dff260aef4296eac61e6d0bc72f) fix(controller): Fix cron timezone support. Fixes #5653 (#5712)
- * [0a6f2fc3a](https://github.com/argoproj/argo-workflows/commit/0a6f2fc3a8271e1a1d168100f0e12a6414114f5b) fix(ui): Fix `showWorkflows` button. Fixes #5645 (#5693)
- * [f96355631](https://github.com/argoproj/argo-workflows/commit/f963556312548edc38000b5c6ba36c8ed1c92d63) fix(ui): Fix YAML/JSON toggle. Fixes #5690 (#5694)
- * [b267e3cf8](https://github.com/argoproj/argo-workflows/commit/b267e3cf88d084d3dda10307af673753ac73b3af) fix(cli): Validate cron on update. Fixes #5691 (#5692)
- * [9a872de13](https://github.com/argoproj/argo-workflows/commit/9a872de13929af14cb2488b98e211ca857d4ee67) fix(executor): Ignore not existing metadata. Fixes #5656 (#5695)
- * [91c08cdd8](https://github.com/argoproj/argo-workflows/commit/91c08cdd83386bfcf48fcb237dd05216bc61b7a0) fix(executor): More logs for PNS sidecar termination. #5627 (#5683)
- * [f6be5691e](https://github.com/argoproj/argo-workflows/commit/f6be5691e5a25d3f82c708d0bb5bb2f099ab8966) fix(controller): Correct bug for repository ref without default key. Fixes #5646 (#5660)
- * [e3d1d1e82](https://github.com/argoproj/argo-workflows/commit/e3d1d1e822c01e2765bab2d57d9537849cd0f720) feat(controller): Allow to disable leader election (#5638) (#5648)
- * [cad916ef5](https://github.com/argoproj/argo-workflows/commit/cad916ef52ae1392369baca7e4aa781b7904165d) docs(tls): 3.0 defaults to tls enabled (#5686)
- * [860739147](https://github.com/argoproj/argo-workflows/commit/8607391477e816e6e685fa5719c0d3c55ff1bc00) feat(cli): Add offline linting (#5569)
- * [a01852364](https://github.com/argoproj/argo-workflows/commit/a01852364ba6c4208146ef676c5918dc3faa1b18) feat(ui): Support expression evaluation in links (#5666)
- * [24ac7252d](https://github.com/argoproj/argo-workflows/commit/24ac7252d27454b8f6d0cca02201fe23a35dd915) fix(executor): Correctly surface error when resource is deleted during status checking (#5675)
- * [3fab1e5d3](https://github.com/argoproj/argo-workflows/commit/3fab1e5d3c2bea4e498c6482ad902488a6c2b77b) docs(cron): add dst description (#5679)
- * [1d367ddfd](https://github.com/argoproj/argo-workflows/commit/1d367ddfd48d8d17b48cca83da9454cee5c6463f) fix(ui): strip inner quotes from argoToken (#5677)
- * [bf5d7bfab](https://github.com/argoproj/argo-workflows/commit/bf5d7bfab2d6dde057f3e79e5d0a2fb490a621ee) fix: Increase Name width to 3 and decrease NameSpace width to 1 (#5678)
- * [71dfc7974](https://github.com/argoproj/argo-workflows/commit/71dfc797425976e8b013d2b3e1daf46aa6ce04cf) feat(ui): support any yaml reference in link (#5667)
- * [ec3b82d92](https://github.com/argoproj/argo-workflows/commit/ec3b82d92ce0f9aba6cfb524b48a6400585441f8) fix: git clone on non-default branch fails (Fixes #5629) (#5630)
- * [d5e492c2a](https://github.com/argoproj/argo-workflows/commit/d5e492c2a2f2b5fd65d11c625f628ed75aa8a8ff) fix(executor): Failure node failed to get archived log (#5671)
- * [b7d69053d](https://github.com/argoproj/argo-workflows/commit/b7d69053dba478327b926041094349b7295dc499) fix(artifacts): only retry on transient S3 errors (#5579)
- * [defbd600e](https://github.com/argoproj/argo-workflows/commit/defbd600e37258c8cdf30f64d4da9f4563eb7901) fix: Default ARGO_SECURE=true. Fixes #5607 (#5626)
- * [46ec3028c](https://github.com/argoproj/argo-workflows/commit/46ec3028ca4299deff4966e647857003a89a3d66) fix: Make task/step name extractor robust (#5672)
- * [88917cbd8](https://github.com/argoproj/argo-workflows/commit/88917cbd81b5da45c840645ae156baa7afcb7bb6) fix: Surface error during wait timeout for OSS artifact driver API calls (#5601)
- * [b76fac754](https://github.com/argoproj/argo-workflows/commit/b76fac754298d0602a2da9902bafa2764e7f6bae) fix(ui): Fix editor. Fixes #5613 Fixes #5617 (#5620)
- * [9d175cf9b](https://github.com/argoproj/argo-workflows/commit/9d175cf9b9e0bd57e11ec4e4cce60a6d354ace05) fix(ui): various ui fixes (#5606)
- * [b4ce78bbe](https://github.com/argoproj/argo-workflows/commit/b4ce78bbef054e2f4f659e48459eec08a4addf97) feat: Identifiable user agents in various Argo commands (#5624)
- * [22a8e93c8](https://github.com/argoproj/argo-workflows/commit/22a8e93c8b52889e9119e6d15d1a9bcc6ae8134a) feat(executor): Support accessing output parameters by PNS executor running as non-root (#5564)
- * [2baae1dc2](https://github.com/argoproj/argo-workflows/commit/2baae1dc2fdf990530e62be760fc2ba4104fc286) add -o short option for argo cli get command (#5533)
- * [0edd32b5e](https://github.com/argoproj/argo-workflows/commit/0edd32b5e8ae3cbeaf6cb406d7344ff4801d36ba) fix(controller): Workflow hangs indefinitely during ContainerCreating if the Pod or Node unexpectedly dies (#5585)
- * [d0a0289ee](https://github.com/argoproj/argo-workflows/commit/d0a0289eea79527d825a10c35f8a9fcbaee29877) feat(ui): let workflow dag and node info scroll independently (#5603)
- * [2651bd619](https://github.com/argoproj/argo-workflows/commit/2651bd6193acc491f4a20b6e68c082227f9e60f6) fix: Improve error message when missing required fields in resource manifest (#5578)
- * [4f3bbdcbc](https://github.com/argoproj/argo-workflows/commit/4f3bbdcbc9c57dae6c2ce2b93f0395230501f749) feat: Support security token for OSS artifact driver (#5491)
- * [9b6c8b453](https://github.com/argoproj/argo-workflows/commit/9b6c8b45321c958b2055236b18449ba6db802878) fix: parse username from git url when using SSH key auth (#5156)
- * [7276bc399](https://github.com/argoproj/argo-workflows/commit/7276bc399eae7e318d1937b7b02f86fbe812f9e3) fix(controller): Consider nested expanded task in reference (#5594)
- * [4e450e250](https://github.com/argoproj/argo-workflows/commit/4e450e250168e6b4d51a126b784e90b11a0162bc) fix: Switch InsecureSkipVerify to true (#5575)
- * [ed54f158d](https://github.com/argoproj/argo-workflows/commit/ed54f158dd8b0b3cee5ba24d703e7de3552ea52d) fix(controller): clean up before insert into argo_archived_workflows_labels (#5568)
- * [2b3655ecb](https://github.com/argoproj/argo-workflows/commit/2b3655ecb117beb14bf6dca62b2610fb3ee33283) fix: Remove invalid label value for last hit timestamp from caches (#5528)
- * [2ba0a4369](https://github.com/argoproj/argo-workflows/commit/2ba0a4369af0860975250b5fd3d81c563822a6a1) fix(executor): GODEBUG=x509ignoreCN=0 (#5562)
- * [3c3754f98](https://github.com/argoproj/argo-workflows/commit/3c3754f983373189ad6d2252b251152e7cba1cf0) fix: Build static files in engineering builds (#5559)
- * [23ccd9cf3](https://github.com/argoproj/argo-workflows/commit/23ccd9cf3730e20cd49d37ec5540fea533713898) fix(cli): exit when calling subcommand node without args (#5556)
- * [aa0494859](https://github.com/argoproj/argo-workflows/commit/aa0494859341b02189f61561ab4f20ee91718d34) fix: Reference new argo-workflows url in in-app links (#5553)
- * [20f00470e](https://github.com/argoproj/argo-workflows/commit/20f00470e8177a89afd0676cedcfb8dac39b34de) fix(server): Disable CN check (Go 15 does not support). Fixes #5539 (#5550)
- * [872897ff9](https://github.com/argoproj/argo-workflows/commit/872897ff964df88995410cf2e7f9249439cf7461) fix: allow mountPaths with trailing slash (#5521)
- * [4c3b0ac53](https://github.com/argoproj/argo-workflows/commit/4c3b0ac530acaac22abb453df3de09e8c74068fb) fix(controller): Enable metrics server on stand-by controller (#5540)
- * [76b6a0eff](https://github.com/argoproj/argo-workflows/commit/76b6a0eff9345ff18f34ba3b2c44847c317293fb) feat(controller): Add last hit timestamp to memoization caches (#5487)
- * [a61d84cc0](https://github.com/argoproj/argo-workflows/commit/a61d84cc05b86719d1b2704ea1524afef5bbb9b5) fix: Default to insecure mode when no certs are present (#5511)
- * [4a1caca1e](https://github.com/argoproj/argo-workflows/commit/4a1caca1e52b0be87f5a1e05efc240722f2a4a49) fix: add softonic as user (#5522)
- * [bbdf651b7](https://github.com/argoproj/argo-workflows/commit/bbdf651b790a0b432d800362210c0f4f072922f6) fix: Spelling Mistake (#5507)
- * [b8af3411b](https://github.com/argoproj/argo-workflows/commit/b8af3411b17b5ab4b359852a66ecfc6999fc0da8) fix: avoid short names in deployment manifests (#5475)
- * [d964fe448](https://github.com/argoproj/argo-workflows/commit/d964fe4484c6ad4a313deb9994288d402a543018) fix(controller): Use node.Name instead of node.DisplayName for onExit nodes (#5486)
- * [80cea6a36](https://github.com/argoproj/argo-workflows/commit/80cea6a3679fa87983643defb6681881228043ae) fix(ui): Correct Argo Events swagger (#5490)
- * [865b1fe8b](https://github.com/argoproj/argo-workflows/commit/865b1fe8b501526555e3518410836e277d04184c) fix(executor): Always poll for Docker injected sidecars. Resolves #5448 (#5479)
- * [c13755b16](https://github.com/argoproj/argo-workflows/commit/c13755b1692c376468554c20a8fa3f5efd18d896) fix: avoid short names in Dockerfiles (#5474)
- * [beb0f26be](https://github.com/argoproj/argo-workflows/commit/beb0f26bed9d33d42d9153fdd4ffd24e7fe62ffd) fix: Add logging to aid troubleshooting (#5501)
- * [306594164](https://github.com/argoproj/argo-workflows/commit/306594164ab46d31ee1e7b0d7d773a857b52bdde) fix: Run controller as un-privileged (#5460)
- * [2a099f8ab](https://github.com/argoproj/argo-workflows/commit/2a099f8abf97f5be27738e93f76a3cb473622763) fix: certs in non-root (#5476)
- * [4eb351cba](https://github.com/argoproj/argo-workflows/commit/4eb351cbaf82bbee5903b91b4ef094190e1e0134) fix(ui): Multiple UI fixes (#5498)
- * [dfe6ceb43](https://github.com/argoproj/argo-workflows/commit/dfe6ceb430d2bd7c13987624105450a0994e08fc) fix(controller): Fix workflows with retryStrategy left Running after completion (#5497)
- * [ea26a964b](https://github.com/argoproj/argo-workflows/commit/ea26a964b7dffed2fe147db69ccce5c5f542c308) fix(cli): Linting improvements (#5224)
- * [513756ebf](https://github.com/argoproj/argo-workflows/commit/513756ebff2d12c1938559a3109d3d13211cd14a) fix(controller): Only set global parameters after workflow validation succeeded to avoid panics (#5477)
- * [9a1c046ee](https://github.com/argoproj/argo-workflows/commit/9a1c046ee4e2a2cabc3e358cf8093e71dd8d4090) fix(controller): Enhance output capture (#5450)
- * [46aaa700e](https://github.com/argoproj/argo-workflows/commit/46aaa700ebab322e112fa0b54cde96fb2b865ea9) feat(server): Disable Basic auth if server is not running in client mode (#5401)
- * [e638981bf](https://github.com/argoproj/argo-workflows/commit/e638981bf0542acc9ee57820849ee569d0dcc91f) fix(controller): Add permissions to create/update configmaps for memoization in quick-start manifests (#5447)
- * [b01ca3a1d](https://github.com/argoproj/argo-workflows/commit/b01ca3a1d5f764c8366afb6e31a7de9009880f6b) fix(controller): Fix the issue of {{retries}} in PodSpecPatch not being updated (#5389)
- * [72ee1cce9](https://github.com/argoproj/argo-workflows/commit/72ee1cce9e5ba874f3cb84fe1483cb28dacdee45) fix: Set daemoned nodes to Succeeded when boundary ends (#5421)
- * [d9f201001](https://github.com/argoproj/argo-workflows/commit/d9f201001bb16b0610e2534515b4aadf38e6f2b2) fix(executor): Ignore non-running Docker kill errors (#5451)
- * [7e4e1b78c](https://github.com/argoproj/argo-workflows/commit/7e4e1b78c9be52066573c915aba45d30edff1765) feat: Template defaults (#5410)
- * [440a68976](https://github.com/argoproj/argo-workflows/commit/440a689760b56e35beaf3eeb22f276ef71a68743) fix: Fix getStepOrDAGTaskName (#5454)
- * [8d2006181](https://github.com/argoproj/argo-workflows/commit/8d20061815b1021558c2f8cca6b3b04903781b5a) fix: Various UI fixes (#5449)
- * [2371a6d3f](https://github.com/argoproj/argo-workflows/commit/2371a6d3f49f0c088074a8829e37463d99fc7acc) fix(executor): PNS support artifacts for short-running containers (#5427)
- * [07ef0e6b8](https://github.com/argoproj/argo-workflows/commit/07ef0e6b876fddef6e48e889fdfd471af50864a5) fix(test): TestWorkflowTemplateRefWithShutdownAndSuspend flakiness (#5441)
- * [c16a471cb](https://github.com/argoproj/argo-workflows/commit/c16a471cb9927248ba84400ec45763f014ec6a3b) fix(cli): Only append parse result when not nil to avoid panic (#5424)
- * [8f03970be](https://github.com/argoproj/argo-workflows/commit/8f03970bea3749c0b338dbf533e81ef02c597100) fix(ui): Fix link button. Fixes #5429 (#5430)
- * [f4432043c](https://github.com/argoproj/argo-workflows/commit/f4432043c5c1c26612e235bb7069e5c86ec2d050) fix(executor): Surface error when wait container fails to establish pod watch (#5372)
- * [d71786571](https://github.com/argoproj/argo-workflows/commit/d717865716ea399284c6193ceff9970e66bc5f45) feat(executor): Move exit code capture to controller. See #5251 (#5328)
- * [04f3a957b](https://github.com/argoproj/argo-workflows/commit/04f3a957be7ad9a1f99183c18a900264cc524ed8) fix(test): Fix TestWorkflowTemplateRefWithShutdownAndSuspend flakiness (#5418)
- * [ed957dd9c](https://github.com/argoproj/argo-workflows/commit/ed957dd9cf257b1db9a71dcdca49fc38678a4dcb) feat(executor): Switch to use SDK and poll-based resource status checking (#5364)
- * [d3eeddb1f](https://github.com/argoproj/argo-workflows/commit/d3eeddb1f5672686d349da7f99517927cad04953) feat(executor): Add injected sidecar support to Emissary (#5383)
- * [189b6a8e3](https://github.com/argoproj/argo-workflows/commit/189b6a8e3e0b0d4601d00417b9d205f3c1f77250) fix: Do not allow cron workflow names with more than 52 chars (#5407)
- * [8e137582c](https://github.com/argoproj/argo-workflows/commit/8e137582cc41465f07226f8ab0191bebf3c11106) feat(executor): Reduce poll time 3s to 1s for PNS and Emissary executors (#5386)
- * [b24aaeaff](https://github.com/argoproj/argo-workflows/commit/b24aaeaffd2199794dc0079a494aac212b6e83a5) feat: Allow transient errors in StopWorkflow() (#5396)
- * [1ec7ac0fa](https://github.com/argoproj/argo-workflows/commit/1ec7ac0fa0155f936a407887117c8496bba42241) fix(controller): Fixed wrong error message (#5390)
- * [4b7e3513e](https://github.com/argoproj/argo-workflows/commit/4b7e3513e72d88c0f20cbb0bfc659bd16ef2a629) fix(ui): typo (#5391)
- * [982e5e9df](https://github.com/argoproj/argo-workflows/commit/982e5e9df483e0ce9aa43080683fabadf54e83f2) fix(test): TestWorkflowTemplateRefWithShutdownAndSuspend flaky (#5381)
- * [57c05dfab](https://github.com/argoproj/argo-workflows/commit/57c05dfabb6d5792c29b4d19a7b4733dc4354388) feat(controller): Add failFast flag to DAG and Step templates (#5315)
- * [fcb098995](https://github.com/argoproj/argo-workflows/commit/fcb098995e4703028e09e580cb3909986a65a595) fix(executor): Kill injected sidecars. Fixes #5337 (#5345)
- * [1f7cf1e3b](https://github.com/argoproj/argo-workflows/commit/1f7cf1e3b31d06d0a4bf32ed0ac1fd0e3ae77262) feat: Add helper functions to expr when parsing metadata. Fixes #5351 (#5374)
- * [d828717c5](https://github.com/argoproj/argo-workflows/commit/d828717c51f9ba4275c47d5878b700d7477dcb7b) fix(controller): Fix `podSpecPatch` (#5360)
- * [2d331f3a4](https://github.com/argoproj/argo-workflows/commit/2d331f3a47a8bc520873f4a4fc95d42efe995d35) fix: Fix S3 file loading (#5353)
- * [9faae18a1](https://github.com/argoproj/argo-workflows/commit/9faae18a1d2d7c890510e01abc18402ac9dccc1b) fix(executor): Make docker executor more robust. (#5363)
- * [e0f71f3af](https://github.com/argoproj/argo-workflows/commit/e0f71f3af750064d86c1a5de658db75572f12a01) fix(executor): Fix resource patch when not providing flags. Fixes #5310 (#5311)
- * [94e155b08](https://github.com/argoproj/argo-workflows/commit/94e155b0839edf2789175624dac46d38bdd424ee) fix(controller): Correctly log pods/exec call (#5359)
- * [80b5ab9b8](https://github.com/argoproj/argo-workflows/commit/80b5ab9b8e35b4dba71396062abe32918cd76ddd) fix(ui): Fix container-set log viewing in UI (#5348)
- * [bde9f217e](https://github.com/argoproj/argo-workflows/commit/bde9f217ee19f69230a7ad2d256b86b4b6c28f58) fix: More Makefile fixes (#5347)
- * [849a5f9aa](https://github.com/argoproj/argo-workflows/commit/849a5f9aaa75f6ee363708113dae32ce6bc077c9) fix: Ensure release images are 'clean' (#5344)
- * [23b8c0319](https://github.com/argoproj/argo-workflows/commit/23b8c031965d5f4bae4bb8f3134a43eec975d6ab) fix: Ensure DEV_BRANCH is correct (#5343)
- * [ba949c3a6](https://github.com/argoproj/argo-workflows/commit/ba949c3a64e203197dee4f1d9837c47a993132b6) fix(executor): Fix container set bugs (#5317)
- * [9d2e9615e](https://github.com/argoproj/argo-workflows/commit/9d2e9615e4cf7739aabb1df4601265b078d98738) feat: Support structured JSON logging for controller, executor, and server (#5158)
- * [7fc1f2f24](https://github.com/argoproj/argo-workflows/commit/7fc1f2f24ebeaba2779140fbc17a4d9745860d62) fix(test): Flaky TestWorkflowShutdownStrategy (#5331)
- * [3dce211c5](https://github.com/argoproj/argo-workflows/commit/3dce211c54e6c54cf55819486133f1d2617bd13b) fix: Only retry on transient errors for OSS artifact driver (#5322)
- * [8309fd831](https://github.com/argoproj/argo-workflows/commit/8309fd83169e3540123e44c9f2d427ff34cea393) fix: Minor UI fixes (#5325)
- * [67f8ca27b](https://github.com/argoproj/argo-workflows/commit/67f8ca27b323aa9fe3eac7e5ece9fc5b2969f4fd) fix: Disallow object names with more than 63 chars (#5324)
- * [b048875dc](https://github.com/argoproj/argo-workflows/commit/b048875dc55aba9bb07d7ee6ea2f6290b82798e6) fix(executor): Delegate PNS wait to K8SAPI executor. (#5307)
- * [a5d1accff](https://github.com/argoproj/argo-workflows/commit/a5d1accffcd48c1a666f0c733787087f26d58b87) fix(controller): shutdownstrategy on running workflow (#5289)
- * [112378fc7](https://github.com/argoproj/argo-workflows/commit/112378fc70818d45ef41a6acc909be1934dc99fb) fix: Backward compatible workflowTemplateRef from 2.11.x to 2.12.x (#5314)
- * [103bf2bca](https://github.com/argoproj/argo-workflows/commit/103bf2bcaa72f42286ebece1f726d599cbeda088) feat(executor): Configurable retry backoff settings for workflow executor (#5309)
- * [2e857f095](https://github.com/argoproj/argo-workflows/commit/2e857f095621c385b2541b2bff89cac7f9debaf8) fix: Makefile target (#5313)
- * [1c6775a04](https://github.com/argoproj/argo-workflows/commit/1c6775a04fdf702a666b57dd6e3ddfcd0e4cb238) feat: Track nodeView tab in URL (#5300)
- * [dc5bb12e5](https://github.com/argoproj/argo-workflows/commit/dc5bb12e53c22388ae618b8897d1613cacc9f61d) fix: Use ScopedLocalStorage instead of direct localStorage (#5301)
- * [a31fd4456](https://github.com/argoproj/argo-workflows/commit/a31fd44560587e9a24f81d7964a855eabd6c1b31) feat: Improve OSS artifact driver usability when load/save directories (#5293)
- * [757e0be18](https://github.com/argoproj/argo-workflows/commit/757e0be18e34c5d1c34bba40aa925e0c5264d727) fix(executor): Enhance PNS executor. Resolves #5251 (#5296)
- * [78ec644cd](https://github.com/argoproj/argo-workflows/commit/78ec644cd9a30539397dda3359bcf9be91d37767) feat: Conditional Artifacts and Parameters (#4987)
- * [1a8ce1f13](https://github.com/argoproj/argo-workflows/commit/1a8ce1f1334e34b09cb4e154e2993ec4fc610b4b) fix(executor): Fix emissary resource template bug (#5295)
- * [8729587ee](https://github.com/argoproj/argo-workflows/commit/8729587eec647e3f75181888fa3a23d7f9c1d102) feat(controller): Container set template. Closes #2551 (#5099)
- * [e56da57a3](https://github.com/argoproj/argo-workflows/commit/e56da57a3bc5cc926079f656a397b4140a6833f8) fix: Use bucket.ListObjects() for OSS ListObjects() implementation (#5283)
- * [b6961ce6f](https://github.com/argoproj/argo-workflows/commit/b6961ce6f9f6cb3bb6c033142fc9c7f304e752bc) fix: Fixes around archiving workflows (#5278)
- * [ab68ea4c3](https://github.com/argoproj/argo-workflows/commit/ab68ea4c345c698f61cd36c074cde1dd796c1a11) fix: Correctly log sub-resource Kubernetes API requests (#5276)
- * [66fa8da0f](https://github.com/argoproj/argo-workflows/commit/66fa8da0f6cef88e49b6c8112c0ac4b0004e1187) feat: Support ListObjects() for Alibaba OSS artifact driver (#5261)
- * [b062cbf04](https://github.com/argoproj/argo-workflows/commit/b062cbf0498592ed27732049dfb2fe2b5c569f14) fix: Fix swapped artifact repository key and ref in error message (#5272)
- * [69c40c09a](https://github.com/argoproj/argo-workflows/commit/69c40c09a491fda1a0bc8603aa397f908cc5d968) fix(executor): Fix concurrency error in PNS executor. Fixes #5250 (#5258)
- * [9b538d922](https://github.com/argoproj/argo-workflows/commit/9b538d9221d7dd6e4c4640c9c6d8d861e85a038a) fix(executor): Fix docker "created" issue. Fixes #5252 (#5249)
- * [07283cda6](https://github.com/argoproj/argo-workflows/commit/07283cda6f2de21865bbad53f731c0530e5d307a) fix(controller): Take labels change into account in SignificantPodChange() (#5253)
- * [c4bcabd7c](https://github.com/argoproj/argo-workflows/commit/c4bcabd7c4ae253f8fefcf9a4f143614d1c38e19) fix(controller): Work-around Golang bug. Fixes #5192 (#5230)
- * [e6fa41a1b](https://github.com/argoproj/argo-workflows/commit/e6fa41a1b91be2e56884ca16427aaaae4558fa00) feat(controller): Expression template tags. Resolves #4548 & #1293 (#5115)
- * [bd4b46cd1](https://github.com/argoproj/argo-workflows/commit/bd4b46cd13d955826c013ec3e58ce8184765c9ea) feat(controller): Allow to modify time related configurations in leader election (#5234)
- * [cb9676e88](https://github.com/argoproj/argo-workflows/commit/cb9676e88857193b762b417f2c45b38e2e0967f9) feat(controller): Reused existing workflow informer. Resolves #5202 (#5204)
- * [d7dc48c11](https://github.com/argoproj/argo-workflows/commit/d7dc48c111948611b57254cc4d039adfd71cd205) fix(controller): Leader lease shared name improvements (#5218)
- * [2d2fba30c](https://github.com/argoproj/argo-workflows/commit/2d2fba30c4aeaf7d57d3b0f4bef62fb89d139805) fix(server): Enable HTTPS probe for TLS by default. See #5205 (#5228)
- * [fb19af1cf](https://github.com/argoproj/argo-workflows/commit/fb19af1cf9bb065ecb1b57533c8d9f68c6528461) fix: Flakey TestDataTransformationArtifactRepositoryRef (#5226)
- * [6412bc687](https://github.com/argoproj/argo-workflows/commit/6412bc687e7a030422163eeb85a6cf3fd74820b8) fix: Do not display pagination warning when there is no pagination (#5221)
- * [0c226ca49](https://github.com/argoproj/argo-workflows/commit/0c226ca49e6b709cc2e3a63305ce8676be9117f3) feat: Support for data sourcing and transformation with `data` template (#4958)
- * [7a91ade85](https://github.com/argoproj/argo-workflows/commit/7a91ade858aea6fe4012b3ae5a416db87821a76a) chore(server): Enable TLS by default. Resolves #5205 (#5212)
- * [01d310235](https://github.com/argoproj/argo-workflows/commit/01d310235a9349e6d552c758964cc2250a9e9616) chore(server)!: Required authentication by default. Resolves #5206 (#5211)
- * [694690b0e](https://github.com/argoproj/argo-workflows/commit/694690b0e6211d97f8047597fa5045e84e004ae2) fix: Checkbox is not clickable (#5213)
- * [f0e8df07b](https://github.com/argoproj/argo-workflows/commit/f0e8df07b855219866f35f86903e557a10ef260a) fix(controller): Leader Lease Shared Name (#5214)
- * [47ac32376](https://github.com/argoproj/argo-workflows/commit/47ac32376d4d75c43106ee16106d819d314c0a2d) fix(controller): Support emissary on Windows (#5203)
- * [8acdb1baf](https://github.com/argoproj/argo-workflows/commit/8acdb1baf020adf386528bb33b63715aaf20e724) fix(controller): More emissary minor bugs (#5193)
- * [48811117c](https://github.com/argoproj/argo-workflows/commit/48811117c83e041c1bef8db657e0b566a1744b0a) feat(cli): Add cost optimization nudges for Argo CLI (#5168)
- * [26ce0c090](https://github.com/argoproj/argo-workflows/commit/26ce0c0909eea5aa437343885569aa9f6fc82f12) fix: Ensure whitespace is allowed between name and bracket (#5176)
- * [2abf08eb4](https://github.com/argoproj/argo-workflows/commit/2abf08eb4de46fbffc44e26a16c9f1ff9d5bd4c5) fix: Consider templateRef when filtering by tag (#5190)
- * [23415b2c1](https://github.com/argoproj/argo-workflows/commit/23415b2c1a90d1468912c29051fc8287eb30f84b) fix(executor): Fix emissary bugs (#5187)
- * [f5dcd1bd4](https://github.com/argoproj/argo-workflows/commit/f5dcd1bd40668b42fdd6aa1ce92e91a4d684608d) fix: Propagate URL changes to react state (#5188)
- * [e5a5f0394](https://github.com/argoproj/argo-workflows/commit/e5a5f0394b535784daa21ad213f454e09f408914) fix(controller): Fix timezone support. Fixes #5181 (#5182)
- * [199016a6b](https://github.com/argoproj/argo-workflows/commit/199016a6bed5284df3ec5caebbef9f2d018a2d43) feat(server): Enforce TLS >= v1.2 (#5172)
- * [ab361667a](https://github.com/argoproj/argo-workflows/commit/ab361667a8b8c5ccf126eb1c34962c86c1b738d4) feat(controller): Emissary executor. (#4925)
-
-### Contributors
-
- * AIKAWA
- * Alex Collins
- * BOOK
- * Iven
- * Kevin Hwang
- * Markus Lippert
- * Michael Ruoss
- * Michael Weibel
- * Niklas Hansson
- * Peixuan Ding
- * Pruthvi Papasani
- * Radolumbo
- * Reijer Copier
- * Riccardo Piccoli
- * Roi Kramer
- * Saravanan Balasubramanian
- * Saïfane FARFAR
- * Shoubhik Bose
- * Simon Behar
- * Stephan van Maris
- * Tianchu Zhao
- * Tim Collins
- * Vivek Kumar
- * Vlad Losev
- * Vladimir Ivanov
- * Wen-Chih (Ryan) Lo
- * Yuan Tang
- * Zach Aller
- * alexey
- * dinever
- * kennytrytek
- * markterm
- * tczhao
- * tobisinghania
- * uucloud
- * wanglong001
-
-## v3.0.10 (2021-08-18)
-
- * [0177e73b9](https://github.com/argoproj/argo-workflows/commit/0177e73b962136200517b7f301cd98cfbed02a31) Update manifests to v3.0.10
- * [587b17539](https://github.com/argoproj/argo-workflows/commit/587b1753968dd5ab4d8bc7e5e60ee6e9ca8e1b7b) fix: Fix `x509: certificate signed by unknown authority` error (#6566)
-
-### Contributors
-
- * Alex Collins
-
-## v3.0.9 (2021-08-18)
-
- * [d5fd9f14f](https://github.com/argoproj/argo-workflows/commit/d5fd9f14fc6f55c5d6c1f382081b68e86574d74d) Update manifests to v3.0.9
- * [4eb16eaa5](https://github.com/argoproj/argo-workflows/commit/4eb16eaa58ea2de4c4b071c6b3a565dc62e4a07a) fix: Generate TLS Certificates on startup and only keep in memory (#6540)
- * [419b7af08](https://github.com/argoproj/argo-workflows/commit/419b7af08582252d6f0722930d026ba728fc19d6) fix: Remove client private key from client auth REST config (#6506)
-
-### Contributors
-
- * Alex Collins
- * David Collom
-
-## v3.0.8 (2021-06-21)
-
- * [6d7887cce](https://github.com/argoproj/argo-workflows/commit/6d7887cce650f999bb6f788a43fcefe3ca398185) Update manifests to v3.0.8
- * [449237971](https://github.com/argoproj/argo-workflows/commit/449237971ba81e8397667be77a01957ec15d576e) fix(ui): Fix-up local storage namespaces. Fixes #6109 (#6144)
- * [87852e94a](https://github.com/argoproj/argo-workflows/commit/87852e94aa2530ebcbd3aeaca647ae8ff42774ac) fix(controller): dehydrate workflow before deleting offloaded node status (#6112)
- * [d8686ee1a](https://github.com/argoproj/argo-workflows/commit/d8686ee1ade34d7d5ef687bcb638415756b2f364) fix(executor): Fix docker not terminating. Fixes #6064 (#6083)
- * [c2abdb8e6](https://github.com/argoproj/argo-workflows/commit/c2abdb8e6f16486a0785dc852d293c19bd721399) fix(controller): Handling panic in leaderelection (#6072)
-
-### Contributors
-
- * Alex Collins
- * Reijer Copier
- * Saravanan Balasubramanian
-
-## v3.0.7 (2021-05-25)
-
- * [e79e7ccda](https://github.com/argoproj/argo-workflows/commit/e79e7ccda747fa4487bf889142c744457c26e9f7) Update manifests to v3.0.7
- * [b6e986c85](https://github.com/argoproj/argo-workflows/commit/b6e986c85f36e6a182bf1e58a992d2e26bce1feb) fix(controller): Increase readiness timeout from 1s to 30s (#6007)
-
-### Contributors
-
- * Alex Collins
-
-## v3.0.6 (2021-05-24)
-
- * [4a7004d04](https://github.com/argoproj/argo-workflows/commit/4a7004d045e2d8f5f90f9e8caaa5e44c013be9d6) Update manifests to v3.0.6
- * [10ecb7e5b](https://github.com/argoproj/argo-workflows/commit/10ecb7e5b1264c283d5b88a214431743c8da3468) fix(controller): Listen on :6060 (#5988)
-
-### Contributors
-
- * Alex Collins
-
-## v3.0.5 (2021-05-24)
-
- * [98b930cb1](https://github.com/argoproj/argo-workflows/commit/98b930cb1a9f4304f879e33177d1c6e5b45119b7) Update manifests to v3.0.5
- * [f893ea682](https://github.com/argoproj/argo-workflows/commit/f893ea682f1c30619195f32b58ebc4499f318d21) feat(controller): Add liveness probe (#5875)
- * [e64607efa](https://github.com/argoproj/argo-workflows/commit/e64607efac779113dd57a9925cd06f9017186f63) fix(controller): Empty global output param crashes (#5931)
- * [eeb5acba4](https://github.com/argoproj/argo-workflows/commit/eeb5acba4565a178cde119ab92a36b291d0b3bb8) fix(ui): ensure that the artifacts property exists before inspecting it (#5977)
- * [49979c2fa](https://github.com/argoproj/argo-workflows/commit/49979c2fa5c08602b56cb21ef5e31594a1a9ddd4) fix(controller): Revert cb9676e88857193b762b417f2c45b38e2e0967f9. Fixes #5852 (#5933)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * dherman
-
-## v3.0.4 (2021-05-13)
-
- * [d7ebc548e](https://github.com/argoproj/argo-workflows/commit/d7ebc548e30cccc6b6bfc755f69145147dbe73f2) Update manifests to v3.0.4
- * [06744da67](https://github.com/argoproj/argo-workflows/commit/06744da6741dd9d8c6bfec3753bb1532f77e8a7b) fix(ui): Fix workflow summary page unscrollable issue (#5743)
- * [d3ed51e7a](https://github.com/argoproj/argo-workflows/commit/d3ed51e7a8528fc8051fe64d1a1fda18d64faa85) fix(controller): Fix pod spec jumbling. Fixes #5897 (#5899)
- * [d9e583a12](https://github.com/argoproj/argo-workflows/commit/d9e583a12b9ab684c8f44d5258b65b4d9ff24604) fix: Fix active pods count in node pending status with pod deleted. (#5898)
-
-### Contributors
-
- * Alex Collins
- * Radolumbo
- * Saravanan Balasubramanian
- * dinever
-
-## v3.0.3 (2021-05-11)
-
- * [e450ea7fa](https://github.com/argoproj/argo-workflows/commit/e450ea7facd6811ecc6b4acc8269e1bbb4db7ab5) Update manifests to v3.0.3
- * [80142b120](https://github.com/argoproj/argo-workflows/commit/80142b120dae997ecad52b686fb8944f4fc40239) fix(controller): Error template ref exit handlers. Fixes #5835 (#5837)
- * [8a4a3729d](https://github.com/argoproj/argo-workflows/commit/8a4a3729dbe4517bde945709f1dfa3dd5b0333f7) fix(executor): Enable PNS executor to better kill sidecars. Fixes #5779 (#5794)
- * [cb8a54793](https://github.com/argoproj/argo-workflows/commit/cb8a547936af509ea07e13673e616c9434dad739) feat(controller): Add config for potential CPU hogs (#5853)
- * [702bfb245](https://github.com/argoproj/argo-workflows/commit/702bfb245af90d13e6c0ed0616ab9b0d6cb762ab) 5739 (#5797)
- * [a4c246b2b](https://github.com/argoproj/argo-workflows/commit/a4c246b2b5d97f5ab856aafb4c5e00d3b73d6f7e) fix(ui): don't show cluster workflows in namespaced mode. Closes #5841 (#5846)
- * [910f552de](https://github.com/argoproj/argo-workflows/commit/910f552defa04396cce9f7e2794f35a2845455e5) fix: certs in non-root (#5476)
- * [f6493ac36](https://github.com/argoproj/argo-workflows/commit/f6493ac36223f2771a8da4599bfceafc8465ee60) fix(executor): Fix artifactory saving files. Fixes #5733 (#5775)
- * [6c16cec61](https://github.com/argoproj/argo-workflows/commit/6c16cec619cc30187de7385bc7820055e1c5f511) fix(controller): Enable metrics server on stand-by controller (#5540)
- * [b6d703475](https://github.com/argoproj/argo-workflows/commit/b6d7034753fa21ba20637dddd806d17905f1bc56) feat(controller): Allow to disable leader election (#5638) (#5648)
- * [0ae8061c0](https://github.com/argoproj/argo-workflows/commit/0ae8061c08809c7d96adcd614812a9000692a11e) fix: Node set updating global output parameter updates global. #5699 (#5700)
- * [0d3ad801c](https://github.com/argoproj/argo-workflows/commit/0d3ad801c105e442f61ba3f81fd61d2c6689897d) fix: Reset workflow started time to current when retrying workflow. Fixes #5796 (#5798)
- * [e67cb424d](https://github.com/argoproj/argo-workflows/commit/e67cb424dae7cdfc623c67573b959d1c59e2444f) fix: change log level to warn level (#5790)
- * [cfd0fad05](https://github.com/argoproj/argo-workflows/commit/cfd0fad05a16d1281056a27e750efb2178b2d068) fix(ui): Remove the ability to change namespaces via the UI in Managed Namespace Mode. Closes #5577
- * [d2f53eae3](https://github.com/argoproj/argo-workflows/commit/d2f53eae3bab4b9fc1e5110d044fe4681291a19a) fix(ui): Fix greediness in regex for auth token replacement (#5746)
-
-### Contributors
-
- * Alex Collins
- * Michael Ruoss
- * Radolumbo
- * Saravanan Balasubramanian
- * Shoubhik Bose
- * Wen-Chih (Ryan) Lo
- * Yuan Tang
- * alexey
- * markterm
- * tobisinghania
-
-## v3.0.2 (2021-04-20)
-
- * [38fff9c0e](https://github.com/argoproj/argo-workflows/commit/38fff9c0e0f04663b0ee1e44ae0a3183bed6561d) Update manifests to v3.0.2
- * [a43caa577](https://github.com/argoproj/argo-workflows/commit/a43caa5770303abb6d489b4105c2a5b8e7524f4d) fix binary build
- * [ca8489988](https://github.com/argoproj/argo-workflows/commit/ca84899881844893de4e8fba729b3d44605804d0) fix: Build argosay binary if it doesn't exist
- * [9492e12b0](https://github.com/argoproj/argo-workflows/commit/9492e12b05897e7dacf479b31606ecc9a13a5212) fix(executor): More logs for PNS sidecar termination. #5627 (#5683)
- * [c8c7ce3bb](https://github.com/argoproj/argo-workflows/commit/c8c7ce3bb2ff5fdb735bd169926f2efdc2b26ba1) fix: Only save memoization cache when node succeeded (#5711)
- * [1ba1d61f1](https://github.com/argoproj/argo-workflows/commit/1ba1d61f123ec2e53f160b4666ee3e6637e0bfe9) fix(controller): Fix cron timezone support. Fixes #5653 (#5712)
- * [408d31a5f](https://github.com/argoproj/argo-workflows/commit/408d31a5fa289505beb2db857fc65e0fbb704b91) fix(ui): Fix `showWorkflows` button. Fixes #5645 (#5693)
- * [b7b4b3f71](https://github.com/argoproj/argo-workflows/commit/b7b4b3f71383ee339003e3d51749e41307903448) fix(ui): Fix YAML/JSON toggle. Fixes #5690 (#5694)
- * [279b78b43](https://github.com/argoproj/argo-workflows/commit/279b78b43da692d98bd86dc532f4bc7ad0a308e2) fix(cli): Validate cron on update. Fixes #5691 (#5692)
- * [f7200402f](https://github.com/argoproj/argo-workflows/commit/f7200402fa5cdd4ad88bfcfe04efd763192877de) fix(executor): Ignore not existing metadata. Fixes #5656 (#5695)
- * [193f87512](https://github.com/argoproj/argo-workflows/commit/193f8751296db9ae5f1f937cb30757cdf6639152) fix(controller): Correct bug for repository ref without default key. Fixes #5646 (#5660)
- * [e20813308](https://github.com/argoproj/argo-workflows/commit/e20813308adec6ea05ee8d01b51b489207fe3b96) fix(ui): strip inner quotes from argoToken (#5677)
- * [493e5d656](https://github.com/argoproj/argo-workflows/commit/493e5d656fd27f48c14f1a232770532d629edbd9) fix: git clone on non-default branch fails (Fixes #5629) (#5630)
- * [f8ab29b4b](https://github.com/argoproj/argo-workflows/commit/f8ab29b4bd8af591154b01da6dc269f8159c282f) fix: Default ARGO_SECURE=true. Fixes #5607 (#5626)
- * [49a4926d1](https://github.com/argoproj/argo-workflows/commit/49a4926d15d7fc76b7a79b99beded78cbb1d20ab) fix: Make task/step name extractor robust (#5672)
- * [0cea6125e](https://github.com/argoproj/argo-workflows/commit/0cea6125ec6b03e609741dac861b6aabf4844849) fix: Surface error during wait timeout for OSS artifact driver API calls (#5601)
- * [026c12796](https://github.com/argoproj/argo-workflows/commit/026c12796b5ea1abfde9c8f59c2cc0836b8044fe) fix(ui): Fix editor. Fixes #5613 Fixes #5617 (#5620)
- * [dafa98329](https://github.com/argoproj/argo-workflows/commit/dafa9832920fc5d6b711d88f182d277b76a5c930) fix(ui): various ui fixes (#5606)
- * [c17e72e8b](https://github.com/argoproj/argo-workflows/commit/c17e72e8b00126abc972a6fd16b5cadbbbe87523) fix(controller): Workflow hangs indefinitely during ContainerCreating if the Pod or Node unexpectedly dies (#5585)
- * [3472b4f5f](https://github.com/argoproj/argo-workflows/commit/3472b4f5ffd345bed318433318a3c721ea0fd62b) feat(ui): let workflow dag and node info scroll independently (#5603)
- * [f6c47e4b7](https://github.com/argoproj/argo-workflows/commit/f6c47e4b7a2d33ba5d994d4756270b678ea018fb) fix: parse username from git url when using SSH key auth (#5156)
- * [5bc28dee2](https://github.com/argoproj/argo-workflows/commit/5bc28dee20d0439fb50fdd585af268501f649390) fix(controller): Consider nested expanded task in reference (#5594)
-
-### Contributors
-
- * Alex Collins
- * Iven
- * Michael Ruoss
- * Saravanan Balasubramanian
- * Simon Behar
- * Vladimir Ivanov
- * Yuan Tang
- * kennytrytek
- * tczhao
-
-## v3.0.1 (2021-04-01)
-
- * [a8c7d54c4](https://github.com/argoproj/argo-workflows/commit/a8c7d54c47b8dc08fd94d8347802d8d0604b09c3) Update manifests to v3.0.1
- * [65250dd68](https://github.com/argoproj/argo-workflows/commit/65250dd68c6d9f3b2262197dd6a9d1402057da24) fix: Switch InsecureSkipVerify to true (#5575)
- * [0de125ac3](https://github.com/argoproj/argo-workflows/commit/0de125ac3d3d36f7b9f8a18a86b62706c9a442d2) fix(controller): clean up before insert into argo_archived_workflows_labels (#5568)
- * [f05789459](https://github.com/argoproj/argo-workflows/commit/f057894594b7f55fb19feaf7bfc386e6c7912f05) fix(executor): GODEBUG=x509ignoreCN=0 (#5562)
- * [bda3af2e5](https://github.com/argoproj/argo-workflows/commit/bda3af2e5a7b1dda403c14987eba4e7e867ea8f5) fix: Reference new argo-workflows url in in-app links (#5553)
-
-### Contributors
-
- * Alex Collins
- * BOOK
- * Simon Behar
- * Tim Collins
-
-## v3.0.0 (2021-03-29)
-
- * [46628c88c](https://github.com/argoproj/argo-workflows/commit/46628c88cf7de2f1e0bcd5939a91e4ce1592e236) Update manifests to v3.0.0
- * [3089d8a2a](https://github.com/argoproj/argo-workflows/commit/3089d8a2ada5844850e806c89d0574c0635ea43a) fix: Add 'ToBeFailed'
- * [5771c60e6](https://github.com/argoproj/argo-workflows/commit/5771c60e67da3082eb856a4c1a1c5bdf586b4c97) fix: Default to insecure mode when no certs are present (#5511)
- * [c77f1eceb](https://github.com/argoproj/argo-workflows/commit/c77f1eceba89b5eb27c843d712d9d0022b05cd63) fix(controller): Use node.Name instead of node.DisplayName for onExit nodes (#5486)
- * [0e91e5f13](https://github.com/argoproj/argo-workflows/commit/0e91e5f13d1886f0c99062351681017d20067ec9) fix(ui): Correct Argo Events swagger (#5490)
- * [aa07d93a2](https://github.com/argoproj/argo-workflows/commit/aa07d93a2e9ddd139705829c85d19662ac07b43a) fix(executor): Always poll for Docker injected sidecars. Resolves #5448 (#5479)
-
-### Contributors
-
- * Alex Collins
- * Simon Behar
-
-## v3.0.0-rc9 (2021-03-23)
-
- * [02b87aa7d](https://github.com/argoproj/argo-workflows/commit/02b87aa7dea873404dc88a91507d8f662465c55f) Update manifests to v3.0.0-rc9
- * [0f5a9ad1e](https://github.com/argoproj/argo-workflows/commit/0f5a9ad1e9d630d2d2b5c71b8a66e30041f24fc3) fix(ui): Multiple UI fixes (#5498)
- * [ac5f17144](https://github.com/argoproj/argo-workflows/commit/ac5f171440fd0cbec6416319b974af74abf6d41d) fix(controller): Fix workflows with retryStrategy left Running after completion (#5497)
- * [3e81ed4c8](https://github.com/argoproj/argo-workflows/commit/3e81ed4c851cdb609d483965f7f0d92678f27be6) fix(controller): Only set global parameters after workflow validation succeeded to avoid panics (#5477)
- * [6d70f9cc7](https://github.com/argoproj/argo-workflows/commit/6d70f9cc7801d76c7fa8e80bb04c201be7ed501e) fix: Set daemoned nodes to Succeeded when boundary ends (#5421)
- * [de31db412](https://github.com/argoproj/argo-workflows/commit/de31db412713991eb3a97990718ff5aa848f7d02) fix(executor): Ignore non-running Docker kill errors (#5451)
- * [f6ada612a](https://github.com/argoproj/argo-workflows/commit/f6ada612aed817ad6f21d02421475358d0efc791) fix: Fix getStepOrDAGTaskName (#5454)
- * [586a04c15](https://github.com/argoproj/argo-workflows/commit/586a04c15806422f5abc95980fc61ff1e72d38c0) fix: Various UI fixes (#5449)
- * [78939009e](https://github.com/argoproj/argo-workflows/commit/78939009ecc63231dc0ae344db477f1441a9dbd2) fix(executor): PNS support artifacts for short-running containers (#5427)
- * [8f0235a01](https://github.com/argoproj/argo-workflows/commit/8f0235a014588f06562fab7cb86501a64067da01) fix(test): TestWorkflowTemplateRefWithShutdownAndSuspend flakiness (#5441)
- * [6f1027a1d](https://github.com/argoproj/argo-workflows/commit/6f1027a1d139a7650c5051dfe499012c28bf37b7) fix(cli): Only append parse result when not nil to avoid panic (#5424)
- * [5b871adbe](https://github.com/argoproj/argo-workflows/commit/5b871adbe4a7de3183d7a88cb9fcab2189a76f22) fix(ui): Fix link button. Fixes #5429 (#5430)
- * [41eaa357d](https://github.com/argoproj/argo-workflows/commit/41eaa357d7ff3c2985eb38725862d037cb2009d3) fix(executor): Surface error when wait container fails to establish pod watch (#5372)
- * [f55d41ac8](https://github.com/argoproj/argo-workflows/commit/f55d41ac8495d1fb531c07106faf0c7cf39668a9) fix(test): Fix TestWorkflowTemplateRefWithShutdownAndSuspend flakiness (#5418)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
- * Yuan Tang
-
-## v3.0.0-rc8 (2021-03-17)
-
- * [ff5040016](https://github.com/argoproj/argo-workflows/commit/ff504001640d6e47345ff00b7f3ef14ccec314e9) Update manifests to v3.0.0-rc8
- * [50fe7970c](https://github.com/argoproj/argo-workflows/commit/50fe7970c19dc686e752a7b4b8b5db50e16f24c8) fix(server): Enable HTTPS probe for TLS by default. See #5205 (#5228)
-
-### Contributors
-
- * Alex Collins
- * Simon Behar
-
-## v3.0.0-rc7 (2021-03-16)
-
- * [8049ed820](https://github.com/argoproj/argo-workflows/commit/8049ed820fc45a21acf7c39a35566b1ae53a963b) Update manifests to v3.0.0-rc7
- * [c2c441027](https://github.com/argoproj/argo-workflows/commit/c2c4410276c1ef47f1ec4f76a4d1909ea484f3a8) fix(executor): Kill injected sidecars. Fixes #5337 (#5345)
- * [c9d7bfc65](https://github.com/argoproj/argo-workflows/commit/c9d7bfc650bbcc12dc52457870f5663d3bcd5b73) chore(server): Enable TLS by default. Resolves #5205 (#5212)
- * [701623f75](https://github.com/argoproj/argo-workflows/commit/701623f756bea95fcfcbcae345ea77979925e738) fix(executor): Fix resource patch when not providing flags. Fixes #5310 (#5311)
- * [ae34e4d74](https://github.com/argoproj/argo-workflows/commit/ae34e4d74dabe00423d848bc950abdad98263897) fix: Do not allow cron workflow names with more than 52 chars (#5407)
- * [4468c26fa](https://github.com/argoproj/argo-workflows/commit/4468c26fa2b0dc6fea2a228265418b12f722352f) fix(test): TestWorkflowTemplateRefWithShutdownAndSuspend flaky (#5381)
- * [1ce011e45](https://github.com/argoproj/argo-workflows/commit/1ce011e452c60c643e16e4e3e36033baf90de0f5) fix(controller): Fix `podSpecPatch` (#5360)
- * [a4dacde81](https://github.com/argoproj/argo-workflows/commit/a4dacde815116351eddb31c90de2ea5697d2c941) fix: Fix S3 file loading (#5353)
- * [452b37081](https://github.com/argoproj/argo-workflows/commit/452b37081fa9687bc37c8fa4f5fb181f469c79ad) fix(executor): Make docker executor more robust. (#5363)
- * [83fc1c38b](https://github.com/argoproj/argo-workflows/commit/83fc1c38b215948934b3eb69de56a1f4bee420a3) fix(test): Flaky TestWorkflowShutdownStrategy (#5331)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
- * Yuan Tang
-
-## v3.0.0-rc6 (2021-03-09)
-
- * [ab611694f](https://github.com/argoproj/argo-workflows/commit/ab611694fd91236ccbfd978834cc3bc1d364e0ac) Update manifests to v3.0.0-rc6
- * [309fd1114](https://github.com/argoproj/argo-workflows/commit/309fd1114755401c082a0d8c80a06f6509d25251) fix: More Makefile fixes (#5347)
- * [f77340500](https://github.com/argoproj/argo-workflows/commit/f7734050074bb0ddfcb2b2d914ca4014fe84c512) fix: Ensure release images are 'clean' (#5344)
- * [ce915f572](https://github.com/argoproj/argo-workflows/commit/ce915f572ef52b50acc0fb758e1e9ca86e2c7308) fix: Ensure DEV_BRANCH is correct (#5343)
-
-### Contributors
-
- * Simon Behar
-
-## v3.0.0-rc5 (2021-03-08)
-
- * [3b422776f](https://github.com/argoproj/argo-workflows/commit/3b422776fde866792d16dff25bbe7430d2e08fab) Update manifests to v3.0.0-rc5
- * [145847d77](https://github.com/argoproj/argo-workflows/commit/145847d775cd040433a6cfebed5eecbe5b378443) cherry-picked fix(controller): shutdownstrategy on running workflow (#5289)
- * [29723f49e](https://github.com/argoproj/argo-workflows/commit/29723f49e221bd0b4897858e6a2e403fb89a1e2c) codegen
- * [ec1304654](https://github.com/argoproj/argo-workflows/commit/ec1304654fd199a07dbd08c8690a0f12638b699c) fix: Makefile target (#5313)
- * [8c69f4faa](https://github.com/argoproj/argo-workflows/commit/8c69f4faaa456bc55b234b1e92037e01e0359a1d) add json/fix.go
- * [4233d0b78](https://github.com/argoproj/argo-workflows/commit/4233d0b7855b8b62c5a64f488f0803735dff1acf) fix: Minor UI fixes (#5325)
- * [87b62c085](https://github.com/argoproj/argo-workflows/commit/87b62c0852b179c865066a3325870ebbdf29c99b) fix: Disallow object names with more than 63 chars (#5324)
- * [e16bd95b4](https://github.com/argoproj/argo-workflows/commit/e16bd95b438f53c4fb3146cba4595370f579b618) fix(executor): Delegate PNS wait to K8SAPI executor. (#5307)
- * [62956be0e](https://github.com/argoproj/argo-workflows/commit/62956be0e1eb9c7c5ec8a33cdda956b9acb37025) fix: Backward compatible workflowTemplateRef from 2.11.x to 2.12.x (#5314)
- * [95dd7f4b1](https://github.com/argoproj/argo-workflows/commit/95dd7f4b140e4fdd5c939eaecd00341be4adabdd) feat: Track nodeView tab in URL (#5300)
- * [a3c12df51](https://github.com/argoproj/argo-workflows/commit/a3c12df5154dbc8236bf3833157d7d5165ead440) fix: Use ScopedLocalStorage instead of direct localStorage (#5301)
- * [f368c32f2](https://github.com/argoproj/argo-workflows/commit/f368c32f299f3361b07c989e6615f592654903d6) fix(executor): Enhance PNS executor. Resolves #5251 (#5296)
- * [4b2fd9f7d](https://github.com/argoproj/argo-workflows/commit/4b2fd9f7d3a251840ec283fa320da1b6a43f0aba) fix: Fixes around archiving workflows (#5278)
- * [afe2cdb6e](https://github.com/argoproj/argo-workflows/commit/afe2cdb6e6a611707f20736500c359408d6cadef) fix: Correctly log sub-resource Kubernetes API requests (#5276)
- * [27956b71c](https://github.com/argoproj/argo-workflows/commit/27956b71c39a7c6042c9df662a438ea8205e76a4) fix(executor): Fix concurrency error in PNS executor. Fixes #5250 (#5258)
- * [0a8b8f719](https://github.com/argoproj/argo-workflows/commit/0a8b8f71948d4992cc3e3ebb3fa11e5d37838a59) fix(executor): Fix docker "created" issue. Fixes #5252 (#5249)
- * [71d1130d2](https://github.com/argoproj/argo-workflows/commit/71d1130d2b24e1054d8e41b3dfa74762d35ffdf9) fix(controller): Take labels change into account in SignificantPodChange() (#5253)
- * [39adcd5f3](https://github.com/argoproj/argo-workflows/commit/39adcd5f3bc36a7a38b4fd15b0eb6c359212da45) fix(controller): Work-around Golang bug. Fixes #5192 (#5230)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
- * Yuan Tang
-
-## v3.0.0-rc4 (2021-03-02)
-
- * [ae5587e97](https://github.com/argoproj/argo-workflows/commit/ae5587e97dad0e4806f7a230672b998fe140a767) Update manifests to v3.0.0-rc4
- * [a7ecfc085](https://github.com/argoproj/argo-workflows/commit/a7ecfc085cebd67366aeda62952015789d83198b) feat(controller): Allow to modify time related configurations in leader election (#5234)
- * [9b9043a64](https://github.com/argoproj/argo-workflows/commit/9b9043a6483637c01bed703fdf897abd2e4757ab) feat(controller): Reused existing workflow informer. Resolves #5202 (#5204)
- * [4e9f6350f](https://github.com/argoproj/argo-workflows/commit/4e9f6350f892266ebf3ac9c65288fd43c0f958d3) fix(controller): Leader lease shared name improvements (#5218)
- * [942113933](https://github.com/argoproj/argo-workflows/commit/9421139334d87cd4391e0ee30903e9e1d7f915ba) fix: Do not display pagination warning when there is no pagination (#5221)
- * [0891dc2f6](https://github.com/argoproj/argo-workflows/commit/0891dc2f654350c8748a03bd10cca26d3c545ca5) fix: Checkbox is not clickable (#5213)
- * [9a1971efd](https://github.com/argoproj/argo-workflows/commit/9a1971efd85c9e4038d6ddf3a364fa12752d315c) fix(controller): Leader Lease Shared Name (#5214)
- * [339bf4e89](https://github.com/argoproj/argo-workflows/commit/339bf4e8915933bc42353525e05019fa343b75c2) fix: Ensure whitespace is allowed between name and bracket (#5176)
- * [df032f629](https://github.com/argoproj/argo-workflows/commit/df032f629d17f20ae60840bde393975cf16027d7) fix: Consider templateRef when filtering by tag (#5190)
- * [d9d831cad](https://github.com/argoproj/argo-workflows/commit/d9d831cadec897a6f4506aff007e7c6d5de85407) fix: Propagate URL changes to react state (#5188)
- * [db6577584](https://github.com/argoproj/argo-workflows/commit/db6577584621ebe0f369f69b4910d180f9964907) fix(controller): Fix timezone support. Fixes #5181 (#5182)
-
-### Contributors
-
- * Alex Collins
- * Simon Behar
- * Yuan Tang
- * Zach Aller
-
-## v3.0.0-rc3 (2021-02-23)
-
- * [c0c364c22](https://github.com/argoproj/argo-workflows/commit/c0c364c229e3b72306bd0b73161df090d24e0c31) Update manifests to v3.0.0-rc3
- * [a9c420060](https://github.com/argoproj/argo-workflows/commit/a9c42006079398228d6fb666ee9fe5f3e9149499) fix: Specify where in YAML validation error occurred (#5174)
- * [4b78a7ee4](https://github.com/argoproj/argo-workflows/commit/4b78a7ee4c2db9e56bf1ff0387c2de3cbe38ebf1) fix: Fix node filters in UI (#5162)
- * [d9fb0c30f](https://github.com/argoproj/argo-workflows/commit/d9fb0c30f9aedd4909b7e9c38fc69fb679ddd2f6) feat(controller): Support pod GC strategy based on label selector on pods (#5090)
- * [91528cc85](https://github.com/argoproj/argo-workflows/commit/91528cc8526ff1b519ad19d8a3cb92009d4aca90) fix(executor): Do not make unneeded `get pod` when no sidecars (#5161)
- * [bec80c868](https://github.com/argoproj/argo-workflows/commit/bec80c86857357a9ba00cc904a90531e477597c1) fix: Better message formatting for nodes (#5160)
- * [d33b5cc06](https://github.com/argoproj/argo-workflows/commit/d33b5cc0673fe4f66fb63a3ca85d34dfc03c91dc) fix: send periodic keepalive packets on eventstream connections (#5101)
- * [0f9b22b6e](https://github.com/argoproj/argo-workflows/commit/0f9b22b6eb20431f4db73c96139808fc4468fc43) fix: Append the error message prior to offloading node status (#5043)
- * [4611a1673](https://github.com/argoproj/argo-workflows/commit/4611a167341e922bb1978ed65e5941031769c52d) feat: Support automatically create OSS bucket if not exists (#5133)
- * [687479fa4](https://github.com/argoproj/argo-workflows/commit/687479fa4dcf160e293efd3e6199f5e37b523696) feat(controller): Use different container runtime executors for each workflow. Close #4254 (#4998)
- * [590df1dca](https://github.com/argoproj/argo-workflows/commit/590df1dcacf557880133e4e8dd5087830d97f815) feat: Add `argo submit --verify` hidden flag. Closes #5136 (#5141)
- * [377c5f84c](https://github.com/argoproj/argo-workflows/commit/377c5f84c1c69a2aa7d450fc17a79984dba5ee81) feat: added lint from stdin (#5095)
- * [633da2584](https://github.com/argoproj/argo-workflows/commit/633da25843d68ea377ddf35010d9849203d04fb3) feat(server): Write an audit log entry for SSO users (#5145)
- * [2ab02d95e](https://github.com/argoproj/argo-workflows/commit/2ab02d95e65ede297040d7e683c7761428d8af72) fix: Revert the unwanted change in example (#5139)
- * [1c7921299](https://github.com/argoproj/argo-workflows/commit/1c79212996312c4b2328b807c74da690862c8e38) fix: Multiple UI fixes (#5140)
- * [46538d958](https://github.com/argoproj/argo-workflows/commit/46538d958fae0e689fe24de7261956f8d3bc7bec) feat(ui): Surface result and exit-code outputs (#5137)
- * [5e64ec402](https://github.com/argoproj/argo-workflows/commit/5e64ec402805b8de114e9b5cd7fb197eecaaa88e) feat: Build dev-* branches as engineering builds (#5129)
- * [4aa9847e2](https://github.com/argoproj/argo-workflows/commit/4aa9847e25efe424864875ac1b4a7367c916091c) fix(ui): add a tooltip for commonly truncated fields in the events pane (#5062)
- * [b1535e533](https://github.com/argoproj/argo-workflows/commit/b1535e533ca513b17589f53d503a1121e0ffc261) feat: Support pgzip as an alternative (de)compression implementation (#5108)
-
-### Contributors
-
- * Alex Collins
- * Roi Kramer
- * Saravanan Balasubramanian
- * Simon Behar
- * Yuan Tang
- * dherman
-
-## v3.0.0-rc2 (2021-02-16)
-
- * [ea3439c91](https://github.com/argoproj/argo-workflows/commit/ea3439c91c9fd0c2a57db0d8a5ccf2b9fb2454a3) Update manifests to v3.0.0-rc2
- * [b0685bdd0](https://github.com/argoproj/argo-workflows/commit/b0685bdd08616a0bb909d12f2821fd6e576468eb) fix(executor): Fix S3 policy based auth. Fixes #5110 (#5111)
- * [fcf4e9929](https://github.com/argoproj/argo-workflows/commit/fcf4e9929a411a7c6083e67c1c37e9c798e4c7d9) fix: Invalid OpenAPI Spec (Issue 4817) (#4831)
- * [19b22f25a](https://github.com/argoproj/argo-workflows/commit/19b22f25a4bfd900752947f695f7a3a1567149ef) feat: Add checker to ensure that env variable doc is up to date (#5091)
- * [210080a0c](https://github.com/argoproj/argo-workflows/commit/210080a0c0cb5fc40ec82859cc496a948e30687a) feat(controller): Logs Kubernetes API requests (#5084)
- * [2ff4db115](https://github.com/argoproj/argo-workflows/commit/2ff4db115daa4e801da10938ecdb9e27d5810b35) feat(executor): Minimize the number of Kubernetes API requests made by executors (#4954)
- * [68979f6e3](https://github.com/argoproj/argo-workflows/commit/68979f6e3dab8225765e166d346502e7e66b0c77) fix: Do not create pods under shutdown strategy (#5055)
- * [75d09b0f2](https://github.com/argoproj/argo-workflows/commit/75d09b0f2b48dd87d6562436e220c58dca9e06fa) fix: Synchronization lock handling in Step/DAG Template level (#5081)
- * [3b7e373ee](https://github.com/argoproj/argo-workflows/commit/3b7e373eeeb486efa2bef8f722394ef279ba1606) feat(ui): Display pretty cron schedule (#5088)
- * [1a0889cf3](https://github.com/argoproj/argo-workflows/commit/1a0889cf3bd2fb3482dd740a929e828744d363b2) fix: Revert "fix(controller): keep special characters in json string when … … 19da392 …use withItems (#4814)" (#5076)
- * [893e9c9fe](https://github.com/argoproj/argo-workflows/commit/893e9c9fe1bfc6cb2b3a97debb531614b2b2432a) fix: Prefer to break labels by '-' in UI (#5083)
- * [77b23098c](https://github.com/argoproj/argo-workflows/commit/77b23098cf2d361647dd978cbaeaa3628c169a16) fix(controller): Fix creator dashes (#5082)
- * [f461b040a](https://github.com/argoproj/argo-workflows/commit/f461b040a537342b996e43989f94d6ac7a3e5205) feat(controller): Add podMetadata field to workflow spec. Resolves #4985 (#5031)
- * [3b63e7d85](https://github.com/argoproj/argo-workflows/commit/3b63e7d85257126b7a2098aa72d90fdc47d212b0) feat(controller): Add retry policy to support retry only on transient errors (#4999)
- * [21e137bab](https://github.com/argoproj/argo-workflows/commit/21e137bab849a9affb1e0bb0acb4b36ae7663b52) fix(executor): Correct usage of time.Duration.
Fixes #5046 (#5049) - * [19a34b1fa](https://github.com/argoproj/argo-workflows/commit/19a34b1fa5c99d9bdfc51b73630c0605a198b8c1) feat(executor): Add user agent to workflow executor (#5014) - * [f31e0c6f9](https://github.com/argoproj/argo-workflows/commit/f31e0c6f92ec5e383d2f32f57a822a518cbbef86) chore!: Remove deprecated fields (#5035) - * [f59d46229](https://github.com/argoproj/argo-workflows/commit/f59d4622990b9d81ce80829431725c43f0a78e16) fix: Invalid URL for API Docs (#5063) - * [daf1a71b4](https://github.com/argoproj/argo-workflows/commit/daf1a71b4602e179796624aadfcdb2acea4af4b8) feat: Allow to specify grace period for pod GC (#5033) - * [26f48a9d9](https://github.com/argoproj/argo-workflows/commit/26f48a9d99932ad608e2614b61b203007433ae90) fix: Use React state to avoid new page load in Workflow view (#5058) - * [a0669b5d0](https://github.com/argoproj/argo-workflows/commit/a0669b5d02e489f234eb396136f3885cec8fa175) fix: Don't allow graph container to have its own scroll (#5056) - -### Contributors - - * Alex Collins - * Dylan Hellems - * Saravanan Balasubramanian - * Simon Behar - * Yuan Tang - * drannenberg - * kennytrytek - -## v3.0.0-rc1 (2021-02-08) - - * [9d0be9081](https://github.com/argoproj/argo-workflows/commit/9d0be9081396d369901f3bdb247a61a8d7af8b32) Update manifests to v3.0.0-rc1 - * [425173a28](https://github.com/argoproj/argo-workflows/commit/425173a28057492631590f2fb3b586490d62efb9) fix(cli): Add insecure-skip-verify for HTTP1. Fixes #5008 (#5015) - * [48b669cca](https://github.com/argoproj/argo-workflows/commit/48b669ccab13965900806bd2b1eebcca9b64f975) M is demonstrably not less than 1 in the examples (#5021) - * [5915a2164](https://github.com/argoproj/argo-workflows/commit/5915a216427d2d79d5d54746eede61d4e54f31fe) feat(controller): configurable terminationGracePeriodSeconds (#4940) - * [5824fc6bb](https://github.com/argoproj/argo-workflows/commit/5824fc6bb4fbee74d9016e4da97bc177b4d1f081) Fix golang build (#5039) - * [ef76f729a](https://github.com/argoproj/argo-workflows/commit/ef76f729a853bc8512caa504258462c1ba51630f) feat: DAG render options panel float through scrolling (#5036) - * [b4ea47e05](https://github.com/argoproj/argo-workflows/commit/b4ea47e05dcfe3113f906b252736a18f0c90273c) fix: Skip the Workflow not found error in Concurrency policy (#5030) - * [edbe5bc9e](https://github.com/argoproj/argo-workflows/commit/edbe5bc9eb6256329d6b492921e1ff5fa426dae2) fix(ui): Display all node inputs/output in one tab. 
Resolves #5027 (#5029) - * [c4e8d1cf2](https://github.com/argoproj/argo-workflows/commit/c4e8d1cf2f42f405c4f4efd80c83b29dde1f1a23) feat(executor): Log `verb kind statusCode` for executor Kubernetes API requests (#4989) - * [d1abcb055](https://github.com/argoproj/argo-workflows/commit/d1abcb05507007676ff12ef97652ca4c8a325ccd) fix: Unmark daemoned nodes after stopping them (#5005) - * [38e98f7ee](https://github.com/argoproj/argo-workflows/commit/38e98f7eecc593b63192c4fcb53d80b06c3cc618) Video (#5019) - * [342caeff5](https://github.com/argoproj/argo-workflows/commit/342caeff5b6126d2bedaf5c6836cd0fe0fc1fca1) fix(ui): Fix event-flow hidden nodes (#5013) - * [d5ccc8e01](https://github.com/argoproj/argo-workflows/commit/d5ccc8e0119c3263e6806b4a13e2fa9ec3fff88c) feat(executor): Upgrade kubectl to v1.19 (#5011) - * [8f5e17ac4](https://github.com/argoproj/argo-workflows/commit/8f5e17ac430a48195cc7695313af6d304a0b6cac) feat: Set CORS headers (#4990) - * [99c049bd2](https://github.com/argoproj/argo-workflows/commit/99c049bd27eb93b3a9719fde9ed8e8c60ca75511) feat(ui): Node search tool in UI Workflow viewer (#5000) - * [5047f0738](https://github.com/argoproj/argo-workflows/commit/5047f07381eb59373db60021ffd13f7a8ca9292e) fix: Fail DAG templates with variables with invalid dependencies (#4992) - * [ccd669e44](https://github.com/argoproj/argo-workflows/commit/ccd669e448bf5d9b39f55421e80dd0db6dbc3a39) fix: Coalesce UI filtering menus (#4972) - * [ce508c896](https://github.com/argoproj/argo-workflows/commit/ce508c8967bbc353d645d1326c9cd77f1335f2b7) feat: Configurable retry backoff settings when retrying API calls (#4979) - * [44a4f7e10](https://github.com/argoproj/argo-workflows/commit/44a4f7e10ce1d88e82d5df86c000b93a422484e2) fix(controller): Revert prepending ExecutorScriptSourcePath which brought a breaking change in args handling (#4884) - * [b68d63eb2](https://github.com/argoproj/argo-workflows/commit/b68d63eb2064be0d0544a6d5997940ba4805f4fa) fix(controller): Adds PNS_PRIVILEGED, fixed termination bug (#4983) - * [d324b43c7](https://github.com/argoproj/argo-workflows/commit/d324b43c7777c500521193ebbdf1223966dfe916) fix: Use button in side panel links (#4977) - * [655c7e253](https://github.com/argoproj/argo-workflows/commit/655c7e253635ecf8b9bb650cbbe36607cb0ad22b) fix: Surface the underlying error on wait timeout. 
(#4966) - * [a00aa3257](https://github.com/argoproj/argo-workflows/commit/a00aa3257a6f9037c010f2bf6f0ee2c4309eaf5f) fix: Correct usage of wait.ExponentialBackoff (#4962) - * [e00623d61](https://github.com/argoproj/argo-workflows/commit/e00623d614f83afe2aead4bfdf27dc572940bea2) fix(server): Fix missing logs bug (#4960) - * [eabe96376](https://github.com/argoproj/argo-workflows/commit/eabe963761019f2981bfc4967c03a3c6733ce0ee) feat(server): add ServiceAccount info to api/v1/userinfo and ui user tab (#4944) - * [15156d193](https://github.com/argoproj/argo-workflows/commit/15156d1934a3a84f22c97dcd7c4f9fdd16664e4c) Added Astraea (#4855) - * [7404b1f8a](https://github.com/argoproj/argo-workflows/commit/7404b1f8a417a95a57b33d5ad077e0121db447f7) fix(controller): report OOM when wait container OOM (#4930) - * [6166e80c5](https://github.com/argoproj/argo-workflows/commit/6166e80c571783f8acf8e6d7448dac2c11f607b3) feat: Support retry on transient errors during executor status checking (#4946) - * [6e116e46e](https://github.com/argoproj/argo-workflows/commit/6e116e46e3ebc19b757bb7fb65a2d2799fb2cde6) feat(crds): Update CRDs to apiextensions.k8s.io/v1 (#4918) - * [261625324](https://github.com/argoproj/argo-workflows/commit/261625324c531c27353df6377541429a811446ef) feat(server): Add Prometheus metrics. Closes #4751 (#4952) - * [7c69898ed](https://github.com/argoproj/argo-workflows/commit/7c69898ed0df5c12ab48c718c3a4cc33613f7766) fix(cli): Allow full node name in node-field-selector (#4913) - * [c7293062a](https://github.com/argoproj/argo-workflows/commit/c7293062ac0267baa216e32230f8d61823ba7b37) fix(cli): Update the map-reduce example, fix bug. (#4948) - * [e7e51d08a](https://github.com/argoproj/argo-workflows/commit/e7e51d08a9857c5c4e16965cbe20ba4bcb5b6038) feat: Check the workflow is not being deleted for Synchronization workflow (#4935) - * [9d4edaef4](https://github.com/argoproj/argo-workflows/commit/9d4edaef47c2674861d5352e2ae6ecb10bcbb8f1) fix(ui): v3 UI tweaks (#4933) - * [2d73d58a5](https://github.com/argoproj/argo-workflows/commit/2d73d58a5428fa940bf4ef55e161f007b9824475) fix(ui): fix object-editor text render issue (#4921) - * [6e961ec92](https://github.com/argoproj/argo-workflows/commit/6e961ec928ee35e3ae022826f020c9722ad614d6) feat: support K8S json patch (#4908) - * [f872366f3](https://github.com/argoproj/argo-workflows/commit/f872366f3b40fc346266e3ae328bdc25eb2082ec) fix(controller): Report reconciliation errors better (#4877) - * [c8215f972](https://github.com/argoproj/argo-workflows/commit/c8215f972502435e6bc5b232823ecb6df919f952) feat(controller)!: Key-only artifacts. 
Fixes #3184 (#4618) - * [cd7c16b23](https://github.com/argoproj/argo-workflows/commit/cd7c16b235be369b0e44ade97c71cbe5b6d15f68) fix(ui): objecteditor only runs onChange when values are modified (#4911) - * [ee1f82764](https://github.com/argoproj/argo-workflows/commit/ee1f8276460b287da4df617b5c76a1e05764da3f) fix(ui): Fix workflow refresh bug (#4906) - * [929cd50e4](https://github.com/argoproj/argo-workflows/commit/929cd50e427db88fefff4810d83a4f85fc563de2) fix: Mutex not being released on step completion (#4847) - * [c1f9280a2](https://github.com/argoproj/argo-workflows/commit/c1f9280a204a3e305e378e34acda46d11708140f) fix(ui): UI bug fixes (#4895) - * [25abd1a03](https://github.com/argoproj/argo-workflows/commit/25abd1a03b3f490169220200b9add4da4846ac0b) feat: Support specifying the pattern for transient and retryable errors (#4889) - * [16f25ba09](https://github.com/argoproj/argo-workflows/commit/16f25ba09a87d9c29bee1c7b7aef80ec8424ba1d) Revert "feat(cli): add selector and field-selector option to the stop command. (#4853)" - * [53f7998eb](https://github.com/argoproj/argo-workflows/commit/53f7998ebc88be2db3beedbfbe2ea2f8ae230630) feat(cli): add selector and field-selector option to the stop command. (#4853) - * [1f13241fe](https://github.com/argoproj/argo-workflows/commit/1f13241fe9a7367fa3ebba4006f89b662b912d10) fix(workflow-event-bindings): removing unneeded ':' in protocol (#4893) - * [ecbca6ce7](https://github.com/argoproj/argo-workflows/commit/ecbca6ce7dd454f9df97bc7a6c6ec0b06a09bb0f) fix(ui): Show non-pod nodes (#4890) - * [4a5db1b79](https://github.com/argoproj/argo-workflows/commit/4a5db1b79e98d6ddd9f5cae15d0422624061c0bf) fix(controller): Consider processed retry node in metrics. Fixes #4846 (#4872) - * [dd8c1ba02](https://github.com/argoproj/argo-workflows/commit/dd8c1ba023831e8d127ffc9369b73299fad241b4) feat(controller): optional database migration (#4869) - * [a8e934826](https://github.com/argoproj/argo-workflows/commit/a8e9348261c77cb3b13bef864520128279f2e6b8) feat(ui): Argo Events API and UI. Fixes #888 (#4470) - * [17e79e8a2](https://github.com/argoproj/argo-workflows/commit/17e79e8a2af973711d428d7bb20be16a6aeccceb) fix(controller): make creator label DNS compliant. Fixes #4880 (#4881) - * [2ff11cc98](https://github.com/argoproj/argo-workflows/commit/2ff11cc987f852cd642d45ae058517a817b2b94e) fix(controller): Fix node status when daemon pod deleted but its children nodes are still running (#4683) - * [955a4bb12](https://github.com/argoproj/argo-workflows/commit/955a4bb12a2692b3b447b00558d8d84c7c44f2a9) fix: Do not error on duplicate workflow creation by cron (#4871) - * [622624e81](https://github.com/argoproj/argo-workflows/commit/622624e817705b06d5cb135388063762dd3d8b4f) fix(controller): Add matrix tests for node offload disabled. 
Resolves #2333 (#4864) - * [f38c9a2d7](https://github.com/argoproj/argo-workflows/commit/f38c9a2d78db061b398583dfc9a86c0da349a290) feat: Expose exitCode to step level metrics (#4861) - * [45c792a59](https://github.com/argoproj/argo-workflows/commit/45c792a59052db20da74713b29bdcd1145fc6748) feat(controller): `k8s_request_total` and `workflow_condition` metrics (#4811) - * [e3320d360](https://github.com/argoproj/argo-workflows/commit/e3320d360a7ba006796ebdb638349153d438dcff) feat: Publish images on Quay.io (#4860) - * [b674aa30b](https://github.com/argoproj/argo-workflows/commit/b674aa30bc1c204a63fd2e34d451f84390cbe7b8) feat: Publish images to Quay.io (#4854) - * [a6301d7c6](https://github.com/argoproj/argo-workflows/commit/a6301d7c64fb27e4ab68209da7ee9718bf257252) refactor: upgrade kube client version to v0.19.6. Fixes #4425, #4791 (#4810) - * [6b3ce5045](https://github.com/argoproj/argo-workflows/commit/6b3ce504508707472d4d31c6c522d1af02104b05) feat: Worker busy and active pod metrics (#4823) - * [53110b61d](https://github.com/argoproj/argo-workflows/commit/53110b61d14a5bdaa5c3b4c12527150dfc40b56a) fix: Preserve the original slice when removing string (#4835) - * [adfa988f9](https://github.com/argoproj/argo-workflows/commit/adfa988f9df64b629e08687737a80f2f6e0a6289) fix(controller): keep special characters in json string when use withItems (#4814) - * [6e158780e](https://github.com/argoproj/argo-workflows/commit/6e158780ef202c9d5fb1cb8161fc57bae80bb763) feat(controller): Retry pod creation on API timeout (#4820) - * [01e6c9d5c](https://github.com/argoproj/argo-workflows/commit/01e6c9d5c87d57611c2f3193d56e8af5e5fc91e7) feat(controller): Add retry on different host (#4679) - * [2243d3497](https://github.com/argoproj/argo-workflows/commit/2243d349781973ee0603c215c284da669a2811d5) fix: Metrics documentation (#4829) - * [f0a315cf4](https://github.com/argoproj/argo-workflows/commit/f0a315cf4353589507a37d5787d2424d65a249f3) fix(crds): Inline WorkflowSteps schema to generate valid OpenAPI spec (#4828) - * [f037fd2b4](https://github.com/argoproj/argo-workflows/commit/f037fd2b4e7bb23dfe1ca0ae793e14b1fab42c36) feat(controller): Adding Eventrecorder on LeaderElection - * [a0024d0d4](https://github.com/argoproj/argo-workflows/commit/a0024d0d4625c8660badff5a7d8eca883e7e2a3e) fix(controller): Various v2.12 fixes. Fixes #4798, #4801, #4806 (#4808) - * [ee59d49d9](https://github.com/argoproj/argo-workflows/commit/ee59d49d91d5cdaaa28a34a73339ecc072f8264e) fix: Memoize Example (Issue 4626) (#4818) - * [b73bd2b61](https://github.com/argoproj/argo-workflows/commit/b73bd2b6179840906ef5d2e0c9cccce987cb069a) feat: Customize workfow metadata from event data (#4783) - * [7e6c799af](https://github.com/argoproj/argo-workflows/commit/7e6c799afc025ecc4a9a861b6e2d36908d9eea41) fix: load all supported authentication plugins for k8s client-go (#4802) - * [78b0bffd3](https://github.com/argoproj/argo-workflows/commit/78b0bffd39ec556182e81374b2328450b8dd2e9b) fix(executor): Do not delete local artifacts after upload. 
Fixes #4676 (#4697) - * [af03a74fb](https://github.com/argoproj/argo-workflows/commit/af03a74fb334c88493e38ed4cb94f771a97bffc5) refactor(ui): replace node-sass with sass (#4780) - * [15ec9f5e4](https://github.com/argoproj/argo-workflows/commit/15ec9f5e4bc9a4b14b7ab1a56c3975948fecb591) chore(example): Add watch timeout and print out workflow status message (#4740) - * [4ac436d5c](https://github.com/argoproj/argo-workflows/commit/4ac436d5c7eef4a5fdf93fcb8c6e8a224e236bdd) fix(server): Do not silently ignore sso secret creation error (#4775) - * [442d367b1](https://github.com/argoproj/argo-workflows/commit/442d367b1296722b613dd86658ca0e3764b192ac) feat(controller): unix timestamp support on creationTimestamp var (#4763) - * [9f67b28c7](https://github.com/argoproj/argo-workflows/commit/9f67b28c7f7cc767ff1bfb72eb6c16e46071871a) feat(controller): Rate-limit workflows. Closes #4718 (#4726) - * [aed25fefe](https://github.com/argoproj/argo-workflows/commit/aed25fefe00734de0dfa734860fc7af03dbf62cf) Change argo-server crt/key owner (#4750) - * [fbb4e8d44](https://github.com/argoproj/argo-workflows/commit/fbb4e8d447fec32daf63795a9c7b1d7af3499d46) fix(controller): Support default database port. Fixes #4756 (#4757) - * [69ce2acfb](https://github.com/argoproj/argo-workflows/commit/69ce2acfbef761cd14aefb905aa1e396be9eb21e) refactor(controller): Enhanced pod clean-up scalability (#4728) - * [9c4d735a9](https://github.com/argoproj/argo-workflows/commit/9c4d735a9c01987f093e027332be2da71be85124) feat: Add a minimal prometheus server manifest (#4687) - * [625e3ce26](https://github.com/argoproj/argo-workflows/commit/625e3ce265e17df9315231e82e9a346aba400b14) fix(ui): Remove unused Heebo files. Fixes #4730 (#4739) - * [2e278b011](https://github.com/argoproj/argo-workflows/commit/2e278b011083195c8237522311f1ca94dcba4b59) fix(controller): Fixes resource version misuse. Fixes #4714 (#4741) - * [300db5e62](https://github.com/argoproj/argo-workflows/commit/300db5e628bee4311c1d50c5027abb4af2266564) fix(controller): Requeue when the pod was deleted. Fixes #4719 (#4742) - * [a1f7aedbf](https://github.com/argoproj/argo-workflows/commit/a1f7aedbf21c5930cb507ed495901ae430b10b43) fix(controller): Fixed workflow stuck with mutex lock (#4744) - * [1a7ed7342](https://github.com/argoproj/argo-workflows/commit/1a7ed7342312b658c501ee63ece8cb79d6792f88) feat(controller): Enhanced TTL controller scalability (#4736) - * [7437f4296](https://github.com/argoproj/argo-workflows/commit/7437f42963419e8d84b6da32f780b8be7a120ee0) fix(executor): Always check if resource has been deleted in checkResourceState() (#4738) - * [122c5fd2e](https://github.com/argoproj/argo-workflows/commit/122c5fd2ecd10dfeb3c0695dba7fc680bd5d46f9) fix(executor): Copy main/executor container resources from controller by value instead of reference (#4737) - * [440d732d1](https://github.com/argoproj/argo-workflows/commit/440d732d18c2364fe5d6c74b6e4f14dc437d78fc) fix(ui): Fix YAML for workflows with storedWorkflowTemplateSpec. Fixes #4691 (#4695) - * [ed853eb0e](https://github.com/argoproj/argo-workflows/commit/ed853eb0e366e92889a54a63714f9b9a74e5091f) fix: Allow Bearer token in server mode (#4735) - * [1f421df6b](https://github.com/argoproj/argo-workflows/commit/1f421df6b8eae90882eca974694ecbbf5bf660a6) fix(executor): Deal with the pod watch API call timing out (#4734) - * [724fd80c4](https://github.com/argoproj/argo-workflows/commit/724fd80c4cad6fb30ad665b36652b93e068c9509) feat(controller): Pod deletion grace period. 
Fixes #4719 (#4725) - * [380268943](https://github.com/argoproj/argo-workflows/commit/380268943efcf509eb28d43f9cbd4ceac195ba61) feat(controller): Add Prometheus metric: `workflow_ttl_queue` (#4722) - * [55019c6ea](https://github.com/argoproj/argo-workflows/commit/55019c6ead5dea100a49cc0c15d99130dff925e3) fix(controller): Fix incorrect main container customization precedence and isResourcesSpecified check (#4681) - * [625189d86](https://github.com/argoproj/argo-workflows/commit/625189d86bc38761b469a18677d83539a487f255) fix(ui): Fix "Using Your Login". Fixes #4707 (#4708) - * [433dc5b99](https://github.com/argoproj/argo-workflows/commit/433dc5b99ab2bbaee8e140a88c4f5860bd8d515a) feat(server): Support email for SSO+RBAC. Closes #4612 (#4644) - * [ae0c0bb84](https://github.com/argoproj/argo-workflows/commit/ae0c0bb84ebcd51b02e3137ea30f9dc215bdf80a) fix(controller): Fixed RBAC on leases (#4715) - * [cd4adda1d](https://github.com/argoproj/argo-workflows/commit/cd4adda1d9737985481dbf73f9ac0bae8a963b2c) fix(controller): Fixed Leader election name (#4709) - * [aec22189f](https://github.com/argoproj/argo-workflows/commit/aec22189f651980e878453009c239348f625412a) fix(test): Fixed Flaky e2e tests TestSynchronizationWfLevelMutex and TestResourceTemplateStopAndTerminate/ResourceTemplateStop (#4688) - * [ab837753b](https://github.com/argoproj/argo-workflows/commit/ab837753bec1f78ad66c0d41b5fbb1739428da88) fix(controller): Fix the RBAC for leader-election (#4706) - * [9669aa522](https://github.com/argoproj/argo-workflows/commit/9669aa522bd18e869c9a5133d8b8acedfc3d22c8) fix(controller): Increate default EventSpamBurst in Eventrecorder (#4698) - * [96a55ce5e](https://github.com/argoproj/argo-workflows/commit/96a55ce5ec91e195c019d648e7f30eafe2a0cf95) feat(controller): HA Leader election support on Workflow-controller (#4622) - * [ad1b6de4d](https://github.com/argoproj/argo-workflows/commit/ad1b6de4d09b6b9284eeed15c5b61217b4da921f) fix: Consider optional artifact arguments (#4672) - * [d9d5f5fb7](https://github.com/argoproj/argo-workflows/commit/d9d5f5fb707d95c1c4d6fe761115ceface26a5cf) feat(controller): Use deterministic name for cron workflow children (#4638) - * [f47fc2227](https://github.com/argoproj/argo-workflows/commit/f47fc2227c5a84a2eace7b977a7761674b81e6f3) fix(controller): Only patch status.active in cron workflows when syncing (#4659) - * [9becf3036](https://github.com/argoproj/argo-workflows/commit/9becf3036f5bfbde8c54a1eebf50c4ce48ca6352) fix(ui): Fixed reconnection hot-loop. Fixes #4580 (#4663) - * [e8cc2fbb4](https://github.com/argoproj/argo-workflows/commit/e8cc2fbb44313b6c9a988072d8947aef2270c038) feat: Support per-output parameter aggregation (#4374) - * [b1e2c2077](https://github.com/argoproj/argo-workflows/commit/b1e2c207722be8ec9f26011957ccdeaa95da2ded) feat(controller): Allow to configure main container resources (#4656) - * [4f9fab981](https://github.com/argoproj/argo-workflows/commit/4f9fab9812ab1bbf5858c51492983774f1f22e93) fix(controller): Cleanup the synchronize pending queue once Workflow deleted (#4664) - * [705542053](https://github.com/argoproj/argo-workflows/commit/7055420536270fa1cd5560e4bf964bcd65813be9) feat(ui): Make it easy to use SSO login with CLI. 
Resolves #4630 (#4645) - * [76bcaecde](https://github.com/argoproj/argo-workflows/commit/76bcaecde01dbc539fcd10564925eeff14e30093) feat(ui): add countdown to cronWorkflowList Closes #4636 (#4641) - * [5614700b7](https://github.com/argoproj/argo-workflows/commit/5614700b7bd466aeae8a175ca586a1ff47981430) feat(ui): Add parameter value enum support to the UI. Fixes #4192 (#4365) - * [95ad3349c](https://github.com/argoproj/argo-workflows/commit/95ad3349cf464a421a8beb329d41bf494343cf89) feat: Add shorthanded option -A for --all-namespaces (#4658) - * [3b66f74c9](https://github.com/argoproj/argo-workflows/commit/3b66f74c9b5761f548aa494facecbd06df8fe296) fix(ui): DataLoaderDropdown fix input type from promise to function that (#4655) - * [c4d986ab6](https://github.com/argoproj/argo-workflows/commit/c4d986ab60b8b0a00d9507da34b832845e4630a7) feat(ui): Replace 3 buttons with drop-down (#4648) - * [fafde1d67](https://github.com/argoproj/argo-workflows/commit/fafde1d677361521b4b55a23dd0dbca7f75e3219) fix(controller): Deal with hyphen in creator. Fixes #4058 (#4643) - * [30e172d5e](https://github.com/argoproj/argo-workflows/commit/30e172d5e968e644c80e0739624ec7c8245b4be4) fix(manifests): Drop capabilities, add CNCF badge. Fixes #2614 (#4633) - * [f726b9f87](https://github.com/argoproj/argo-workflows/commit/f726b9f872612e3501a7bcf2a359790c32e4cca0) feat(ui): Add links to init and wait logs (#4642) - * [94be7da35](https://github.com/argoproj/argo-workflows/commit/94be7da35a63aae4b2563f1f3f90647b661f53c7) feat(executor): Auto create s3 bucket if not present. Closes #3586 (#4574) - * [1212df4d1](https://github.com/argoproj/argo-workflows/commit/1212df4d19dd18045fd0aded7fd1dc5726f7d5c5) feat(controller): Support .AnySucceeded / .AllFailed for TaskGroup in depends logic. Closes #3405 (#3964) - * [6175458a6](https://github.com/argoproj/argo-workflows/commit/6175458a6407aae3788b2ffb96b1bd9b14661069) fix: Count Workflows with no phase as Pending for metrics (#4628) - * [a2566b953](https://github.com/argoproj/argo-workflows/commit/a2566b9534c0012038400a5c6ed8884b855d4c64) feat(executor): More informative log when executors do not support output param from base image layer (#4620) - * [e1919c86b](https://github.com/argoproj/argo-workflows/commit/e1919c86b3ecbd1760a404de6d8637ac0ae6ce0b) fix(ui): Fix Snyk issues (#4631) - * [454f3ae35](https://github.com/argoproj/argo-workflows/commit/454f3ae35418c05e114fd6f181a85cf25900a037) fix(ui): Reference secrets in EnvVars. Fixes #3973 (#4419) - * [1f0392075](https://github.com/argoproj/argo-workflows/commit/1f0392075031c83640a7490ab198bc3af9d1b4ba) fix: derive jsonschema and fix up issues, validate examples dir… (#4611) - * [92a283275](https://github.com/argoproj/argo-workflows/commit/92a283275a1bf1ccde7e6a9ae90385459bd1f6fc) fix(argo-server): fix global variable validation error with reversed dag.tasks (#4369) - * [79ca27f35](https://github.com/argoproj/argo-workflows/commit/79ca27f35e8b07c9c6361be342aa3f097d554b53) fix: Fix TestCleanFieldsExclude (#4625) - * [b3336e732](https://github.com/argoproj/argo-workflows/commit/b3336e7321df6dbf7e14bd49ed77fea8cc8f0666) feat(ui): Add columns--narrower-height to AttributeRow (#4371) - * [91bce2574](https://github.com/argoproj/argo-workflows/commit/91bce2574fab15f4fab4bc4df9e50563aa748838) fix(server): Correct webhook event payload marshalling. 
Fixes #4572 (#4594) - * [39c805fa0](https://github.com/argoproj/argo-workflows/commit/39c805fa0ed167a3cc111556cf1eb864b87627e8) fix: Perform fields filtering server side (#4595) - * [3af8195b2](https://github.com/argoproj/argo-workflows/commit/3af8195b27dfc3e2e426bb649eed923beeaf7e19) fix: Null check pagination variable (#4617) - * [c84d56b64](https://github.com/argoproj/argo-workflows/commit/c84d56b6439cf48814f9ab86e5b899929ab426a8) feat(controller): Enhanced artifact repository ref. See #3184 (#4458) - * [5c538d7a9](https://github.com/argoproj/argo-workflows/commit/5c538d7a918e41029d3911a92c6ac615f04d3b80) fix(executor): Fixed waitMainContainerStart returning prematurely. Closes #4599 (#4601) - * [b92d889a5](https://github.com/argoproj/argo-workflows/commit/b92d889a5a44b01d5d62135848db36be20c20e9d) fix(docs): Bring minio chart instructions up to date (#4586) - * [6c46aab7d](https://github.com/argoproj/argo-workflows/commit/6c46aab7d54678c21df17d6c885473c17f8c66a6) fix(controller): Prevent tasks with names starting with digit to use either 'depends' or 'dependencies' (#4598) - * [4531d7936](https://github.com/argoproj/argo-workflows/commit/4531d7936c25174b3251e926288866c69fc2dba3) refactor: Use polling model for workflow phase metric (#4557) - * [ef779bbf8](https://github.com/argoproj/argo-workflows/commit/ef779bbf8ffc548c4ecc34650f737936ffa5352a) fix(executor): Handle sidecar killing in a process-namespace-shared pod (#4575) - * [9ee4d446c](https://github.com/argoproj/argo-workflows/commit/9ee4d446c1908f59240ca4b814ba565bb1acbc1f) fix(server): serve artifacts directly from disk to support large artifacts (#4589) - * [e3aaf2fb4](https://github.com/argoproj/argo-workflows/commit/e3aaf2fb4f34eeca12778b4caa70c1aa8d80ca14) fix(server): use the correct name when downloading artifacts (#4579) - * [1c62586eb](https://github.com/argoproj/argo-workflows/commit/1c62586eb015e64596bc898166700769364a9d10) feat(controller): Retry transient offload errors. Resolves #4464 (#4482) - * [15fd57942](https://github.com/argoproj/argo-workflows/commit/15fd5794250a2e54e388b394fd288420482df924) feat(controller): Make MAX_OPERATION_TIME configurable. Close #4239 (#4562) - -### Contributors - - * Alastair Maw - * Alex Capras - * Alex Collins - * Alexey Volkov - * Arthur Outhenin-Chalandre - * BOOK - * Basanth Jenu H B - * Daisuke Taniwaki - * Huan-Cheng Chang - * Isaac Gaskin - * J.P. Zivalich - * Kristoffer Johansson - * Marcin Gucki - * Michael Albers - * Noah Hanjun Lee - * Paul Brabban - * RossyWhite - * Saravanan Balasubramanian - * Simeon H.K. 
Fitch - * Simon Behar - * Simon Frey - * Song Juchao - * Stéphane Este-Gracias - * Tomáš Coufal - * Trevor Wood - * Viktor Farcic - * Yuan Tang - * aletepe - * bei-re - * cocotyty - * dherman - * ermeaney - * fsiegmund - * hermanhobnob - * joyciep - * kennytrytek - * lonsdale8734 - * makocchi - * markterm - * saranyaeu2987 - * tczhao - * zhengchenyu - -## v2.12.13 (2021-08-18) - - * [08c9964d5](https://github.com/argoproj/argo-workflows/commit/08c9964d5049c85621ee1cd2ceaa133944a650aa) Update manifests to v2.12.13 - * [17eb51db5](https://github.com/argoproj/argo-workflows/commit/17eb51db5e563d3e7911a42141efe7624ecc4c24) fix: Fix `x509: certificate signed by unknown authority` error (#6566) - -### Contributors - - * Alex Collins - -## v2.12.12 (2021-08-18) - - * [f83ece141](https://github.com/argoproj/argo-workflows/commit/f83ece141ccb7804ffcdd0d9aecbdb016fc97d6b) Update manifests to v2.12.12 - * [26df32eb1](https://github.com/argoproj/argo-workflows/commit/26df32eb1af1597bf66c3b5532ff1d995bb5b940) fix: Generate TLS Certificates on startup and only keep in memory (#6540) - * [46d744f01](https://github.com/argoproj/argo-workflows/commit/46d744f010479b34005f8848297131c14a266b76) fix: Remove client private key from client auth REST config (#6506) - -### Contributors - - * Alex Collins - * David Collom - -## v2.12.11 (2021-04-05) - - * [71d00c787](https://github.com/argoproj/argo-workflows/commit/71d00c7878e2b904ad35ca25712bef7e84893ae2) Update manifests to v2.12.11 - * [d5e0823f1](https://github.com/argoproj/argo-workflows/commit/d5e0823f1a237bffc56a61601a5d2ef011e66b0e) fix: InsecureSkipVerify true - * [3b6c53af0](https://github.com/argoproj/argo-workflows/commit/3b6c53af00843a17dc2f030e08dec1b1c070e3f2) fix(executor): GODEBUG=x509ignoreCN=0 (#5562) - * [631e55d00](https://github.com/argoproj/argo-workflows/commit/631e55d006a342b20180e6cbd82d10f891e4d60f) feat(server): Enforce TLS >= v1.2 (#5172) - -### Contributors - - * Alex Collins - * Simon Behar - -## v2.12.10 (2021-03-08) - - * [f1e0c6174](https://github.com/argoproj/argo-workflows/commit/f1e0c6174b48af69d6e8ecd235a2d709f44f8095) Update manifests to v2.12.10 - * [1ecc5c009](https://github.com/argoproj/argo-workflows/commit/1ecc5c0093cbd4e74efbd3063cbe0499ce81d54a) fix(test): Flaky TestWorkflowShutdownStrategy (#5331) - * [fa8f63c6d](https://github.com/argoproj/argo-workflows/commit/fa8f63c6db3dfc0dfed2fb99f40850beee4f3981) Cherry-pick 5289 - * [d56c420b7](https://github.com/argoproj/argo-workflows/commit/d56c420b7af25bca13518180da185ac70380446e) fix: Disallow object names with more than 63 chars (#5324) - * [6ccfe46d6](https://github.com/argoproj/argo-workflows/commit/6ccfe46d68c6ddca231c746d8d0f6444546b20ad) fix: Backward compatible workflowTemplateRef from 2.11.x to 2.12.x (#5314) - * [0ad734623](https://github.com/argoproj/argo-workflows/commit/0ad7346230ef148b1acd5e78de69bd552cb9d49c) fix: Ensure whitespaces is allowed between name and bracket (#5176) - -### Contributors - - * Saravanan Balasubramanian - * Simon Behar - -## v2.12.9 (2021-02-16) - - * [737905345](https://github.com/argoproj/argo-workflows/commit/737905345d70ba1ebd566ce1230e4f971993dfd0) Update manifests to v2.12.9 - * [81c07344f](https://github.com/argoproj/argo-workflows/commit/81c07344fe5d84e09284bd1fea4f01239524a842) codegen - * [26d2ec0a1](https://github.com/argoproj/argo-workflows/commit/26d2ec0a10913b7df994f7d354fea2be1db04ea9) cherry-picked 5081 - * [92ad730a2](https://github.com/argoproj/argo-workflows/commit/92ad730a28a4eb613b8e5105c9c2ccbb2ed2c3f3) fix: 
Revert "fix(controller): keep special characters in json string when … … 19da392 …use withItems (#4814)" (#5076) - * [1e868ec1a](https://github.com/argoproj/argo-workflows/commit/1e868ec1adf95dd0e53e7939cc8a9d7834cf8fbf) fix(controller): Fix creator dashes (#5082) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Simon Behar - -## v2.12.8 (2021-02-08) - - * [d19d4eeed](https://github.com/argoproj/argo-workflows/commit/d19d4eeed3224ea7e854c658d3544663e86cd509) Update manifests to v2.12.8 - * [cf3b1980d](https://github.com/argoproj/argo-workflows/commit/cf3b1980dc35c615de53b0d07d13a2c828f94bbf) fix: Fix build - * [a8d0b67e8](https://github.com/argoproj/argo-workflows/commit/a8d0b67e87daac56f310136e56f4dbe5acb98267) fix(cli): Add insecure-skip-verify for HTTP1. Fixes #5008 (#5015) - * [a3134de95](https://github.com/argoproj/argo-workflows/commit/a3134de95090c7b980a741f28dde9ca94650ab18) fix: Skip the Workflow not found error in Concurrency policy (#5030) - * [a60e4105d](https://github.com/argoproj/argo-workflows/commit/a60e4105d0e15ba94625ae83dbd728841576a5ee) fix: Unmark daemoned nodes after stopping them (#5005) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Simon Behar - -## v2.12.7 (2021-02-01) - - * [5f5150730](https://github.com/argoproj/argo-workflows/commit/5f5150730c644865a5867bf017100732f55811dd) Update manifests to v2.12.7 - * [637154d02](https://github.com/argoproj/argo-workflows/commit/637154d02b0829699a31b283eaf9045708d96acf) feat: Support retry on transient errors during executor status checking (#4946) - * [8e7ed235e](https://github.com/argoproj/argo-workflows/commit/8e7ed235e8b4411fda6d0b6c088dd4a6e931ffb9) feat(server): Add Prometheus metrics. Closes #4751 (#4952) - -### Contributors - - * Alex Collins - * Simon Behar - * Yuan Tang - -## v2.12.6 (2021-01-25) - - * [4cb5b7eb8](https://github.com/argoproj/argo-workflows/commit/4cb5b7eb807573e167f3429fb5fc8bf5ade0685d) Update manifests to v2.12.6 - * [2696898b3](https://github.com/argoproj/argo-workflows/commit/2696898b3334a08af47bdbabb85a7d9fa1f37050) fix: Mutex not being released on step completion (#4847) - * [067b60363](https://github.com/argoproj/argo-workflows/commit/067b60363f260edf8a680c4cb5fa36cc561ff20a) feat(server): Support email for SSO+RBAC. Closes #4612 (#4644) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Simon Behar - -## v2.12.5 (2021-01-19) - - * [53f022c3f](https://github.com/argoproj/argo-workflows/commit/53f022c3f740b5a8636d74873462011702403e42) Update manifests to v2.12.5 - * [86d7b3b6b](https://github.com/argoproj/argo-workflows/commit/86d7b3b6b4fc4d9336eefea0a0ff44201e35fa47) fix tests - * [633909402](https://github.com/argoproj/argo-workflows/commit/6339094024e23d9dcea1f24981c366e00f36099b) fix tests - * [0c7aa1498](https://github.com/argoproj/argo-workflows/commit/0c7aa1498c900b6fb65b72f82186bab2ff7f0130) fix: Mutex not being released on step completion (#4847) - * [b3742193e](https://github.com/argoproj/argo-workflows/commit/b3742193ef19ffeb33795a39456b3bc1a3a667f5) fix(controller): Consider processed retry node in metrics. Fixes #4846 (#4872) - * [9063a94d6](https://github.com/argoproj/argo-workflows/commit/9063a94d6fc5ab684e3c52c3d99e4dc4a0d034f6) fix(controller): make creator label DNS compliant. 
Fixes #4880 (#4881) - * [84b44cfdb](https://github.com/argoproj/argo-workflows/commit/84b44cfdb44c077b190070fac98b9ee45c06bfc8) fix(controller): Fix node status when daemon pod deleted but its children nodes are still running (#4683) - * [8cd963520](https://github.com/argoproj/argo-workflows/commit/8cd963520fd2a560b5f2df84c98936c72b894997) fix: Do not error on duplicate workflow creation by cron (#4871) - -### Contributors - - * Saravanan Balasubramanian - * Simon Behar - * ermeaney - * lonsdale8734 - -## v2.12.4 (2021-01-12) - - * [f97bef5d0](https://github.com/argoproj/argo-workflows/commit/f97bef5d00361f3d1cbb8574f7f6adf632673008) Update manifests to v2.12.4 - * [c521b27e0](https://github.com/argoproj/argo-workflows/commit/c521b27e04e2fc40d69d215cf80808a72ed22f1d) feat: Publish images on Quay.io (#4860) - * [1cd2570c7](https://github.com/argoproj/argo-workflows/commit/1cd2570c75a56b50bc830a5727221082b422d0c9) feat: Publish images to Quay.io (#4854) - * [7eb16e617](https://github.com/argoproj/argo-workflows/commit/7eb16e617034a9798bef3e0d6c51c798a42758ac) fix: Preserve the original slice when removing string (#4835) - * [e64183dbc](https://github.com/argoproj/argo-workflows/commit/e64183dbcb80e8b654acec517487661de7cf7dd4) fix(controller): keep special characters in json string when use withItems (#4814) - -### Contributors - - * Simon Behar - * Song Juchao - * cocotyty - -## v2.12.3 (2021-01-04) - - * [93ee53012](https://github.com/argoproj/argo-workflows/commit/93ee530126cc1fc154ada84d5656ca82d491dc7f) Update manifests to v2.12.3 - * [3ce298e29](https://github.com/argoproj/argo-workflows/commit/3ce298e2972a67267d9783e2c094be5af8b48eb7) fix tests - * [8177b53c2](https://github.com/argoproj/argo-workflows/commit/8177b53c299a7e4fb64bc3b024ad46a3584b6de0) fix(controller): Various v2.12 fixes. Fixes #4798, #4801, #4806 (#4808) - * [19c7bdabd](https://github.com/argoproj/argo-workflows/commit/19c7bdabdc6d4de43896527ec850f14f38678e38) fix: load all supported authentication plugins for k8s client-go (#4802) - * [331aa4ee8](https://github.com/argoproj/argo-workflows/commit/331aa4ee896a83504144175da404c580dbfdc48c) fix(server): Do not silently ignore sso secret creation error (#4775) - * [0bbc082cf](https://github.com/argoproj/argo-workflows/commit/0bbc082cf33a78cc332e75c31321c80c357aa83b) feat(controller): Rate-limit workflows. Closes #4718 (#4726) - * [a60279827](https://github.com/argoproj/argo-workflows/commit/a60279827f50579d2624f4fa150af5d2e9458588) fix(controller): Support default database port. Fixes #4756 (#4757) - * [5d8573581](https://github.com/argoproj/argo-workflows/commit/5d8573581913ae265c869638904ec74b87f07a6b) feat(controller): Enhanced TTL controller scalability (#4736) - -### Contributors - - * Alex Collins - * Kristoffer Johansson - * Simon Behar - -## v2.12.2 (2020-12-18) - - * [7868e7237](https://github.com/argoproj/argo-workflows/commit/7868e723704bcfe1b943bc076c2e0b83777d6267) Update manifests to v2.12.2 - * [e8c4aa4a9](https://github.com/argoproj/argo-workflows/commit/e8c4aa4a99a5ea06c8c0cf1807df40e99d86da85) fix(controller): Requeue when the pod was deleted. Fixes #4719 (#4742) - * [11bc9c41a](https://github.com/argoproj/argo-workflows/commit/11bc9c41abb1786bbd06f83bf3222865c7da320c) feat(controller): Pod deletion grace period. 
Fixes #4719 (#4725) - -### Contributors - - * Alex Collins - -## v2.12.1 (2020-12-17) - - * [9a7e044e2](https://github.com/argoproj/argo-workflows/commit/9a7e044e27b1e342748d9f41ea60d1998b8907ab) Update manifests to v2.12.1 - * [d21c45286](https://github.com/argoproj/argo-workflows/commit/d21c452869330658083b5066bd84b6cbd9f1f745) Change argo-server crt/key owner (#4750) - -### Contributors - - * Daisuke Taniwaki - * Simon Behar - -## v2.12.0 (2020-12-17) - - * [53029017f](https://github.com/argoproj/argo-workflows/commit/53029017f05a369575a1ff73387bafff9fc9b451) Update manifests to v2.12.0 - * [434580669](https://github.com/argoproj/argo-workflows/commit/4345806690634f23427ade69a72bae2e0b289fc7) fix(controller): Fixes resource version misuse. Fixes #4714 (#4741) - * [e192fb156](https://github.com/argoproj/argo-workflows/commit/e192fb15616e3a192e1b4b3db0a596a6c70e2430) fix(executor): Copy main/executor container resources from controller by value instead of reference (#4737) - * [4fb0d96d0](https://github.com/argoproj/argo-workflows/commit/4fb0d96d052136914f3772276f155b92db9289fc) fix(controller): Fix incorrect main container customization precedence and isResourcesSpecified check (#4681) - * [1aac79e9b](https://github.com/argoproj/argo-workflows/commit/1aac79e9bf04d2fb15f080db1359ba09e0c1a257) feat(controller): Allow to configure main container resources (#4656) - -### Contributors - - * Alex Collins - * Simon Behar - * Yuan Tang - -## v2.12.0-rc6 (2020-12-15) - - * [e55b886ed](https://github.com/argoproj/argo-workflows/commit/e55b886ed4706a403a8895b2819b168bd638b256) Update manifests to v2.12.0-rc6 - * [1fb0d8b97](https://github.com/argoproj/argo-workflows/commit/1fb0d8b970f95e98a324e106f431b4782eb2b88f) fix(controller): Fixed workflow stuck with mutex lock (#4744) - * [4059820ea](https://github.com/argoproj/argo-workflows/commit/4059820ea4c0fd7c278c3a8b5cf05cb00c2e3380) fix(executor): Always check if resource has been deleted in checkResourceState() (#4738) - * [739af45b5](https://github.com/argoproj/argo-workflows/commit/739af45b5cf018332d9c5397e6beda826cf4a143) fix(ui): Fix YAML for workflows with storedWorkflowTemplateSpec. 
Fixes #4691 (#4695) - * [359803433](https://github.com/argoproj/argo-workflows/commit/3598034335bb6eb9bb95dd79375570e19bb07e1e) fix: Allow Bearer token in server mode (#4735) - * [bf589b014](https://github.com/argoproj/argo-workflows/commit/bf589b014cbe81d1ba46b3a08d9426e97c2683c3) fix(executor): Deal with the pod watch API call timing out (#4734) - * [fabf20b59](https://github.com/argoproj/argo-workflows/commit/fabf20b5928cc1314e20e9047a9b122fdbe5ed62) fix(controller): Increate default EventSpamBurst in Eventrecorder (#4698) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Simon Behar - * Yuan Tang - * hermanhobnob - -## v2.12.0-rc5 (2020-12-10) - - * [3aa86fffb](https://github.com/argoproj/argo-workflows/commit/3aa86fffb7c975e3a39302f5b2e37f99fe58fa4f) Update manifests to v2.12.0-rc5 - * [3581a1e77](https://github.com/argoproj/argo-workflows/commit/3581a1e77c927830908ba42f9b63b31c28501346) fix: Consider optional artifact arguments (#4672) - * [50210fc38](https://github.com/argoproj/argo-workflows/commit/50210fc38bdd80fec1c1affd9836b8b0fcf41e31) feat(controller): Use deterministic name for cron workflow children (#4638) - * [3a4e974c0](https://github.com/argoproj/argo-workflows/commit/3a4e974c0cf14ba24df70258a5b5ae19a966397d) fix(controller): Only patch status.active in cron workflows when syncing (#4659) - * [2aaad26fe](https://github.com/argoproj/argo-workflows/commit/2aaad26fe129a6c4eeccb60226941b14664aca4a) fix(ui): DataLoaderDropdown fix input type from promise to function that (#4655) - * [72ca92cb4](https://github.com/argoproj/argo-workflows/commit/72ca92cb4459007968b13e097ef68f3e307454ce) fix: Count Workflows with no phase as Pending for metrics (#4628) - * [8ea219b86](https://github.com/argoproj/argo-workflows/commit/8ea219b860bc85622c120d495860d8a62eb67e5a) fix(ui): Reference secrets in EnvVars. Fixes #3973 (#4419) - * [3b35ba2bd](https://github.com/argoproj/argo-workflows/commit/3b35ba2bdee31c8d512acf145c10bcb3f73d7286) fix: derive jsonschema and fix up issues, validate examples dir… (#4611) - * [2f49720aa](https://github.com/argoproj/argo-workflows/commit/2f49720aa7bea619b8691cb6d9e41b20971a178e) fix(ui): Fixed reconnection hot-loop. Fixes #4580 (#4663) - * [4f8e4a515](https://github.com/argoproj/argo-workflows/commit/4f8e4a515dbde688a23147a40625198e1f9b91a0) fix(controller): Cleanup the synchronize pending queue once Workflow deleted (#4664) - * [128598478](https://github.com/argoproj/argo-workflows/commit/128598478bdd6a5d35d76101feb85c04b4d6c7a8) fix(controller): Deal with hyphen in creator. Fixes #4058 (#4643) - * [2d05d56ea](https://github.com/argoproj/argo-workflows/commit/2d05d56ea0af726f9a0906f72119105f27453ff9) feat(controller): Make MAX_OPERATION_TIME configurable. 
Close #4239 (#4562) - * [c00ff7144](https://github.com/argoproj/argo-workflows/commit/c00ff7144bda39995823b8f0e3668c88958d9736) fix: Fix TestCleanFieldsExclude (#4625) - -### Contributors - - * Alex Collins - * Paul Brabban - * Saravanan Balasubramanian - * Simon Behar - * aletepe - * tczhao - -## v2.12.0-rc4 (2020-12-02) - - * [e34bc3b72](https://github.com/argoproj/argo-workflows/commit/e34bc3b7237669ae1d0a800f8210a462cb6e4cfa) Update manifests to v2.12.0-rc4 - * [feea63f02](https://github.com/argoproj/argo-workflows/commit/feea63f029f2416dc7002852c5541a9638a03d72) feat(executor): More informative log when executors do not support output param from base image layer (#4620) - * [65f5aefef](https://github.com/argoproj/argo-workflows/commit/65f5aefefe592f11a387b5db715b4895e47e1af1) fix(argo-server): fix global variable validation error with reversed dag.tasks (#4369) - * [e6870664e](https://github.com/argoproj/argo-workflows/commit/e6870664e16db166529363f85ed90632f66ca9de) fix(server): Correct webhook event payload marshalling. Fixes #4572 (#4594) - * [b1d682e71](https://github.com/argoproj/argo-workflows/commit/b1d682e71c8f3f3a66b71d47f8db22db55637629) fix: Perform fields filtering server side (#4595) - * [61b670481](https://github.com/argoproj/argo-workflows/commit/61b670481cb693b25dfc0186ff28dfe29dfa9353) fix: Null check pagination variable (#4617) - * [ace0ee1b2](https://github.com/argoproj/argo-workflows/commit/ace0ee1b23273ac982d0c8885d50755608849258) fix(executor): Fixed waitMainContainerStart returning prematurely. Closes #4599 (#4601) - * [f03f99ef6](https://github.com/argoproj/argo-workflows/commit/f03f99ef69b60e91f2dc08c6729ba58d27e56d1d) refactor: Use polling model for workflow phase metric (#4557) - * [8e887e731](https://github.com/argoproj/argo-workflows/commit/8e887e7315a522998e810021d10334e860a3b307) fix(executor): Handle sidecar killing in a process-namespace-shared pod (#4575) - * [991fa6747](https://github.com/argoproj/argo-workflows/commit/991fa6747bce82bef9919384925e0a6b2f7f3668) fix(server): serve artifacts directly from disk to support large artifacts (#4589) - * [2eeb1fcef](https://github.com/argoproj/argo-workflows/commit/2eeb1fcef6896e0518c3ab1d1cd715de93fe4c41) fix(server): use the correct name when downloading artifacts (#4579) - * [d1a37d5fb](https://github.com/argoproj/argo-workflows/commit/d1a37d5fbabc1f3c90b15a266858d207275e31ab) feat(controller): Retry transient offload errors. Resolves #4464 (#4482) - -### Contributors - - * Alex Collins - * Daisuke Taniwaki - * Simon Behar - * Yuan Tang - * dherman - * fsiegmund - * zhengchenyu - -## v2.12.0-rc3 (2020-11-23) - - * [85cafe6e8](https://github.com/argoproj/argo-workflows/commit/85cafe6e882f9a49e402c29d14e04ded348b07b2) Update manifests to v2.12.0-rc3 - * [916b4549b](https://github.com/argoproj/argo-workflows/commit/916b4549b9b4e2a74902aea16cfc04996dccb263) feat(ui): Add Template/Cron workflow filter to workflow page. 
Closes #4532 (#4543) - * [48af02445](https://github.com/argoproj/argo-workflows/commit/48af024450f6a395ca887073343d3296d69d836a) fix: executor/pns containerid prefix fix (#4555) - * [53195ed56](https://github.com/argoproj/argo-workflows/commit/53195ed56029c639856a395ed5c92db82d49a2d9) fix: Respect continueOn for leaf tasks (#4455) - * [7e121509c](https://github.com/argoproj/argo-workflows/commit/7e121509c6745dc7f6fa40cc35790012521f1f12) fix(controller): Correct default port logic (#4547) - * [a712e535b](https://github.com/argoproj/argo-workflows/commit/a712e535bec3b196219188236d4063ecc1153ba4) fix: Validate metric key names (#4540) - * [c469b053f](https://github.com/argoproj/argo-workflows/commit/c469b053f8ca27ca03d36343fa17277ad374edc9) fix: Missing arg lines caused files not to copy into containers (#4542) - * [0980ead36](https://github.com/argoproj/argo-workflows/commit/0980ead36d39620c914e04e2aa207e688a631e9a) fix(test): fix TestWFDefaultWithWFTAndWf flakiness (#4538) - * [564e69f3f](https://github.com/argoproj/argo-workflows/commit/564e69f3fdef6239f9091401ec4472bd8bd248bd) fix(ui): Do not auto-reload doc.location. Fixes #4530 (#4535) - * [176d890c1](https://github.com/argoproj/argo-workflows/commit/176d890c1cac25856f67fbed4cc39a396aa87a93) fix(controller): support float for param value (#4490) - * [4bacbc121](https://github.com/argoproj/argo-workflows/commit/4bacbc121ae028557b7f0718f02fbb25e8e63850) feat(controller): make sso timeout configurable via cm (#4494) - * [02e1f0e0d](https://github.com/argoproj/argo-workflows/commit/02e1f0e0d1c8ad8422984000bb2b49dc3709b1a0) fix(server): Add `list sa` and `create secret` to `argo-server` roles. Closes #4526 (#4514) - * [d0082e8fb](https://github.com/argoproj/argo-workflows/commit/d0082e8fb87fb731c7247f28da0c1b29b6fa3f02) fix: link templates not replacing multiple templates with same name (#4516) - * [411bde37c](https://github.com/argoproj/argo-workflows/commit/411bde37c2b146c1fb52d913bf5629a36e0a5af1) feat: adds millisecond-level timestamps to argo and workflow-controller (#4518) - * [2c54ca3fb](https://github.com/argoproj/argo-workflows/commit/2c54ca3fbee675815566508fc10c137e7b4f9f2f) add bonprix to argo users (#4520) - -### Contributors - - * Alex Collins - * Alexander Mikhailian - * Arghya Sadhu - * Boolman - * David Gibbons - * Lennart Kindermann - * Ludovic Cléroux - * Oleg Borodai - * Saravanan Balasubramanian - * Simon Behar - * tczhao - -## v2.12.0-rc2 (2020-11-12) - - * [f509fa550](https://github.com/argoproj/argo-workflows/commit/f509fa550b0694907bb9447084df11af171f9cc9) Update manifests to v2.12.0-rc2 - * [2dab2d158](https://github.com/argoproj/argo-workflows/commit/2dab2d15868c5f52ca4e3f7ba1c5276d55c26a42) fix(test): fix TestWFDefaultWithWFTAndWf flakiness (#4507) - * [64ae33034](https://github.com/argoproj/argo-workflows/commit/64ae33034d30a943dca71b0c5e4ebd97018448bf) fix(controller): prepend script path to the script template args. Resolves #4481 (#4492) - * [0931baf5f](https://github.com/argoproj/argo-workflows/commit/0931baf5fbe48487278b9a6c2fa206ab02406e5b) feat: Redirect to requested URL after SSO login (#4495) - * [465447c03](https://github.com/argoproj/argo-workflows/commit/465447c039a430f675a2c0cc10e71e7024fc79a3) fix: Ensure ContainerStatus in PNS is terminated before continuing (#4469) - * [f7287687b](https://github.com/argoproj/argo-workflows/commit/f7287687b61c7e2d8e27864e9768c216a53fd071) fix(ui): Check node children before counting them. 
(#4498)
- * [bfc13c3f5](https://github.com/argoproj/argo-workflows/commit/bfc13c3f5b9abe2980826dee1283433b7cb22385) fix: Ensure redirect to login when using empty auth token (#4496)
- * [d56ce890c](https://github.com/argoproj/argo-workflows/commit/d56ce890c900c300bd396c5050cea9fb2b4aa358) feat(cli): add selector and field-selector option to terminate (#4448)
- * [e501fcca1](https://github.com/argoproj/argo-workflows/commit/e501fcca16a908781a786b93417cc41644b62ea4) fix(controller): Refactor the Merge Workflow, WorkflowTemplate and WorkflowDefaults (#4354)
- * [2ee3f5a71](https://github.com/argoproj/argo-workflows/commit/2ee3f5a71f4791635192d7cd4e1b583d80e81077) fix(ui): fix the `all` option in the workflow archive list (#4486)
-
-### Contributors
-
- * Noah Hanjun Lee
- * Saravanan Balasubramanian
- * Simon Behar
- * Vlad Losev
- * dherman
- * ivancili
-
-## v2.12.0-rc1 (2020-11-06)
-
- * [98be709d8](https://github.com/argoproj/argo-workflows/commit/98be709d88647a10231825f13aff03d08217a35a) Update manifests to v2.12.0-rc1
- * [a441a97bd](https://github.com/argoproj/argo-workflows/commit/a441a97bd53a92b8cc5fb918edd1f66701d1cf5c) refactor(server): Use patch instead of update to resume/suspend (#4468)
- * [9ecf04991](https://github.com/argoproj/argo-workflows/commit/9ecf0499195b05bac1bb9fe6268c7d77bc12a963) fix(controller): When semaphore lock config gets updated, enqueue the waiting workflows (#4421)
- * [c31d1722e](https://github.com/argoproj/argo-workflows/commit/c31d1722e6e5f800a62b30e9773c5e6049c243f5) feat(cli): Support ARGO_HTTP1 for HTTP/1 CLI requests. Fixes #4394 (#4416)
- * [b8fb2a8b3](https://github.com/argoproj/argo-workflows/commit/b8fb2a8b3b7577d46e25c55829310df2f72fb335) chore(docs): Fix docgen (#4459)
- * [6c5ab7804](https://github.com/argoproj/argo-workflows/commit/6c5ab7804d708981e250f1af6b8cb4e78c2291a7) feat: Add the --no-utf8 parameter to `argo get` command (#4449)
- * [933a4db0c](https://github.com/argoproj/argo-workflows/commit/933a4db0cfdc3b39309b83dcc8105e4424df4775) refactor: Simplify grpcutil.TranslateError (#4465)
- * [d752e2fa4](https://github.com/argoproj/argo-workflows/commit/d752e2fa4fd69204e2c5989c8adceeb19963f2d4) feat: Add resume/suspend endpoints for CronWorkflows (#4457)
- * [42d060500](https://github.com/argoproj/argo-workflows/commit/42d060500a04fce181b09cb7f1cec108a9b8b522) fix: localhost not being resolved. Resolves #4460, #3564 (#4461)
- * [59843e1fa](https://github.com/argoproj/argo-workflows/commit/59843e1faa91ab30e06e550d1df8e81adfcdac71) fix(controller): Trigger no of workflows based on available lock (#4413)
- * [1be03db7e](https://github.com/argoproj/argo-workflows/commit/1be03db7e7604fabbbfce58eb45776d583d9bdf1) fix: Return copy of stored templates to ensure they are not modified (#4452)
- * [854883bde](https://github.com/argoproj/argo-workflows/commit/854883bdebd6ea07937a2860d8f3287c9a079709) fix(controller): Fix throttler. Fixes #1554 and #4081 (#4132)
- * [b956bc1ac](https://github.com/argoproj/argo-workflows/commit/b956bc1acd141f73b2f3182c10efcc68fbf55e74) chore(controller): Refactor and tidy up (#4453)
- * [3e451114d](https://github.com/argoproj/argo-workflows/commit/3e451114d58bc0c5a210dda15a4b264aeed635a6) fix(docs): timezone DST note on Cronworkflow (#4429)
- * [f4f68a746](https://github.com/argoproj/argo-workflows/commit/f4f68a746b7d0c5e2e71f99d69307b86d03b69c1) fix: Resolve inconsistent CronWorkflow persistence (#4440)
- * [da93545f6](https://github.com/argoproj/argo-workflows/commit/da93545f687bfb3235d79ba31f6651da9b77ff66) feat(server): Add WorkflowLogs API. See #4394 (#4450)
- * [3960a0ee5](https://github.com/argoproj/argo-workflows/commit/3960a0ee5daecfbde241d0a46b0179c88bad6b61) fix: Fix validation with Argo Variable in activeDeadlineSeconds (#4451)
- * [dedf0521e](https://github.com/argoproj/argo-workflows/commit/dedf0521e8e799051cd3cde8c29ee419bb4a68f9) feat(ui): Visualisation of the suspended CronWorkflows in the list. Fixes #4264 (#4446)
- * [0d13f40d6](https://github.com/argoproj/argo-workflows/commit/0d13f40d673ca5da6ba6066776d8d01d297671c0) fix(controller): Tolerate int64 parameters. Fixes #4359 (#4401)
- * [2628be91e](https://github.com/argoproj/argo-workflows/commit/2628be91e4a19404c66c7d16b8fbc02b475b6399) fix(server): Only try to use auth-mode if enabled. Fixes #4400 (#4412)
- * [7f2ff80f1](https://github.com/argoproj/argo-workflows/commit/7f2ff80f130b3cd5834b4c49ab6c1692dd93a76c) fix: Assume controller is in UTC when calculating NextScheduledRuntime (#4417)
- * [45fbc951f](https://github.com/argoproj/argo-workflows/commit/45fbc951f51eee34151d51aa1cea3426efa1595f) fix(controller): Design-out event errors. Fixes #4364 (#4383)
- * [5a18c674b](https://github.com/argoproj/argo-workflows/commit/5a18c674b43d304165efc16ca92635971bb21074) fix(docs): update link to container spec (#4424)
- * [8006da129](https://github.com/argoproj/argo-workflows/commit/8006da129122a4e0046e0d016924d73af88be398) fix: Add x-frame config option (#4420)
- * [462e55e97](https://github.com/argoproj/argo-workflows/commit/462e55e97467330f30248b1f9d1dd12e2ee93fa3) fix: Ensure resourceDuration variables in metrics are always in seconds (#4411)
- * [3aeb1741e](https://github.com/argoproj/argo-workflows/commit/3aeb1741e720a7e7e005321451b2701f263ed85a) fix(executor): artifact chmod should only if err != nil (#4409)
- * [2821e4e8f](https://github.com/argoproj/argo-workflows/commit/2821e4e8fe27d744256b1621a81ac4ce9d1da68c) fix: Use correct template when processing metrics (#4399)
- * [e8f826147](https://github.com/argoproj/argo-workflows/commit/e8f826147cebc1a04ced90044689319f8e8c9a14) fix(validate): Local parameters should be validated locally. Fixes #4326 (#4358)
- * [ddd45b6e8](https://github.com/argoproj/argo-workflows/commit/ddd45b6e8a2754e872a9a36a037d0288d617e9e3) fix(ui): Reconnect to DAG. Fixes #4301 (#4378)
- * [252c46335](https://github.com/argoproj/argo-workflows/commit/252c46335f544617d675e733fe417729b37846e0) feat(ui): Sign-post examples and the catalog. Fixes #4360 (#4382)
- * [334d1340f](https://github.com/argoproj/argo-workflows/commit/334d1340f32d927fa119bdebd1318977f7a3b159) feat(server): Enable RBAC for SSO. Closes #3525 (#4198)
- * [e409164ba](https://github.com/argoproj/argo-workflows/commit/e409164ba37ae0b75ee995d206498b1c750b486e) fix(ui): correct log viewer only showing first log line (#4389)
- * [28bdb6fff](https://github.com/argoproj/argo-workflows/commit/28bdb6ffff8308677af6d8ccf7b0ea70b53bb2fd) fix(ui): Ignore running workflows in report. Fixes #4387 (#4397)
- * [7ace8f85f](https://github.com/argoproj/argo-workflows/commit/7ace8f85f1cb9cf716a30a53da2a78c07d3e13fc) fix(controller): Fix estimation bug. Fixes #4386 (#4396)
- * [bdac65b09](https://github.com/argoproj/argo-workflows/commit/bdac65b09750ee0afe7bd3697792d9e4b3a10255) fix(ui): correct typing errors in workflow-drawer (#4373)
- * [db5e28ed2](https://github.com/argoproj/argo-workflows/commit/db5e28ed26f4c35e0c429907c930cd098717c32e) fix: Use DeletionHandlingMetaNamespaceKeyFunc in cron controller (#4379)
- * [99d33eed5](https://github.com/argoproj/argo-workflows/commit/99d33eed5b953952762dbfed4f44384bcbd46e8b) fix(server): Download artifacts from UI. Fixes #4338 (#4350)
- * [db8a6d0b5](https://github.com/argoproj/argo-workflows/commit/db8a6d0b5a13259b6705b222e28dab1d0f999dc7) fix(controller): Enqueue the front workflow if semaphore lock is available (#4380)
- * [933ba8340](https://github.com/argoproj/argo-workflows/commit/933ba83407b9e33e5d6e16660d28c33782d122df) fix: Fix intstr nil dereference (#4376)
- * [220ac736c](https://github.com/argoproj/argo-workflows/commit/220ac736c1297c566667d3fb621a9dadea955c76) fix(controller): Only warn if cron job missing. Fixes #4351 (#4352)
- * [dbbe95ccc](https://github.com/argoproj/argo-workflows/commit/dbbe95ccca01d985c5fbb81a2329f0bdb7fa5b1d) Use '[[:blank:]]' instead of ' ' to match spaces/tabs (#4361)
- * [b03bd12a4](https://github.com/argoproj/argo-workflows/commit/b03bd12a463e3375bdd620c4fda85846597cdad4) fix: Do not allow tasks using 'depends' to begin with a digit (#4218)
- * [b76246e28](https://github.com/argoproj/argo-workflows/commit/b76246e2894def70f4ad6902d05e64e3db0224ac) fix(executor): Increase pod patch backoff. Fixes #4339 (#4340)
- * [ec671ddce](https://github.com/argoproj/argo-workflows/commit/ec671ddceb1c8d18fa0410e22106659a1572683c) feat(executor): Wait for termination using pod watch for PNS and K8SAPI executors. (#4253)
- * [3156559b4](https://github.com/argoproj/argo-workflows/commit/3156559b40afe4248a3fd124a9611992e7459930) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#4342)
- * [f5e23f79d](https://github.com/argoproj/argo-workflows/commit/f5e23f79da253d3b29f718b71251ece464fd88f2) refactor: De-couple config (#4307)
- * [37a2ae06e](https://github.com/argoproj/argo-workflows/commit/37a2ae06e05ec5698c902f76dc231cf839ac2041) fix(ui): correct typing errors in events-panel (#4334)
- * [03ef9d615](https://github.com/argoproj/argo-workflows/commit/03ef9d615bac1b38309189e77b38235aaa7f5713) fix(ui): correct typing errors in workflows-toolbar (#4333)
- * [4de64c618](https://github.com/argoproj/argo-workflows/commit/4de64c618dea85334c0fa04a4dbc310629335c47) fix(ui): correct typing errors in cron-workflow-details (#4332)
- * [939d8c301](https://github.com/argoproj/argo-workflows/commit/939d8c30153b4f7d82da9b2df13aa235d3118070) feat(controller): add enum support in parameters (fixes #4192) (#4314)
- * [e14f4f922](https://github.com/argoproj/argo-workflows/commit/e14f4f922ff158b1fa1e0592fc072474e3257bd9) fix(executor): Fix the artifacts option in k8sapi and PNS executor. Fixes #4244 (#4279)
- * [ea9db4362](https://github.com/argoproj/argo-workflows/commit/ea9db43622c6b035b5cf800bb4cb112fcace7eac) fix(cli): Return exit code on Argo template lint command (#4292)
- * [aa4a435b4](https://github.com/argoproj/argo-workflows/commit/aa4a435b4892f7881f4eeeb03d3d8e24ee4695ef) fix(cli): Fix panic on argo template lint without argument (#4300)
- * [20b3b1baf](https://github.com/argoproj/argo-workflows/commit/20b3b1baf7c06d288134e638e6107339f9c4ec3a) fix: merge artifact arguments from workflow template. Fixes #4296 (#4316)
- * [3c63c3c40](https://github.com/argoproj/argo-workflows/commit/3c63c3c407c13a3cbf5089c0a00d029b7da85706) chore(controller): Refactor the CronWorkflow schedule logic with sync.Map (#4320)
- * [40648bcfe](https://github.com/argoproj/argo-workflows/commit/40648bcfe98828796edcac73548d681ffe9f0853) Update USERS.md (#4322)
- * [07b2ef62f](https://github.com/argoproj/argo-workflows/commit/07b2ef62f44f94d90a2fff79c47f015ceae40b8d) fix(executor): Retrieve containerId from cri-containerd /proc/{pid}/cgroup. Fixes #4302 (#4309)
- * [e6b024900](https://github.com/argoproj/argo-workflows/commit/e6b02490042065990f1f0053d0be0abb89c90d5e) feat(controller): Allow whitespace in variable substitution. Fixes #4286 (#4310)
- * [9119682b0](https://github.com/argoproj/argo-workflows/commit/9119682b016e95b8ae766bf7d2688b981a267736) fix(build): Some minor Makefile fixes (#4311)
- * [db20b4f2c](https://github.com/argoproj/argo-workflows/commit/db20b4f2c7ecf4388f70a5e422dc19fc78c4e753) feat(ui): Submit resources without namespace to current namespace. Fixes #4293 (#4298)
- * [26f39b6d1](https://github.com/argoproj/argo-workflows/commit/26f39b6d1aff8bee60826dde5b7e58d09e38d1ee) fix(ci): add non-root user to Dockerfile (#4305)
- * [1cc68d893](https://github.com/argoproj/argo-workflows/commit/1cc68d8939a7e144a798687f6d8b8ecc8c0f4195) fix(ui): undefined namespace in constructors (#4303)
- * [e54bf815d](https://github.com/argoproj/argo-workflows/commit/e54bf815d6494aa8c466eea6caec6165249a3003) fix(controller): Patch rather than update cron workflows. (#4294)
- * [9157ef2ad](https://github.com/argoproj/argo-workflows/commit/9157ef2ad60920866ca029711f4a7cb5705771d0) fix: TestMutexInDAG failure in master (#4283)
- * [2d6f4e66f](https://github.com/argoproj/argo-workflows/commit/2d6f4e66fd8ad8d0535afc9a328fc090a5700c30) fix: WorkflowEventBinding typo in aggregated roles (#4287)
- * [c02bb7f0b](https://github.com/argoproj/argo-workflows/commit/c02bb7f0bb50e18cdf95f2bbd2305be6d065d006) fix(controller): Fix argo retry with PVCs. Fixes #4275 (#4277)
- * [c0423a223](https://github.com/argoproj/argo-workflows/commit/c0423a2238399f5db9e39618c93c8212e359831c) fix(ui): Ignore missing nodes in DAG. Fixes #4232 (#4280)
- * [58144290d](https://github.com/argoproj/argo-workflows/commit/58144290d78e038fbcb7dbbdd6db291ff0a6aa86) fix(controller): Fix cron-workflow re-apply error. (#4278)
- * [c605c6d73](https://github.com/argoproj/argo-workflows/commit/c605c6d73452b8dff899c0ff1b166c726181dd9f) fix(controller): Synchronization lock didn't release on DAG call flow Fixes #4046 (#4263)
- * [3cefc1471](https://github.com/argoproj/argo-workflows/commit/3cefc1471f62f148221713ad80660c50f224ff92) feat(ui): Add a nudge for users who have not set their security context. Closes #4233 (#4255)
- * [a461b076b](https://github.com/argoproj/argo-workflows/commit/a461b076bc044c6cca04744be4c692e2edd44eb2) feat(cli): add `--field-selector` option for `delete` command (#4274)
- * [d7fac63e1](https://github.com/argoproj/argo-workflows/commit/d7fac63e12518e43174584fdc984d3163c55dc24) chore(controller): N/W progress fixes (#4269)
- * [4c4234537](https://github.com/argoproj/argo-workflows/commit/4c42345374346c07852d3ea57d481832ebb42154) feat(controller): Track N/M progress. See #2717 (#4194)
- * [afbb957a8](https://github.com/argoproj/argo-workflows/commit/afbb957a890fc1c2774a54b83887e586558e5a87) fix: Add WorkflowEventBinding to aggregated roles (#4268)
- * [6ce6bf499](https://github.com/argoproj/argo-workflows/commit/6ce6bf499a3a68b95eb9de3ef3748e34e4da022f) fix(controller): Make the delay before the first workflow reconciliation configurable. Fixes #4107 (#4224)
- * [42b797b8a](https://github.com/argoproj/argo-workflows/commit/42b797b8a47923cab3d36b813727b22e4d239cce) chore(api): Update swagger.json with Kubernetes v1.17.5 types. Closes #4204 (#4226)
- * [346292b1b](https://github.com/argoproj/argo-workflows/commit/346292b1b0152d5bfdc0387a8b2c11b5d6d5bac1) feat(controller): Reduce reconciliation time by exiting earlier. (#4225)
- * [407ac3498](https://github.com/argoproj/argo-workflows/commit/407ac3498846d8879d785e985b88695dbf693f43) fix(ui): Revert bad part of commit (#4248)
- * [eaae2309d](https://github.com/argoproj/argo-workflows/commit/eaae2309dcd89435c657d8e647968f0f1e13bcae) fix(ui): Fix bugs with DAG view. Fixes #4232 & #4236 (#4241)
- * [04f7488ab](https://github.com/argoproj/argo-workflows/commit/04f7488abea14544880ac7957d873963b13112cc) feat(ui): Adds a report page which shows basic historical workflow metrics. Closes #3557 (#3558)
- * [a545a53f6](https://github.com/argoproj/argo-workflows/commit/a545a53f6e1d03f9b016c8032c05a377a79bfbcc) fix(controller): Check the correct object for Cronworkflow reapply error log (#4243)
- * [ec7a5a402](https://github.com/argoproj/argo-workflows/commit/ec7a5a40227979703c7e9a39a8419be6270e4805) fix(Makefile): removed deprecated k3d cmds. Fixes #4206 (#4228)
- * [1706a3954](https://github.com/argoproj/argo-workflows/commit/1706a3954a7ec0aad2ff3f5c7ba47e010b87d207) fix: Increase default number of CronWorkflow workers (#4215)
- * [50f231819](https://github.com/argoproj/argo-workflows/commit/50f23181911998c13096dd15980380e1ecaeaa2d) feat(cli): Print 'no resource msg' when `argo list` returns zero workflows (#4166)
- * [2143a5019](https://github.com/argoproj/argo-workflows/commit/2143a5019df31b7d2d6ccb86b81ac70b98714827) fix(controller): Support workflowDefaults on TTLController for WorkflowTemplateRef Fixes #4188 (#4195)
- * [cac10f130](https://github.com/argoproj/argo-workflows/commit/cac10f1306ae6f28eee4b2485f802b7512920474) fix(controller): Support int64 for param value. Fixes #4169 (#4202)
- * [e910b7015](https://github.com/argoproj/argo-workflows/commit/e910b70159f6f92ef3dacf6382b42b430e15a388) feat: Controller/server runAsNonRoot. Closes #1824 (#4184)
- * [4bd5fe10a](https://github.com/argoproj/argo-workflows/commit/4bd5fe10a2ef4f36acd5be7523f72bdbdb7e150c) fix(controller): Apply Workflow default on normal workflow scenario Fixes #4208 (#4213)
- * [f9b65c523](https://github.com/argoproj/argo-workflows/commit/f9b65c52321d6e49d7fbc78f69d18e7d1ee442ad) chore(build): Update `make codegen` to only run on changes (#4205)
- * [0879067a4](https://github.com/argoproj/argo-workflows/commit/0879067a48d7b1d667c827d064a9aa00a3595a6e) chore(build): re-add #4127 and steps to verify image pull (#4219)
- * [b17b569ea](https://github.com/argoproj/argo-workflows/commit/b17b569eae0b518a649790daf9e4af87b900a91e) fix(controller): reduce withItem/withParams memory usage. Fixes #3907 (#4207)
- * [524049f01](https://github.com/argoproj/argo-workflows/commit/524049f01b00d1fb04f169860217553869b79b53) fix: Revert "chore: try out pre-pushing linux/amd64 images and updating ma… Fixes #4216 (#4217)
- * [9c08433f3](https://github.com/argoproj/argo-workflows/commit/9c08433f37dde41fbe7dbae32e97c4b3f70e8081) feat(executor): Decompress zip file input artifacts. Fixes #3585 (#4068)
- * [14650339d](https://github.com/argoproj/argo-workflows/commit/14650339df95916d7a676354289d4dfac1ea7776) fix(executor): Update executor retry config for ExponentialBackoff. (#4196)
- * [2b127625a](https://github.com/argoproj/argo-workflows/commit/2b127625a837e6225b9b803523e02b617df9cb20) fix(executor): Remove IsTransientErr check for ExponentialBackoff. Fixes #4144 (#4149)
- * [f7e85f04b](https://github.com/argoproj/argo-workflows/commit/f7e85f04b11fd65e45b9408d5413be3bbb95e5cb) feat(server): Make Argo Server issue own JWE for SSO. Fixes #4027 & #3873 (#4095)
- * [951d38f8e](https://github.com/argoproj/argo-workflows/commit/951d38f8eb19460268d9640dce8f94d3287ff6e2) refactor: Refactor Synchronization code (#4114)
- * [9319c074e](https://github.com/argoproj/argo-workflows/commit/9319c074e742c5d9cb97d6c5bbbf076afe886f76) fix(ui): handle logging disconnects gracefully (#4150)
- * [6265c7091](https://github.com/argoproj/argo-workflows/commit/6265c70915de42e4eb5c472379743a44d283e463) fix: Ensure CronWorkflows are persisted once per operation (#4172)
- * [2a992aee7](https://github.com/argoproj/argo-workflows/commit/2a992aee733aaa73bb43ab1c4ff3b7919ee8b640) fix: Provide helpful hint when creating workflow with existing name (#4156)
- * [de3a90dd1](https://github.com/argoproj/argo-workflows/commit/de3a90dd155023ede63a537c113ac0e58e6c6c73) refactor: upgrade argo-ui library version (#4178)
- * [b7523369b](https://github.com/argoproj/argo-workflows/commit/b7523369bb6d278c504d1e90cd96d1dbe7f8f6d6) feat(controller): Estimate workflow & node duration. Closes #2717 (#4091)
- * [c468b34d1](https://github.com/argoproj/argo-workflows/commit/c468b34d1b7b26d36d2f7a365e71635d1d6cb0db) fix(controller): Correct unstructured API version. Caused by #3719 (#4148)
- * [de81242ec](https://github.com/argoproj/argo-workflows/commit/de81242ec681003d65b84862f6584d075889f523) fix: Render full tree of onExit nodes in UI (#4109)
- * [109876e62](https://github.com/argoproj/argo-workflows/commit/109876e62f239397accbd451bb1b52a775998f36) fix: Changing DeletePropagation to background in TTL Controller and Argo CLI (#4133)
- * [1e10e0ccb](https://github.com/argoproj/argo-workflows/commit/1e10e0ccbf366fa9052ad720373dc11a4d2cb671) Documentation (#4122)
- * [b3682d4f1](https://github.com/argoproj/argo-workflows/commit/b3682d4f117cecf1fe6d2a54c281870f15e201a1) fix(cli): add validate args in delete command (#4142)
- * [373543d11](https://github.com/argoproj/argo-workflows/commit/373543d114bfba727ef60645c3d9cb05e671808c) feat(controller): Sum resources duration for DAGs and steps (#4089)
- * [4829e9abd](https://github.com/argoproj/argo-workflows/commit/4829e9abd7f58e6332527830b0892222f901c8bd) feat: Add MaxAge to memoization (#4060)
- * [af53a4b00](https://github.com/argoproj/argo-workflows/commit/af53a4b008055d24c52dffa0b9483beb14de1ecb) fix(docs): Update k3d command for running argo locally (#4139)
- * [554d66168](https://github.com/argoproj/argo-workflows/commit/554d66168fc3aaa34f982c181bfdc0d499befb27) fix(ui): Ignore referenced nodes that don't exist in UI. Fixes #4079 (#4099)
- * [e8b79921e](https://github.com/argoproj/argo-workflows/commit/e8b79921e777e0262b7cdfa80795e1f1ff580d1b) fix(executor): race condition in docker kill (#4097)
- * [3bb0c2a17](https://github.com/argoproj/argo-workflows/commit/3bb0c2a17cabdd1e5b1d736531ef801a930790f9) feat(artifacts): Allow HTTP artifact load to set request headers (#4010)
- * [63b413754](https://github.com/argoproj/argo-workflows/commit/63b41375484502fe96cc9e66d99a3f96304b8e27) fix(cli): Add retry to retry, again. Fixes #4101 (#4118)
- * [76cbfa9de](https://github.com/argoproj/argo-workflows/commit/76cbfa9defa7da45a363304c9a7acba839fcf64a) fix(ui): Show "waiting" msg while waiting for pod logs. Fixes #3916 (#4119)
- * [196c5eed7](https://github.com/argoproj/argo-workflows/commit/196c5eed7b604f6bac14c59450624706cbee3228) fix(controller): Process workflows at least once every 20m (#4103)
- * [4825b7ec7](https://github.com/argoproj/argo-workflows/commit/4825b7ec766bd32004354be0233b92b07d8afdfb) fix(server): argo-server-role to allow submitting cronworkflows from UI (#4112)
- * [29aba3d10](https://github.com/argoproj/argo-workflows/commit/29aba3d1007e47805aa51b820a0007ebdeb228ca) fix(controller): Treat annotation and conditions changes as significant (#4104)
- * [befcbbcee](https://github.com/argoproj/argo-workflows/commit/befcbbcee77edb6438fea575be052bd8e063fd22) feat(ui): Improve error recovery. Close #4087 (#4094)
- * [5cb99a434](https://github.com/argoproj/argo-workflows/commit/5cb99a434ccfe167110bae618a2c882b59b2bb5b) fix(ui): No longer redirect to `undefined` namespace. See #4084 (#4115)
- * [fafc5a904](https://github.com/argoproj/argo-workflows/commit/fafc5a904d2e2eff15bb1b3e8c4ae3963f522fa8) fix(cli): Reinstate --gloglevel flag. Fixes #4093 (#4100)
- * [c4d910233](https://github.com/argoproj/argo-workflows/commit/c4d910233c01c659799a916a33b1052fbd5eafe6) fix(cli): Add retry to retry ;). Fixes #4101 (#4105)
- * [6b350b095](https://github.com/argoproj/argo-workflows/commit/6b350b09519d705d28252f14c5935016c42a507c) fix(controller): Correct the order merging the fields in WorkflowTemplateRef scenario. Fixes #4044 (#4063)
- * [764b56bac](https://github.com/argoproj/argo-workflows/commit/764b56baccb1bb4c12b520f815d1e78b2e037373) fix(executor): windows output artifacts. Fixes #4082 (#4083)
- * [7c92b3a5b](https://github.com/argoproj/argo-workflows/commit/7c92b3a5b743b0755862c3eeabbc3d7fcdf3a7d1) fix(server): Optional timestamp inclusion when retrieving workflow logs. Closes #4033 (#4075)
- * [1bf651b51](https://github.com/argoproj/argo-workflows/commit/1bf651b51136d3999c8d88cbfa37ac5d0033a709) feat(controller): Write back workflow to informer to prevent conflict errors. Fixes #3719 (#4025)
- * [fdf0b056f](https://github.com/argoproj/argo-workflows/commit/fdf0b056fc18d9494e5924dc7f189bc7a93ad23a) feat(controller): Workflow-level `retryStrategy`/resubmit pending pods by default. Closes #3918 (#3965)
- * [d7a297c07](https://github.com/argoproj/argo-workflows/commit/d7a297c07e61be5f51c329b4d0bbafe7a816886f) feat(controller): Use pod informer for performance. (#4024)
- * [d8d0ecbb5](https://github.com/argoproj/argo-workflows/commit/d8d0ecbb52eefea8df4bf100ca15ccc79de4aa46) fix(ui): [Snyk] Fix for 1 vulnerabilities (#4031)
- * [ed59408fe](https://github.com/argoproj/argo-workflows/commit/ed59408fe3ff0d01a066d6e6d17b1491945e7c26) fix: Improve handling of Pod deletion scenario (#4064)
- * [e2f4966bc](https://github.com/argoproj/argo-workflows/commit/e2f4966bc018f98e84d3dd0c99fb3c0f1be0cd98) fix: make cross-platform compatible filepaths/keys (#4040)
- * [5461d5418](https://github.com/argoproj/argo-workflows/commit/5461d5418928a74d0df223916c69be72e1d23618) feat(controller): Retry archiving later on error. Fixes #3786 (#3862)
- * [4e0852261](https://github.com/argoproj/argo-workflows/commit/4e08522615ea248ba0b9563c084ae30c387c1c4a) fix: Fix unintended inf recursion (#4067)
- * [f1083f39a](https://github.com/argoproj/argo-workflows/commit/f1083f39a4fc8ffc84b700b3be8c45b041e34756) fix: Tolerate malformed workflows when retrying (#4028)
- * [a07539514](https://github.com/argoproj/argo-workflows/commit/a07539514ec6d1dea861c79a0f3c5ca5bb0fe55f) chore(executor): upgrade `kubectl` to 1.18.8. Closes #3996 (#3999)
- * [fc77beec3](https://github.com/argoproj/argo-workflows/commit/fc77beec37e5b958450c4e05049b031159c53751) fix(ui): Tiny modal DAG tweaks. Fixes #4039 (#4043)
- * [74da06721](https://github.com/argoproj/argo-workflows/commit/74da06721b5194f649c2d4bb629215552d01a653) docs(Windows): Add more information on artifacts and limitations (#4032)
- * [ef0ce47e1](https://github.com/argoproj/argo-workflows/commit/ef0ce47e154b554f78496e442ce2137263881231) feat(controller): Support different volume GC strategies. Fixes #3095 (#3938)
- * [9f1206246](https://github.com/argoproj/argo-workflows/commit/9f120624621949e3f8d20d082b8cdf7fabf499fb) fix: Don't save label filter in local storage (#4022)
- * [0123c9a8b](https://github.com/argoproj/argo-workflows/commit/0123c9a8be196406d72be789e08c0dee6020954b) fix(controller): use interpolated values for mutexes and semaphores #3955 (#3957)
- * [5be254425](https://github.com/argoproj/argo-workflows/commit/5be254425e3bb98850b31a2ae59f66953468d890) feat(controller): Panic or error on mis-matched resource version (#3949)
- * [ae779599e](https://github.com/argoproj/argo-workflows/commit/ae779599ee0589f13a44c6ad4dd51ca7c3d452ac) fix: Delete realtime metrics of running Workflows that are deleted (#3993)
- * [4557c7137](https://github.com/argoproj/argo-workflows/commit/4557c7137eb113a260cc14564a664a966dd4b8ab) fix(controller): Script Output didn't set if template has RetryStrategy (#4002)
- * [a013609cd](https://github.com/argoproj/argo-workflows/commit/a013609cdd499acc9eebbf8382533b964449752f) fix(ui): Do not save undefined namespace. Fixes #4019 (#4021)
- * [f8145f83d](https://github.com/argoproj/argo-workflows/commit/f8145f83dee3ad76bfbe5d3a3fdf6c1472ffd79d) fix(ui): Correctly show pod events. Fixes #4016 (#4018)
- * [2d722f1ff](https://github.com/argoproj/argo-workflows/commit/2d722f1ff218cff7afcc77fb347e24f7319035a5) fix(ui): Allow you to view timeline tab. Fixes #4005 (#4006)
- * [f36ad2bb2](https://github.com/argoproj/argo-workflows/commit/f36ad2bb20bbb5706463e480929c7566ba116432) fix(ui): Report errors when uploading files. Fixes #3994 (#3995)
- * [b5f319190](https://github.com/argoproj/argo-workflows/commit/b5f3191901d5f7e763047fd6421d642c8edeb2b2) feat(ui): Introduce modal DAG renderer. Fixes: #3595 (#3967)
- * [ad607469c](https://github.com/argoproj/argo-workflows/commit/ad607469c1f03f390e2b782d1474b53d5ac4656b) fix(controller): Revert `resubmitPendingPods` mistake. Fixes #4001 (#4004)
- * [fd1465c91](https://github.com/argoproj/argo-workflows/commit/fd1465c91bf3f765a247889a2161969c80451673) fix(controller): Revert parameter value to `*string`. Fixes #3960 (#3963)
- * [138793413](https://github.com/argoproj/argo-workflows/commit/1387934132252a479f441ae50273d79434305b27) fix: argo-cluster-role pvc get (#3986)
- * [f09babdbb](https://github.com/argoproj/argo-workflows/commit/f09babdbb83b63f9b5867e81922209e40507286c) fix: Default PDB example typo (#3914)
- * [f81b006af](https://github.com/argoproj/argo-workflows/commit/f81b006af19081f661b81e1c33ace65f67c1eb25) fix: Step and Task level timeout examples (#3997)
- * [91c49c14a](https://github.com/argoproj/argo-workflows/commit/91c49c14a4600f873972af9960f6b0f55271b426) fix: Consider WorkflowTemplate metadata during validation (#3988)
- * [7b1d17a00](https://github.com/argoproj/argo-workflows/commit/7b1d17a006378d8f3c2e60eb201e2add4d4b13ba) fix(server): Remove XSS vulnerability. Fixes #3942 (#3975)
- * [20c518ca8](https://github.com/argoproj/argo-workflows/commit/20c518ca81d0594efb46e6cec178830ff4ddcbea) fix(controller): End DAG execution on deadline exceeded error. Fixes #3905 (#3921)
- * [74a68d47c](https://github.com/argoproj/argo-workflows/commit/74a68d47cfce851ab1393ce2ac45837074001f04) feat(ui): Add `startedAt` and `finishedAt` variables to configurable links. Fixes #3898 (#3946)
- * [8e89617bd](https://github.com/argoproj/argo-workflows/commit/8e89617bd651139d1dbed7034019d53b372c403e) fix: typo of argo server cli (#3984) (#3985)
- * [1def65b1f](https://github.com/argoproj/argo-workflows/commit/1def65b1f129457e2be1a0db2fb33fd75a5f570b) fix: Create global scope before workflow-level realtime metrics (#3979)
- * [402fc0bf6](https://github.com/argoproj/argo-workflows/commit/402fc0bf65c11fa2c6bee3b407d6696089a3387e) fix(executor): set artifact mode recursively. Fixes #3444 (#3832)
- * [ff5ed7e42](https://github.com/argoproj/argo-workflows/commit/ff5ed7e42f0f583e78961f49c8580deb94eb1d69) fix(cli): Allow `argo version` without KUBECONFIG. Fixes #3943 (#3945)
- * [d4210ff37](https://github.com/argoproj/argo-workflows/commit/d4210ff3735dddb9e1c5e1742069c8334aa3184a) fix(server): Adds missing webhook permissions. Fixes #3927 (#3929)
- * [184884af0](https://github.com/argoproj/argo-workflows/commit/184884af007b41290e53d20a145eb294b834b60c) fix(swagger): Correct item type. Fixes #3926 (#3932)
- * [97764ba92](https://github.com/argoproj/argo-workflows/commit/97764ba92d3bc1e6b42f3502aadbce5701797bfe) fix: Fix UI selection issues (#3928)
- * [b4329afd8](https://github.com/argoproj/argo-workflows/commit/b4329afd8981a8db0d56df93968aac5e95ec38e4) fix: Fix children is not defined error (#3950)
- * [3b16a0233](https://github.com/argoproj/argo-workflows/commit/3b16a023370c469120ab2685c61a223869c57971) chore(doc): fixed java client project link (#3947)
- * [5a0c515c4](https://github.com/argoproj/argo-workflows/commit/5a0c515c45f8fbcf0811c25774c1c5f97e72286d) feat: Step and Task Level Global Timeout (#3686)
- * [24c778388](https://github.com/argoproj/argo-workflows/commit/24c778388a56792e847fcc30bd92a10299451959) fix: Custom metrics are not recorded for DAG tasks Fixes #3872 (#3886)
-
-### Contributors
-
- * Alex Capras
- * Alex Collins
- * Alexander Matyushentsev
- * Amim Knabben
- * Ang Gao
- * Antoine Dao
- * Bailey Hayes
- * Basanth Jenu H B
- * Byungjin Park (BJ)
- * Elvis Jakupovic
- * Fischer Jemison
- * Greg Roodt
- * Huan-Cheng Chang
- * Ids van der Molen
- * Igor Stepura
- * InvictusMB
- * Juan C. Müller
- * Justen Walker
- * Lénaïc Huard
- * Markus Lippert
- * Matt Campbell
- * Michael Weibel
- * Mike Chau
- * Nicwalle
- * Niklas Vest
- * Nirav Patel
- * Noah Hanjun Lee
- * Pavel Čižinský
- * Pranaye Karnati
- * Saravanan Balasubramanian
- * Simon Behar
- * Snyk bot
- * Tomáš Coufal
- * boundless-thread
- * conanoc
- * dherman
- * duluong
- * ivancili
- * jacky
- * saranyaeu2987
- * tianfeiyu
- * zhengchenyu
-
-## v2.11.8 (2020-11-20)
-
- * [310e099f8](https://github.com/argoproj/argo-workflows/commit/310e099f82520030246a7c9d66f3efaadac9ade2) Update manifests to v2.11.8
- * [e8ba1ed83](https://github.com/argoproj/argo-workflows/commit/e8ba1ed8303f1e816628e0b3aa5c96710e046629) feat(controller): Make MAX_OPERATION_TIME configurable. Close #4239 (#4562)
- * [66f2306bb](https://github.com/argoproj/argo-workflows/commit/66f2306bb4ddf0794f92360c35783c1941df30c8) feat(controller): Allow whitespace in variable substitution. Fixes #4286 (#4310)
-
-### Contributors
-
- * Alex Collins
- * Ids van der Molen
-
-## v2.11.7 (2020-11-02)
-
- * [bf3fec176](https://github.com/argoproj/argo-workflows/commit/bf3fec176cf6bdf3e23b2cb73ec7d4e3d051ca40) Update manifests to v2.11.7
- * [0f18ab1f1](https://github.com/argoproj/argo-workflows/commit/0f18ab1f149a02f01e7f031da2b0770b569974ec) fix: Assume controller is in UTC when calculating NextScheduledRuntime (#4417)
- * [6026ba5fd](https://github.com/argoproj/argo-workflows/commit/6026ba5fd1762d8e006d779d5907f10fd6c2463d) fix: Ensure resourceDuration variables in metrics are always in seconds (#4411)
- * [ca5adbc05](https://github.com/argoproj/argo-workflows/commit/ca5adbc05ceb518b634dfdb7857786b247b8d39f) fix: Use correct template when processing metrics (#4399)
- * [0a0255a7e](https://github.com/argoproj/argo-workflows/commit/0a0255a7e594f6ae9c80f35e05bcd2804d129428) fix(ui): Reconnect to DAG. Fixes #4301 (#4378)
- * [8dd7d3ba8](https://github.com/argoproj/argo-workflows/commit/8dd7d3ba820af499d1d3cf0eb82417d5c4b0b48b) fix: Use DeletionHandlingMetaNamespaceKeyFunc in cron controller (#4379)
- * [47f580089](https://github.com/argoproj/argo-workflows/commit/47f5800894767b947628cc5a8a64d3089ce9a2cb) fix(server): Download artifacts from UI. Fixes #4338 (#4350)
- * [0416aba50](https://github.com/argoproj/argo-workflows/commit/0416aba50d13baabfa0f677b744a9c47ff8d8426) fix(controller): Enqueue the front workflow if semaphore lock is available (#4380)
- * [a2073d58e](https://github.com/argoproj/argo-workflows/commit/a2073d58e68cf15c75b7997afb49845db6a1423f) fix: Fix intstr nil dereference (#4376)
- * [89080cf8f](https://github.com/argoproj/argo-workflows/commit/89080cf8f6f904a162100d279993f4d835a27ba2) fix(controller): Only warn if cron job missing. Fixes #4351 (#4352)
- * [a4186dfd7](https://github.com/argoproj/argo-workflows/commit/a4186dfd71325ec8b0f1882e17d0d4ef7f5b0f56) fix(executor): Increase pod patch backoff. Fixes #4339 (#4340)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
-
-## v2.11.6 (2020-10-19)
-
- * [5eebce9af](https://github.com/argoproj/argo-workflows/commit/5eebce9af4409da9de536f189877542dd88692e0) Update manifests to v2.11.6
- * [38a4a2e35](https://github.com/argoproj/argo-workflows/commit/38a4a2e351771e7960b347c266b7d6592efe90a2) chore(controller): Refactor the CronWorkflow schedule logic with sync.Map (#4320)
- * [79e7a12a0](https://github.com/argoproj/argo-workflows/commit/79e7a12a08070235fbf944d68e694d343498a49c) fix(executor): Remove IsTransientErr check for ExponentialBackoff. Fixes #4144 (#4149)
-
-### Contributors
-
- * Alex Collins
- * Ang Gao
- * Saravanan Balasubramanian
-
-## v2.11.5 (2020-10-15)
-
- * [076bf89c4](https://github.com/argoproj/argo-workflows/commit/076bf89c4658adbd3b96050599f81424d1b08d6e) Update manifests to v2.11.5
- * [b9d8c96b7](https://github.com/argoproj/argo-workflows/commit/b9d8c96b7d023a1d260472883f44daf57bfa41ad) fix(controller): Patch rather than update cron workflows. (#4294)
- * [3d1224264](https://github.com/argoproj/argo-workflows/commit/3d1224264f6b61d62dfd598826647689391aa804) fix: TestMutexInDAG failure in master (#4283)
- * [05519427d](https://github.com/argoproj/argo-workflows/commit/05519427db492bfb092c44c562c4ac7d3324989a) fix(controller): Synchronization lock didn't release on DAG call flow Fixes #4046 (#4263)
- * [ff2abd632](https://github.com/argoproj/argo-workflows/commit/ff2abd63207f2aa949d31f09139650240f751c6b) fix: Increase default number of CronWorkflow workers (#4215)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
-
-## v2.11.4 (2020-10-14)
-
- * [571bff1fe](https://github.com/argoproj/argo-workflows/commit/571bff1fe4ad7e6610ad04d9a048091b1e453c5a) Update manifests to v2.11.4
- * [05a6078d8](https://github.com/argoproj/argo-workflows/commit/05a6078d8de135525c0094a02a72b8dc0f0faa5c) fix(controller): Fix argo retry with PVCs. Fixes #4275 (#4277)
- * [08216ec75](https://github.com/argoproj/argo-workflows/commit/08216ec7557b2e2b2d1cb160e74ff2623661214a) fix(ui): Ignore missing nodes in DAG. Fixes #4232 (#4280)
- * [476ea70fe](https://github.com/argoproj/argo-workflows/commit/476ea70fea0a981a736ccd2f070a7f9de8bb9d13) fix(controller): Fix cron-workflow re-apply error. (#4278)
- * [448ae1137](https://github.com/argoproj/argo-workflows/commit/448ae1137b3e9d34fb0b44cd8f6e7bdfa31f702f) fix(controller): Check the correct object for Cronworkflow reapply error log (#4243)
- * [e3dfd7884](https://github.com/argoproj/argo-workflows/commit/e3dfd7884863a9368776dd51517553069ec0ab21) fix(ui): Revert bad part of commit (#4248)
- * [249e8329c](https://github.com/argoproj/argo-workflows/commit/249e8329c64754cda691110a39d4c7c43a075413) fix(ui): Fix bugs with DAG view. Fixes #4232 & #4236 (#4241)
-
-### Contributors
-
- * Alex Collins
- * Juan C. Müller
- * Saravanan Balasubramanian
-
-## v2.11.3 (2020-10-07)
-
- * [a00a8f141](https://github.com/argoproj/argo-workflows/commit/a00a8f141c221f50e397aea8f86a54171441e395) Update manifests to v2.11.3
- * [e48fe222d](https://github.com/argoproj/argo-workflows/commit/e48fe222d405efc84331e8f3d9dadd8072d18325) fixed merge conflict
- * [51068f72d](https://github.com/argoproj/argo-workflows/commit/51068f72d5cc014576b4977b1a651c0d5b89f925) fix(controller): Support int64 for param value. Fixes #4169 (#4202)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
-
-## v2.11.2 (2020-10-05)
-
- * [0dfeb8e56](https://github.com/argoproj/argo-workflows/commit/0dfeb8e56071e7a1332370732949bc2e15073005) Update manifests to v2.11.2
- * [461a36a15](https://github.com/argoproj/argo-workflows/commit/461a36a15ecb8c11dcb62694c0c5bd624b835bd4) fix(controller): Apply Workflow default on normal workflow scenario Fixes #4208 (#4213)
- * [4b9cf5d28](https://github.com/argoproj/argo-workflows/commit/4b9cf5d28ae661873847238203b0098a2722a97a) fix(controller): reduce withItem/withParams memory usage. Fixes #3907 (#4207)
- * [8fea7bf6b](https://github.com/argoproj/argo-workflows/commit/8fea7bf6b5cf0c89cf9c3bb0c3f57c1397236f5e) Revert "Revert "chore: use build matrix and cache (#4111)""
- * [efb20eea0](https://github.com/argoproj/argo-workflows/commit/efb20eea05afc919652ebf17c6456791a283d4d2) Revert "chore: use build matrix and cache (#4111)"
- * [de1c9e52d](https://github.com/argoproj/argo-workflows/commit/de1c9e52d48d8f91545dcfd32f426c235d001469) refactor: Refactor Synchronization code (#4114)
- * [605d0895a](https://github.com/argoproj/argo-workflows/commit/605d0895aa436d8543ad43eee179cc169b792863) fix: Ensure CronWorkflows are persisted once per operation (#4172)
- * [6f738db07](https://github.com/argoproj/argo-workflows/commit/6f738db0733da6aa16f851d1dbefa235e987bcf8) Revert "chore: Update Go module to argo/v2"
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
-
-## v2.11.1 (2020-09-29)
-
- * [13b51d569](https://github.com/argoproj/argo-workflows/commit/13b51d569d580ab9493e977fe2944889784d2a0a) Update manifests to v2.11.1
- * [3f88216e6](https://github.com/argoproj/argo-workflows/commit/3f88216e61e3b408083956ad848c1603145c8507) fix: Render full tree of onExit nodes in UI (#4109)
- * [d6c2a57be](https://github.com/argoproj/argo-workflows/commit/d6c2a57be0b0c3cc4d46bff36cdf3e426f760b82) fix: Fix unintended inf recursion (#4067)
- * [4fda60f40](https://github.com/argoproj/argo-workflows/commit/4fda60f402bbbd5d3c0cadbd886feb065f255e19) fix: Tolerate malformed workflows when retrying (#4028)
- * [995d59cc5](https://github.com/argoproj/argo-workflows/commit/995d59cc52d054f92c8ac54959e8115d4117dbf2) fix: Changing DeletePropagation to background in TTL Controller and Argo CLI (#4133)
- * [aaef0a284](https://github.com/argoproj/argo-workflows/commit/aaef0a2846afc0943f9bb7688d2fba6e11b49f62) fix(ui): Ignore referenced nodes that don't exist in UI. Fixes #4079 (#4099)
- * [fedae45ad](https://github.com/argoproj/argo-workflows/commit/fedae45ad6e4bfe297d1078928a6deb4269ebac0) fix(controller): Process workflows at least once every 20m (#4103)
- * [6de464e80](https://github.com/argoproj/argo-workflows/commit/6de464e809ecf39bfe9b12eaf28fb8e7b20a27a9) fix(server): argo-server-role to allow submitting cronworkflows from UI (#4112)
- * [ce3b90e25](https://github.com/argoproj/argo-workflows/commit/ce3b90e2553d4646f8f5bc95a88e48765ad1de19) fix(controller): Treat annotation and conditions changes as significant (#4104)
- * [bf98b9778](https://github.com/argoproj/argo-workflows/commit/bf98b9778b556e68ef39a4290e489819d3142d6f) fix(ui): No longer redirect to `undefined` namespace. See #4084 (#4115)
- * [af60b37dc](https://github.com/argoproj/argo-workflows/commit/af60b37dc5909c70730da01e9322605ad2852283) fix(cli): Reinstate --gloglevel flag. Fixes #4093 (#4100)
- * [2cd6a9677](https://github.com/argoproj/argo-workflows/commit/2cd6a9677f0665931230fbdb6c8203381d9c9b77) fix(server): Optional timestamp inclusion when retrieving workflow logs. Closes #4033 (#4075)
- * [2f7c4035f](https://github.com/argoproj/argo-workflows/commit/2f7c4035fe7f16b75bf418a67778db97c836ecf0) fix(controller): Correct the order merging the fields in WorkflowTemplateRef scenario. Fixes #4044 (#4063)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
- * Tomáš Coufal
- * ivancili
-
-## v2.11.0 (2020-09-17)
-
- * [f8e750de5](https://github.com/argoproj/argo-workflows/commit/f8e750de5ebab6f3c494c972889b31ef24c73c9b) Update manifests to v2.11.0
- * [c06db5757](https://github.com/argoproj/argo-workflows/commit/c06db57572843b38322b301aba783685c774045b) fix(ui): Tiny modal DAG tweaks. Fixes #4039 (#4043)
-
-### Contributors
-
- * Alex Collins
-
-## v2.11.0-rc3 (2020-09-14)
-
- * [1b4cf3f1f](https://github.com/argoproj/argo-workflows/commit/1b4cf3f1f26f6abf93355a0108f5048be9677978) Update manifests to v2.11.0-rc3
- * [e2594eca9](https://github.com/argoproj/argo-workflows/commit/e2594eca965ec2ea14b07f3c1acee4b288b02789) fix: Fix children is not defined error (#3950)
- * [2ed8025eb](https://github.com/argoproj/argo-workflows/commit/2ed8025eb0fbf0599c20efc1bccfedfe51c88215) fix: Fix UI selection issues (#3928)
- * [8dc0e94e6](https://github.com/argoproj/argo-workflows/commit/8dc0e94e68881693b504f6f2777f937e6f3c3e42) fix: Create global scope before workflow-level realtime metrics (#3979)
- * [cdeabab72](https://github.com/argoproj/argo-workflows/commit/cdeabab722fac97a326e70b956a92d4cb5d58f2c) fix(controller): Script Output didn't set if template has RetryStrategy (#4002)
- * [9c83fac80](https://github.com/argoproj/argo-workflows/commit/9c83fac80594fb0abef18b0de0ff563132ee84ae) fix(ui): Do not save undefined namespace. Fixes #4019 (#4021)
- * [7fd2ecb1d](https://github.com/argoproj/argo-workflows/commit/7fd2ecb1d057cbf1e1b8139c30c20eccf86611ea) fix(ui): Correctly show pod events. Fixes #4016 (#4018)
- * [11242c8be](https://github.com/argoproj/argo-workflows/commit/11242c8be5c3bbaf2dbcff68198958504ea88e43) fix(ui): Allow you to view timeline tab. Fixes #4005 (#4006)
- * [3770f618a](https://github.com/argoproj/argo-workflows/commit/3770f618ab073fbac6654c9edcc4b53a1e010fea) fix(ui): Report errors when uploading files. Fixes #3994 (#3995)
- * [0fed28ce2](https://github.com/argoproj/argo-workflows/commit/0fed28ce26f12a42f3321afee9188e9f59acfea7) fix: Custom metrics are not recorded for DAG tasks Fixes #3872 (#3886)
- * [9146636e7](https://github.com/argoproj/argo-workflows/commit/9146636e75e950149ce39df33e4fc6f7346c7282) feat(ui): Introduce modal DAG renderer. Fixes: #3595 (#3967)
- * [4b7a4694c](https://github.com/argoproj/argo-workflows/commit/4b7a4694c436c724cb75e09564fcd8c87923d6d7) fix(controller): Revert `resubmitPendingPods` mistake. Fixes #4001 (#4004)
- * [49752fb5f](https://github.com/argoproj/argo-workflows/commit/49752fb5f9aa6ab151f311bb62faa021b2ebffa5) fix(controller): Revert parameter value to `*string`. Fixes #3960 (#3963)
- * [ddf850b1b](https://github.com/argoproj/argo-workflows/commit/ddf850b1bd99a8343b5e94e7d3634912031e8d44) fix: Consider WorkflowTemplate metadata during validation (#3988)
- * [a8ba447e3](https://github.com/argoproj/argo-workflows/commit/a8ba447e3ed4fff3d90cd772fc551db8c225a1c0) fix(server): Remove XSS vulnerability. Fixes #3942 (#3975)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
- * Simon Behar
-
-## v2.11.0-rc2 (2020-09-09)
-
- * [f930c0296](https://github.com/argoproj/argo-workflows/commit/f930c0296c41c8723a6f826260a098bb0647efce) Update manifests to v2.11.0-rc2
- * [b6890adb1](https://github.com/argoproj/argo-workflows/commit/b6890adb1b5c40ddb4b1aa41c39c337f0f08df12) fix(cli): Allow `argo version` without KUBECONFIG. Fixes #3943 (#3945)
- * [354733e72](https://github.com/argoproj/argo-workflows/commit/354733e72f8b50645d4818236a5842c258d5627c) fix(swagger): Correct item type. Fixes #3926 (#3932)
- * [1e461766f](https://github.com/argoproj/argo-workflows/commit/1e461766f2e7214c5723d15c724a77d14e908340) fix(server): Adds missing webhook permissions. Fixes #3927 (#3929)
- * [884861926](https://github.com/argoproj/argo-workflows/commit/8848619262850a9f1c44d08084300a445a0c0ffb) feat: Step and Task Level Global Timeout (#3686)
-
-### Contributors
-
- * Alex Collins
- * Saravanan Balasubramanian
-
-## v2.11.0-rc1 (2020-09-01)
-
- * [f446f735b](https://github.com/argoproj/argo-workflows/commit/f446f735b4c8c16c95f1306ad3453af7f8ed0108) Update manifests to v2.11.0-rc1
- * [de2185c81](https://github.com/argoproj/argo-workflows/commit/de2185c81ae54736177e0476acae42b8e2dc0af5) feat(controller): Set retry factor to 2. Closes #3911 (#3919)
- * [be91d7621](https://github.com/argoproj/argo-workflows/commit/be91d7621d82c6fb23e18ab4eebc9be58a59d81f) fix: Workflow should fail on Pod failure before container starts Fixes #3879 (#3890)
- * [650869fde](https://github.com/argoproj/argo-workflows/commit/650869fde66158a9e03e58aae8aeabe698fe0da5) feat(server): Display events involved in the workflow. Closes #3673 (#3726)
- * [5b5d2359e](https://github.com/argoproj/argo-workflows/commit/5b5d2359ef9f573121fe6429e386f03dd8652ece) fix(controller): Cron re-apply update (#3883)
- * [fd3fca804](https://github.com/argoproj/argo-workflows/commit/fd3fca804ef998c875ce0ee2914605a918d9d01a) feat(artifacts): retrieve subpath from unarchived ref artifact. Closes #3061 (#3063)
- * [6e82bf382](https://github.com/argoproj/argo-workflows/commit/6e82bf382a0b41df46db2cc3a1a3925d009f42e1) feat(controller): Emit events for malformed cron workflows. See #3881 (#3889)
- * [f04bdd6af](https://github.com/argoproj/argo-workflows/commit/f04bdd6afa9f17d86833f1537f8ad6713a441bcb) Update workflow-controller-configmap.yaml (#3901)
- * [bb79e3f5a](https://github.com/argoproj/argo-workflows/commit/bb79e3f5a00a62e58056e4abd07b129a0f01617d) fix(executor): Replace default retry in executor with an increased value retryer (#3891)
- * [b681c1130](https://github.com/argoproj/argo-workflows/commit/b681c1130a41942291e964f29336f8dca53ec4b2) fix(ui): use absolute URL to redirect from autocomplete list. Closes #3903 (#3906)
- * [712c77f5c](https://github.com/argoproj/argo-workflows/commit/712c77f5c46cdbb6f03ec2b020fbca9de08d6894) chore(users): Add Fynd Trak to the list of Users (#3900)
- * [9681a4e2d](https://github.com/argoproj/argo-workflows/commit/9681a4e2d22d64bbbd4dab6f377fbd0e7a5e39e5) fix(ui): Improve error recovery. Fixes #3867 (#3869)
- * [4c18a06ba](https://github.com/argoproj/argo-workflows/commit/4c18a06ba0a46037d40713a91f69320869b3bdc8) feat(controller): Always retry when `IsTransientErr` to tolerate transient errors. Fixes #3217 (#3853)
- * [0cf7709ff](https://github.com/argoproj/argo-workflows/commit/0cf7709ff5b9409fcbaa5322601c5a9045ecbe40) fix(controller): Failure tolerant workflow archiving and offloading. Fixes #3786 and #3837 (#3787)
- * [359ee8db4](https://github.com/argoproj/argo-workflows/commit/359ee8db4e89d15effd542680aaebdddcbbb2fd0) fix: Corrects CRD and Swagger types. Fixes #3578 (#3809)
- * [58ac52b89](https://github.com/argoproj/argo-workflows/commit/58ac52b892c15c785f9209aac86d6374199400f1) chore(ui): correct a typo (#3876)
- * [dae0f2df1](https://github.com/argoproj/argo-workflows/commit/dae0f2df1ffcc8a2ff4f3dce1ea7da3f34587e2f) feat(controller): Do not try to create pods we know exist to prevent `exceeded quota` errors. Fixes #3791 (#3851)
- * [a24bc9448](https://github.com/argoproj/argo-workflows/commit/a24bc944822c9f5eed92c0b5b07284d7992908fa) feat(controller): Mutexes. Closes #2677 (#3631)
- * [99fe11a7b](https://github.com/argoproj/argo-workflows/commit/99fe11a7b9b2c26c25701c6aa29ee535089c979d) feat: Show next scheduled cron run in UI/CLI (#3847)
- * [6aaceeb95](https://github.com/argoproj/argo-workflows/commit/6aaceeb9541f46ee6af97e072be8d935812b7bc5) fix: Treat collapsed nodes as their siblings (#3808)
- * [743ec5365](https://github.com/argoproj/argo-workflows/commit/743ec53652bf1989931a2c23c2db5e29043e3582) fix(ui): crash when workflow node has no memoization info (#3839)
- * [a2f54da15](https://github.com/argoproj/argo-workflows/commit/a2f54da15de54b025859f7ba48779a062d42d8f3) fix(docs): Amend link to the Workflow CRD (#3828)
- * [ca8ab468d](https://github.com/argoproj/argo-workflows/commit/ca8ab468dc72eb3fc2c038b4916c3b124a7e7b99) fix: Carry over ownerReferences from resubmitted workflow. Fixes #3818 (#3820)
- * [da43086a1](https://github.com/argoproj/argo-workflows/commit/da43086a19f88c0b7ac71fdb888f913fd619962b) fix(docs): Add Entrypoint Cron Backfill example Fixes #3807 (#3814)
- * [8e1a3db58](https://github.com/argoproj/argo-workflows/commit/8e1a3db58c2edf73c36f21a8ef87a1a1e40576d9) feat(ui): add node memoization information to node summary view (#3741)
- * [d235c7d52](https://github.com/argoproj/argo-workflows/commit/d235c7d52810d701e473723ab3d1a85c0c38e9c4) fix: Consider all children of TaskGroups in DAGs (#3740)
- * [3540d152a](https://github.com/argoproj/argo-workflows/commit/3540d152a62261d0af25c48756acbae710684db0) Add SYS_PTRACE to ease the setup of non-root deployments with PNS executor. (#3785)
- * [0ca839248](https://github.com/argoproj/argo-workflows/commit/0ca8392485f32c3acdef312befe348ced037b7fb) feat: Github Workflow multi arch. Fixes #2080 (#3744)
- * [7ad6eb845](https://github.com/argoproj/argo-workflows/commit/7ad6eb8456456f3aea1bf35f1b5bae5058ffd962) fix(ui): Remove outdated download links. Fixes #3762 (#3783)
- * [226367827](https://github.com/argoproj/argo-workflows/commit/226367827dbf62f0a3155abbdc9de0b6d57f693c) fix(ui): Correctly load and store namespace. Fixes #3773 and #3775 (#3778)
- * [ed90d4039](https://github.com/argoproj/argo-workflows/commit/ed90d4039d73894bf3073dd39735152833b87457) fix(controller): Support exit handler on workflow templates. Fixes #3737 (#3782)
- * [f15a8f778](https://github.com/argoproj/argo-workflows/commit/f15a8f77834e369b291c9e6955bdcef324afc6cd) fix: workflow template ref does not work in other namespace (#3795)
- * [ef44a03d3](https://github.com/argoproj/argo-workflows/commit/ef44a03d363b1e7e2a89d268260e9a834553de7b) fix: Increase the requeue duration on checkForbiddenErrorAndResubmitAllowed (#3794)
- * [0125ab530](https://github.com/argoproj/argo-workflows/commit/0125ab5307249e6713d6706975d870a78c5046a5) fix(server): Truncate creator label at 63 chars. Fixes #3756 (#3758)
- * [a38101f44](https://github.com/argoproj/argo-workflows/commit/a38101f449cd462847a3ac99ee65fa70e40acd80) feat(ui): Sign-post IDE set-up. Closes #3720 (#3723)
- * [ee910b551](https://github.com/argoproj/argo-workflows/commit/ee910b5510c9e00bd07c32d2e8ef0846663a330a) feat(server): Emit audit events for workflow event binding errors (#3704)
- * [e9b29e8c1](https://github.com/argoproj/argo-workflows/commit/e9b29e8c1f2cdc99e7ccde11f939b865b51e2320) fix: TestWorkflowLevelSemaphore flakiness (#3764)
- * [fadd6d828](https://github.com/argoproj/argo-workflows/commit/fadd6d828e152f88236bcd5483bae39c619d2622) fix: Fix workflow onExit nodes not being displayed in UI (#3765)
- * [513675bc5](https://github.com/argoproj/argo-workflows/commit/513675bc5b9be6eda48983cb5c8b4ad4d42c9efb) fix(executor): Add retry on pods watch to handle timeout. (#3675)
- * [e35a86ff1](https://github.com/argoproj/argo-workflows/commit/e35a86ff108e247b6fd7dfbf947300f086d2e912) feat: Allow parametrizable int fields (#3610)
- * [da115f9db](https://github.com/argoproj/argo-workflows/commit/da115f9db328af9bcc9152afd58b55ba929f7764) fix(controller): Tolerate malformed resources. Fixes #3677 (#3680)
- * [f8053ae37](https://github.com/argoproj/argo-workflows/commit/f8053ae379a8244b53a8da6787fe6d9769158cbe) feat(operator): Add scope params for step startedAt and finishedAt (#3724)
- * [54c2134fc](https://github.com/argoproj/argo-workflows/commit/54c2134fcdf4a4143b99590730340b79e57e180d) fix: Couldn't Terminate/Stop the ResourceTemplate Workflow (#3679)
- * [12ddc1f69](https://github.com/argoproj/argo-workflows/commit/12ddc1f69a0495331eea83a3cd6be9c453658c9a) fix: Argo linting does not respect namespace of declared resource (#3671)
- * [acfda260e](https://github.com/argoproj/argo-workflows/commit/acfda260e78e4035757bdfb7923238b7e48bf0f9) feat(controller): controller logs to be structured #2308 (#3727)
- * [cc2e42a69](https://github.com/argoproj/argo-workflows/commit/cc2e42a691e01b6c254124c7aed52c11540e8475) fix(controller): Tolerate PDB delete race. Fixes #3706 (#3717)
- * [5eda8b867](https://github.com/argoproj/argo-workflows/commit/5eda8b867d32ab09be6643ad111383014f58b0e9) fix: Ensure target task's onExit handlers are run (#3716)
- * [811a44193](https://github.com/argoproj/argo-workflows/commit/811a441938ebfe1a9f7e634e6b4b8c1a98084df4) docs(windows): Add note about artifacts on windows (#3714)
- * [5e5865fb7](https://github.com/argoproj/argo-workflows/commit/5e5865fb7ad2eddfefaf6192492bccbd07cbfc35) fix: Ingress docs (#3713)
- * [eeb3c9d1a](https://github.com/argoproj/argo-workflows/commit/eeb3c9d1afb6b8e19423a71ca7eb24838358be8d) fix: Fix bug with 'argo delete --older' (#3699)
- * [7aa536eda](https://github.com/argoproj/argo-workflows/commit/7aa536edaeb24d271593b4633cd211039df8beb6) feat: Upgrade Minio v7 with support IRSA (#3700)
- * [71d612815](https://github.com/argoproj/argo-workflows/commit/71d6128154587f2e966d1fc2bad4195bc0b4fba8) feat(server): Trigger workflows from webhooks. Closes #2667 (#3488)
- * [a5d995dc4](https://github.com/argoproj/argo-workflows/commit/a5d995dc49caa9837e0ccf86290fd485f72ec065) fix(controller): Adds ALL_POD_CHANGES_SIGNIFICANT (#3689)
- * [9f00cdc9d](https://github.com/argoproj/argo-workflows/commit/9f00cdc9d73b44569a071d18535586e28c469b8e) fix: Fixed workflow queue duration if PVC creation is forbidden (#3691)
- * [41ebbe8e3](https://github.com/argoproj/argo-workflows/commit/41ebbe8e38861e1ad09db6687512757fda2487d7) fix: Re-introduce 1 second sleep to reconcile informer (#3684)
- * [6e3c5bef5](https://github.com/argoproj/argo-workflows/commit/6e3c5bef5c2bbfbef4a74b4c9c91e288b8e94735) feat(ui): Make UI errors recoverable. Fixes #3666 (#3674)
- * [27fea1bbd](https://github.com/argoproj/argo-workflows/commit/27fea1bbd3dcb5f420beb85926a1fb2434b33b7e) chore(ui): Add label to 'from' section in Workflow Drawer (#3685)
- * [32d6f7521](https://github.com/argoproj/argo-workflows/commit/32d6f75212e07004bcbf2c34973160c0ded2023a) feat(ui): Add links to wft, cwf, or cwft to workflow list and details. Closes #3621 (#3662)
- * [1c95a985b](https://github.com/argoproj/argo-workflows/commit/1c95a985b486c4e23622322faf8caccbdd991c89) fix: Fix collapsible nodes rendering (#3669)
- * [dbb393682](https://github.com/argoproj/argo-workflows/commit/dbb39368295cbc0ef886e78236338572c37607a1) feat: Add submit options to 'argo cron create' (#3660)
- * [2b6db45b2](https://github.com/argoproj/argo-workflows/commit/2b6db45b2775cf8bff22b89b0a30e4dda700ecf9) fix(controller): Fix nested maps. Fixes #3653 (#3661)
- * [3f293a4d6](https://github.com/argoproj/argo-workflows/commit/3f293a4d647c6c10cf1bafc8d340453e87bd4351) fix: interface{} values should be expanded with '%v' (#3659)
- * [a8f4da00b](https://github.com/argoproj/argo-workflows/commit/a8f4da00b6157a2a457eef74cfe9c46b7a39f9ff) fix(server): Report v1.Status errors. Fixes #3608 (#3652)
- * [a3a4ea0a4](https://github.com/argoproj/argo-workflows/commit/a3a4ea0a43c1421d04198dacd2000a0b8ecb17ad) fix: Avoid overriding the Workflow parameter when it is merging with WorkflowTemplate parameter (#3651)
- * [9ce1d824e](https://github.com/argoproj/argo-workflows/commit/9ce1d824eb0ad607035db7d3bfaa6a54fbe6dc34) fix: Enforce metric Help must be the same for each metric Name (#3613)
- * [f77780f5b](https://github.com/argoproj/argo-workflows/commit/f77780f5bdeb875506b4f619b63c40295b66810a) fix(controller): Carry-over labels for re-submitted workflows. Fixes #3622 (#3638)
- * [bcc6e1f79](https://github.com/argoproj/argo-workflows/commit/bcc6e1f79c42f006b2720e1e185af59a984103d5) fix: Fixed flaky unit test TestFailSuspendedAndPendingNodesAfterDeadline (#3640)
- * [8f70d2243](https://github.com/argoproj/argo-workflows/commit/8f70d2243e07c04254222b1cabf8088245ca55e2) fix: Don't panic on invalid template creation (#3643)
- * [5b0210dcc](https://github.com/argoproj/argo-workflows/commit/5b0210dccff725b6288799a0c215550fe6fc6247) fix: Simplify the WorkflowTemplateRef field validation to support all fields in WorkflowSpec except `Templates` (#3632)
- * [2375878af](https://github.com/argoproj/argo-workflows/commit/2375878af4ce02af81326e7a672b32c7ce8bfbb1) fix: Fix 'malformed request: field selector' error (#3636)
- * [0f37e81ab](https://github.com/argoproj/argo-workflows/commit/0f37e81abd42fbdece9ea70b2091256dbecd1220) fix: DAG level Output Artifacts on K8S and Kubelet executor (#3624)
- * [a89261bf6](https://github.com/argoproj/argo-workflows/commit/a89261bf6b6ab5b83037044c30f3a55cc1162d62) build(cli)!: Zip binaries. Closes #3576 (#3614)
- * [7f8444731](https://github.com/argoproj/argo-workflows/commit/7f844473167df32840720437953da478b3bdffa2) fix(controller): Panic when outputs in a cache entry are nil (#3615)
- * [86f03a3fb](https://github.com/argoproj/argo-workflows/commit/86f03a3fbd871164cff95005d00b04c220ba58be) fix(controller): Treat TooManyError same as Forbidden (i.e. try again). Fixes #3606 (#3607)
- * [e0a4f13d1](https://github.com/argoproj/argo-workflows/commit/e0a4f13d1f3df93fd2c003146d7db2dd2dd924e6) fix(server): Re-establish watch on v1.Status errors. Fixes #3608 (#3609)
- * [f7be20c1c](https://github.com/argoproj/argo-workflows/commit/f7be20c1cc0e7b6ab708d7d7a1f60c6898c834e4) fix: Fix panic and provide better error message on watch endpoint (#3605)
- * [491f4f747](https://github.com/argoproj/argo-workflows/commit/491f4f747619783384937348effaaa56143ea8f1) fix: Argo Workflows does not honour global timeout if step/pod is not able to schedule (#3581)
- * [5d8f85d50](https://github.com/argoproj/argo-workflows/commit/5d8f85d5072b5e580a33358cf5fea1fac372baa4) feat(ui): Enhanced workflow submission. Closes #3498 (#3580)
- * [ad3441dc8](https://github.com/argoproj/argo-workflows/commit/ad3441dc84b207df57094df570f01915634c073d) feat: Add 'argo node set' command (#3277)
- * [17b46bdbb](https://github.com/argoproj/argo-workflows/commit/17b46bdbbe72072d87f83625b4cf1873f9c5379b) fix(controller): Fix bug in util/RecoverWorkflowNameFromSelectorString. Add error handling (#3596)
- * [8b6e43f6d](https://github.com/argoproj/argo-workflows/commit/8b6e43f6dafbb95168eaa8c0b2a52f9e177ba075) fix(ui): Fix multiple UI issues (#3573)
- * [cdc935ae7](https://github.com/argoproj/argo-workflows/commit/cdc935ae76b3d7cc50a486695b40ff2f647b49bc) feat(cli): Support deleting resubmitted workflows (#3554)
- * [1b757ea9b](https://github.com/argoproj/argo-workflows/commit/1b757ea9bc75a379262928be76a4179ea75aa658) feat(ui): Change default language for Resource Editor to YAML and store preference in localStorage. Fixes #3543 (#3560)
- * [c583bc04c](https://github.com/argoproj/argo-workflows/commit/c583bc04c672d3aac6955024568a7daebe928932) fix(server): Ignore not-JWT server tokens. Fixes #3562 (#3579)
- * [5afbc131f](https://github.com/argoproj/argo-workflows/commit/5afbc131f2e43a0096857534a2814a9fdd9b95f9) fix(controller): Do not panic on nil output value. Fixes #3505 (#3509)
- * [827106de2](https://github.com/argoproj/argo-workflows/commit/827106de2f8f3e03f267a3ebbb6095a1f9b4a0e6) fix: Skip TestStorageQuotaLimit (#3566)
- * [13b1d3c19](https://github.com/argoproj/argo-workflows/commit/13b1d3c19e94047ae97a071e4468b1050b8e292b) feat(controller): Step level memoization. Closes #944 (#3356)
- * [96e520eb6](https://github.com/argoproj/argo-workflows/commit/96e520eb68afb36894b5d2373d55505cc3703a94) fix: Exceeding quota with volumeClaimTemplates (#3490)
- * [144c9b65e](https://github.com/argoproj/argo-workflows/commit/144c9b65ecbc671c30d41a0bd65546957a34c713) fix(ui): cannot push to nil when filtering by label (#3555)
- * [7e4a78085](https://github.com/argoproj/argo-workflows/commit/7e4a780854fc5f39fcfc77e4354620c307ee21f1) feat: Collapse children in UI Workflow viewer (#3526)
- * [7536982ae](https://github.com/argoproj/argo-workflows/commit/7536982ae7451a1a8bcd4b9ddfe6385b138fd782) fix: Fix flaky TestRetryOmitted (#3552)
- * [dcee34849](https://github.com/argoproj/argo-workflows/commit/dcee34849ba6302a126d2eaf684a06d246080fd0) fix: Fix links in fields doc (#3539)
- * [fb67c1beb](https://github.com/argoproj/argo-workflows/commit/fb67c1beb69c141604322bb19cf43596f9059cf9) Fix issue #3546 (#3547)
- * [31afa92ab](https://github.com/argoproj/argo-workflows/commit/31afa92ab0c91e8026bba29d216e6fcc2d150ee7) fix(artifacts): support optional input artifacts, Fixes #3491 (#3512)
- * [977beb462](https://github.com/argoproj/argo-workflows/commit/977beb462dcb11afd1913a4e1397136b1b14915b) fix: Fix when retrying Workflows with Omitted nodes (#3528)
- * [ab4ef5c5a](https://github.com/argoproj/argo-workflows/commit/ab4ef5c5a290196878d3cf18a9a7036c8bfc9144) fix: Panic on CLI Watch command (#3532)
- * [b901b2790](https://github.com/argoproj/argo-workflows/commit/b901b2790fe3c7c350b393e9a0943721ea76f3af) fix(controller): Backoff exponent is off by one. Fixes #3513 (#3514)
- * [49ef5c0fe](https://github.com/argoproj/argo-workflows/commit/49ef5c0fe5b7b92ec0035e859a09cf906e4f02f2) fix: String interpreted as boolean in labels (#3518)
-
-### Contributors
-
- * Alex Collins
- * Ang Gao
- * Antoine Dao
- * Carlos Montemuino
- * Greg Roodt
- * Guillaume Hormiere
- * Jie Zhang
- * Jonny
- * Kaushik B
- * Lucas Theisen
- * Michael Weibel
- * Nirav Patel
- * Remington Breeze
- * Saravanan Balasubramanian
- * Simon Behar
- * Yuan Tang
- * dgiebert
- * dherman
- * haibingzhao
- * juliusvonkohout
- * sh-tatsuno
- * yonirab
-
-## v2.10.2 (2020-09-14)
-
- * [ed79a5401](https://github.com/argoproj/argo-workflows/commit/ed79a5401162db7a3060111aff1b0fae5e8c2117) Update manifests to v2.10.2
- * [d27bf2d29](https://github.com/argoproj/argo-workflows/commit/d27bf2d29afaaad608943f238c821d94952a8b85) fix: Fix UI selection issues (#3928)
- * [51220389a](https://github.com/argoproj/argo-workflows/commit/51220389ac2a0f109b5411851f29f9ee2ff3d968) fix: Create global scope before workflow-level realtime metrics (#3979)
- * [857ef750f](https://github.com/argoproj/argo-workflows/commit/857ef750f595f292775bace1129d9c01b08a8ddd) fix: Custom metrics are not recorded for DAG tasks Fixes #3872 (#3886)
- * [b9a0bb00b](https://github.com/argoproj/argo-workflows/commit/b9a0bb00b03344c720485c8103f21b90beffc78e) fix: Consider WorkflowTemplate metadata during validation (#3988)
- * [089e1862a](https://github.com/argoproj/argo-workflows/commit/089e1862ab1e6c34ff33b7f453ca2f7bad021eb4) fix(server): Remove XSS vulnerability. Fixes #3942 (#3975)
- * [1215d9e1e](https://github.com/argoproj/argo-workflows/commit/1215d9e1e3250ec482363430d50c6ea4e5ca05ab) fix(cli): Allow `argo version` without KUBECONFIG. Fixes #3943 (#3945)
## v2.10.2 (2020-09-14)

* [ed79a5401](https://github.com/argoproj/argo-workflows/commit/ed79a5401162db7a3060111aff1b0fae5e8c2117) Update manifests to v2.10.2
* [d27bf2d29](https://github.com/argoproj/argo-workflows/commit/d27bf2d29afaaad608943f238c821d94952a8b85) fix: Fix UI selection issues (#3928)
* [51220389a](https://github.com/argoproj/argo-workflows/commit/51220389ac2a0f109b5411851f29f9ee2ff3d968) fix: Create global scope before workflow-level realtime metrics (#3979)
* [857ef750f](https://github.com/argoproj/argo-workflows/commit/857ef750f595f292775bace1129d9c01b08a8ddd) fix: Custom metrics are not recorded for DAG tasks. Fixes #3872 (#3886)
* [b9a0bb00b](https://github.com/argoproj/argo-workflows/commit/b9a0bb00b03344c720485c8103f21b90beffc78e) fix: Consider WorkflowTemplate metadata during validation (#3988)
* [089e1862a](https://github.com/argoproj/argo-workflows/commit/089e1862ab1e6c34ff33b7f453ca2f7bad021eb4) fix(server): Remove XSS vulnerability. Fixes #3942 (#3975)
* [1215d9e1e](https://github.com/argoproj/argo-workflows/commit/1215d9e1e3250ec482363430d50c6ea4e5ca05ab) fix(cli): Allow `argo version` without KUBECONFIG. Fixes #3943 (#3945)

### Contributors

* Alex Collins
* Saravanan Balasubramanian
* Simon Behar

## v2.10.1 (2020-09-02)

* [854444e47](https://github.com/argoproj/argo-workflows/commit/854444e47ac00d146cb83d174049bfbb2066bfb2) Update manifests to v2.10.1
* [69861fc91](https://github.com/argoproj/argo-workflows/commit/69861fc919495b4215fe24f549ce1a55bf0674db) fix: Workflow should fail on Pod failure before container starts. Fixes #3879 (#3890)
* [670fc618c](https://github.com/argoproj/argo-workflows/commit/670fc618c52f8672a99d1159f4c922a7f1b1f1f5) fix(controller): Cron re-apply update (#3883)
* [4b30fa4ef](https://github.com/argoproj/argo-workflows/commit/4b30fa4ef82acba373b9e0d33809f63aa3c2632b) fix(executor): Replace default retry in executor with an increased value retryer (#3891)
* [ae537cd76](https://github.com/argoproj/argo-workflows/commit/ae537cd769ca57842fe92a463e78a0f9f3b74d32) fix(ui): use absolute URL to redirect from autocomplete list. Closes #3903 (#3906)
* [56dc9f7a7](https://github.com/argoproj/argo-workflows/commit/56dc9f7a77ce68880a8c95c43b380d6167d5f4c9) fix: Consider all children of TaskGroups in DAGs (#3740)
* [8ac7369bf](https://github.com/argoproj/argo-workflows/commit/8ac7369bf66af992a23d23eb6713000b95101e52) fix(controller): Support exit handler on workflow templates. Fixes #3737 (#3782)
* [ee8489213](https://github.com/argoproj/argo-workflows/commit/ee848921348676718a8ab4cef8e8c2f52b86d124) fix(controller): Failure tolerant workflow archiving and offloading. Fixes #3786 and #3837 (#3787)

### Contributors

* Alex Collins
* Ang Gao
* Nirav Patel
* Saravanan Balasubramanian
* Simon Behar

## v2.10.0 (2020-08-18)

* [195c6d831](https://github.com/argoproj/argo-workflows/commit/195c6d8310a70b07043b9df5c988d5a62dafe00d) Update manifests to v2.10.0
* [08117f0cd](https://github.com/argoproj/argo-workflows/commit/08117f0cd1206647644f1f14580046268d1c8639) fix: Increase the requeue duration on checkForbiddenErrorAndResubmitAllowed (#3794)
* [5ea2ed0db](https://github.com/argoproj/argo-workflows/commit/5ea2ed0dbdb4003fc457b7cd76cf5cec9edc6799) fix(server): Truncate creator label at 63 chars. Fixes #3756 (#3758)

### Contributors

* Alex Collins
* Saravanan Balasubramanian

## v2.10.0-rc7 (2020-08-13)

* [267da535b](https://github.com/argoproj/argo-workflows/commit/267da535b66ed1dab8bcc90410260b7cf4b80e2c) Update manifests to v2.10.0-rc7
* [baeb0fed2](https://github.com/argoproj/argo-workflows/commit/baeb0fed2b3ab53f35297a764f983059600d4b44) fix: Revert merge error
* [66bae22f1](https://github.com/argoproj/argo-workflows/commit/66bae22f147cd248f1a88f913eaeac13ec873bcd) fix(executor): Add retry on pods watch to handle timeout. (#3675)
* [971f11537](https://github.com/argoproj/argo-workflows/commit/971f115373c8f01f0e21991b14fc3b27876f3cbf) removed unused test-report files
* [8c0b9f0a5](https://github.com/argoproj/argo-workflows/commit/8c0b9f0a52922485a1bdf6a8954cdc09060dbc29) fix: Couldn't Terminate/Stop the ResourceTemplate Workflow (#3679)
* [a04d72f95](https://github.com/argoproj/argo-workflows/commit/a04d72f95a433eaa37202418809e1877eb167a1a) fix(controller): Tolerate PDB delete race. Fixes #3706 (#3717)
* [a76357638](https://github.com/argoproj/argo-workflows/commit/a76357638598174812bb749ea539ca4061284d89) fix: Fix bug with 'argo delete --older' (#3699)
* [fe8129cfc](https://github.com/argoproj/argo-workflows/commit/fe8129cfc766f875985f0f09d37dc351a1e5f933) fix(controller): Carry-over labels for re-submitted workflows. Fixes #3622 (#3638)
* [e12d26e52](https://github.com/argoproj/argo-workflows/commit/e12d26e52a42d91ec4d2dbc3d188cf3b1a623a26) fix(controller): Treat TooManyError same as Forbidden (i.e. try again). Fixes #3606 (#3607)
* [9a5febec1](https://github.com/argoproj/argo-workflows/commit/9a5febec11d231ed1cd5e085a841069b9106dafe) fix: Ensure target task's onExit handlers are run (#3716)
* [c3a58e36d](https://github.com/argoproj/argo-workflows/commit/c3a58e36d18e3c3cbb7bffcd3a6ae4c5c08a66ea) fix: Enforce metric Help must be the same for each metric Name (#3613)

### Contributors

* Alex Collins
* Guillaume Hormiere
* Saravanan Balasubramanian
* Simon Behar

## v2.10.0-rc6 (2020-08-06)

* [cb3536f9d](https://github.com/argoproj/argo-workflows/commit/cb3536f9d1dd64258c1c3d737bb115bdab923e58) Update manifests to v2.10.0-rc6
* [6e004ace2](https://github.com/argoproj/argo-workflows/commit/6e004ace2710e17ed2a282c6570a97b567946e58) lint
* [b31fc1f86](https://github.com/argoproj/argo-workflows/commit/b31fc1f8612a93c907b375de2e9a3c9326dca34b) fix(controller): Adds ALL_POD_CHANGES_SIGNIFICANT (#3689)
* [0b7cd5b31](https://github.com/argoproj/argo-workflows/commit/0b7cd5b3181eece7636b99d4761e96c61c17c453) fix: Fixed workflow queue duration if PVC creation is forbidden (#3691)
* [03b841627](https://github.com/argoproj/argo-workflows/commit/03b8416271002bfc88c11dd27d86fa08f95b33e9) fix: Re-introduce 1 second sleep to reconcile informer (#3684)

### Contributors

* Alex Collins
* Saravanan Balasubramanian
* Simon Behar

## v2.10.0-rc5 (2020-08-03)

* [e9ca55ec1](https://github.com/argoproj/argo-workflows/commit/e9ca55ec1cdbf37a43ee68da756ac91abb4edf73) Update manifests to v2.10.0-rc5
* [85ddda053](https://github.com/argoproj/argo-workflows/commit/85ddda0533d7b60614bee5a93d60bbfe0209ea83) lint
* [fb367f5e8](https://github.com/argoproj/argo-workflows/commit/fb367f5e8f2faff6eeba751dc13c73336c112236) fix(controller): Fix nested maps. Fixes #3653 (#3661)
* [2385cca59](https://github.com/argoproj/argo-workflows/commit/2385cca59396eb53c03eac5bd87611b57f2a47a2) fix: interface{} values should be expanded with '%v' (#3659)
* [263e4bad7](https://github.com/argoproj/argo-workflows/commit/263e4bad78092310ad405919b607e2ef696c8bf9) fix(server): Report v1.Status errors. Fixes #3608 (#3652)
* [718f802b8](https://github.com/argoproj/argo-workflows/commit/718f802b8ed1533da2d2a0b666d2a80b51f476b2) fix: Avoid overriding the Workflow parameter when it is merging with WorkflowTemplate parameter (#3651)
* [9735df327](https://github.com/argoproj/argo-workflows/commit/9735df3275d456a868028b51a2386241f0d207ef) fix: Fixed flaky unit test TestFailSuspendedAndPendingNodesAfterDeadline (#3640)
* [662d22e4f](https://github.com/argoproj/argo-workflows/commit/662d22e4f10566a4ce34c3080ba38788d58fd681) fix: Don't panic on invalid template creation (#3643)
* [854aaefaa](https://github.com/argoproj/argo-workflows/commit/854aaefaa9713155a62deaaf041a36527d7f1718) fix: Fix 'malformed request: field selector' error (#3636)
* [9d56eb29c](https://github.com/argoproj/argo-workflows/commit/9d56eb29c268c7a1f73068e17edf10b6affc51a8) fix: DAG level Output Artifacts on K8S and Kubelet executor (#3624)
* [c7512b6ce](https://github.com/argoproj/argo-workflows/commit/c7512b6ce53e9b3fc5f7792a6c7c6d016aa66734) fix: Simplify the WorkflowTemplateRef field validation to support all fields in WorkflowSpec except `Templates` (#3632)

### Contributors

* Alex Collins
* Saravanan Balasubramanian
* Simon Behar

## v2.10.0-rc4 (2020-07-28)

* [8d6dae612](https://github.com/argoproj/argo-workflows/commit/8d6dae6128074445d9bd0222c449643053568db8) Update manifests to v2.10.0-rc4
* [a4b1dde57](https://github.com/argoproj/argo-workflows/commit/a4b1dde573754556db1e635491189960721920a8) build(cli)!: Zip binaries. Closes #3576 (#3614)
* [dea03a9c7](https://github.com/argoproj/argo-workflows/commit/dea03a9c7f1016cfb0b47e1b5152cb07c111b436) fix(server): Re-establish watch on v1.Status errors. Fixes #3608 (#3609)
* [c063f9f1c](https://github.com/argoproj/argo-workflows/commit/c063f9f1c3a5d1ce0fd5fb9dd5ce3938de18edce) fix: Fix panic and provide better error message on watch endpoint (#3605)
* [35a00498d](https://github.com/argoproj/argo-workflows/commit/35a00498dcc62ebecb9dd476c90fddb2800fdeb7) fix: Argo Workflows does not honour global timeout if step/pod is not able to schedule (#3581)
* [3879827cb](https://github.com/argoproj/argo-workflows/commit/3879827cb6bfa3f9e29e81dbd3bdbf0ffeeec233) fix(controller): Fix bug in util/RecoverWorkflowNameFromSelectorString. Add error handling (#3596)
* [5f4dec750](https://github.com/argoproj/argo-workflows/commit/5f4dec750a3be0d1ed8808d90535e90ee532f111) fix(ui): Fix multiple UI issues (#3573)
* [e94cf8a21](https://github.com/argoproj/argo-workflows/commit/e94cf8a21cd1c97f1a415d015038145a241a7b23) fix(ui): cannot push to nil when filtering by label (#3555)
* [61b5bd931](https://github.com/argoproj/argo-workflows/commit/61b5bd931045a2e423f1126300ab332f606cff9c) fix: Fix flaky TestRetryOmitted (#3552)
* [d53c883b7](https://github.com/argoproj/argo-workflows/commit/d53c883b713ad281b33603567a92d4dbe61a5b47) fix: Fix links in fields doc (#3539)
* [d2bd5879f](https://github.com/argoproj/argo-workflows/commit/d2bd5879f47badbd9dddef8308e20c3434caa95e) fix(artifacts): support optional input artifacts. Fixes #3491 (#3512)
* [652956e04](https://github.com/argoproj/argo-workflows/commit/652956e04c88c347d018367c8f11398ae2ced9dc) fix: Fix when retrying Workflows with Omitted nodes (#3528)
* [32c36d785](https://github.com/argoproj/argo-workflows/commit/32c36d785be4394b96615fbb4c716ae74177ed20) fix(controller): Backoff exponent is off by one. Fixes #3513 (#3514) (see the sketch after this section)
* [75d295747](https://github.com/argoproj/argo-workflows/commit/75d2957473c4783a6db18fda08907f62375c002e) fix: String interpreted as boolean in labels (#3518)

### Contributors

* Alex Collins
* Jie Zhang
* Jonny
* Remington Breeze
* Saravanan Balasubramanian
* Simon Behar
* haibingzhao
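For context on the backoff fix above (#3514): `retryStrategy.backoff` derives each retry delay from `duration` multiplied by `factor` raised to the retry count, so an off-by-one in the exponent skews every delay. A minimal sketch of the fields involved, with hypothetical values:

```yaml
templates:
  - name: flaky-step
    retryStrategy:
      limit: 4
      backoff:
        duration: "10s"    # delay before the first retry
        factor: 2          # expected delays: 10s, 20s, 40s, 80s
        maxDuration: "5m"  # stop retrying after 5 minutes overall
    container:
      image: alpine:3.12
      command: [sh, -c, "exit 1"]   # always fails, to exercise retries
```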
## v2.10.0-rc3 (2020-07-23)

* [37f4f9da2](https://github.com/argoproj/argo-workflows/commit/37f4f9da2b921c96f4d8919a17d4303e588e86c9) Update manifests to v2.10.0-rc3
* [37297af7d](https://github.com/argoproj/argo-workflows/commit/37297af7ddf7d9fcebfed0dff5f76d9c4cc3199f) Update manifests to v2.10.0-rc2
* [cbf27edf1](https://github.com/argoproj/argo-workflows/commit/cbf27edf17e84c86b9c969ed19f67774c27c50bd) fix: Panic on CLI Watch command (#3532)
* [a36664823](https://github.com/argoproj/argo-workflows/commit/a366648233e5fb7e992188034e0bc0e250279feb) fix: Skip TestStorageQuotaLimit (#3566)
* [802c18ed6](https://github.com/argoproj/argo-workflows/commit/802c18ed6ea8b1e481ef2feb6d0552eac7dab67d) fix: Exceeding quota with volumeClaimTemplates (#3490)
* [bbee82a08](https://github.com/argoproj/argo-workflows/commit/bbee82a086d32e721e60880139a91064c0b3abb6) fix(server): Ignore not-JWT server tokens. Fixes #3562 (#3579)
* [f72ae8813](https://github.com/argoproj/argo-workflows/commit/f72ae8813aa570eb13769de606b07dd72d991db8) fix(controller): Do not panic on nil output value. Fixes #3505 (#3509)

### Contributors

* Alex Collins
* Saravanan Balasubramanian

## v2.10.0-rc2 (2020-07-18)

* [4bba17f39](https://github.com/argoproj/argo-workflows/commit/4bba17f3956708c4e50b54d932b516201f368b8b) Update manifests to v2.10.0-rc2
* [616c79df0](https://github.com/argoproj/argo-workflows/commit/616c79df09c435fa7659bf7e5194529d948ee93b) Update manifests to v2.10.0-rc1

### Contributors

* Alex Collins

## v2.10.0-rc1 (2020-07-17)

* [19e700a33](https://github.com/argoproj/argo-workflows/commit/19e700a3388552d9440ae75dd259efcbeb0a3657) fix(cli): Check mutual exclusivity for argo CLI flags (#3493)
* [7d45ff7f0](https://github.com/argoproj/argo-workflows/commit/7d45ff7f014d011ef895b9c808da781000ea32a5) fix: Panic on releaseAllWorkflowLocks if Object is not Unstructured type (#3504)
* [1b68a5a15](https://github.com/argoproj/argo-workflows/commit/1b68a5a15af12fb0866f4d5a4dcd9fb5da3f2ab4) fix: ui/package.json & ui/yarn.lock to reduce vulnerabilities (#3501)
* [7f262fd81](https://github.com/argoproj/argo-workflows/commit/7f262fd81bae1f8b9bc7707d8bf02f10174cc87d) fix(cli)!: Enable CLI to work without kube config. Closes #3383, #2793 (#3385)
* [27528ba34](https://github.com/argoproj/argo-workflows/commit/27528ba34538b764db9254d41761a4edeba6694c) feat: Support completions for more resources (#3494)
* [5bd2ad7a9](https://github.com/argoproj/argo-workflows/commit/5bd2ad7a9d0ad5437fb7d1b7955e0b8e0c9b52ca) fix: Merge WorkflowTemplateRef with defaults workflow spec (#3480)
* [69179e72c](https://github.com/argoproj/argo-workflows/commit/69179e72c0872cde9131cc9d68192d5c472d64c9) fix: link to server auth mode docs, adds Tulip as official user (#3486)
* [acf56f9f0](https://github.com/argoproj/argo-workflows/commit/acf56f9f0d2da426eab9cacc03b7ebadb4aa9ea3) feat(server): Label workflows with creator. Closes #2437 (#3440)
* [3b8ac065a](https://github.com/argoproj/argo-workflows/commit/3b8ac065a1db8ebe629d7cf02c1a8585b34ea2b7) fix: Pass resolved arguments to onExit handler (#3477)
* [f6f1844b7](https://github.com/argoproj/argo-workflows/commit/f6f1844b73d4e643614f575075401946b9aa7a7c) feat: Attempt to resolve nested tags (#3339)
* [48e15d6fc](https://github.com/argoproj/argo-workflows/commit/48e15d6fce2f980ae5dd5b5d2ff405f496b8f644) feat(cli): List only resubmitted workflows option (#3357)
* [25e9c0cdf](https://github.com/argoproj/argo-workflows/commit/25e9c0cdf73a3c9fa712fc3b544f1f8f33980515) docs, quick-start. Use http, not https for link (#3476)
* [7a2d76427](https://github.com/argoproj/argo-workflows/commit/7a2d76427da0ae6440f91adbb2f97e62b28355e6) fix: Metric emission with retryStrategy (#3470)
* [f5876e041](https://github.com/argoproj/argo-workflows/commit/f5876e041a2d87c8d48983751d2c3b4959fb1d93) test(controller): Ensure resubmitted workflows have correct labels (#3473)
* [aa92ec038](https://github.com/argoproj/argo-workflows/commit/aa92ec03885b2c58c537b33161809f9966faf968) fix(controller): Correct fail workflow when pod is deleted with --force. Fixes #3097 (#3469)
* [a1945d635](https://github.com/argoproj/argo-workflows/commit/a1945d635b24963af7f52bd73b19a7da52d647e3) fix(controller): Respect the volumes of a workflowTemplateRef. Fixes … (#3451)
* [847ba5305](https://github.com/argoproj/argo-workflows/commit/847ba5305273a16a65333c278e705dc157b9c723) test(controller): Add memoization tests. See #3214 (#3455) (#3466)
* [1e42813aa](https://github.com/argoproj/argo-workflows/commit/1e42813aaaaee55b9e4483338f7a8554ba9f9eab) test(controller): Add memoization tests. See #3214 (#3455)
* [abe768c4b](https://github.com/argoproj/argo-workflows/commit/abe768c4ba5433fe72f9e6d5a1dde09d37d4d20d) feat(cli): Allow to view previously terminated container logs (#3423)
* [7581025ff](https://github.com/argoproj/argo-workflows/commit/7581025ffac0da6a4c9b125dac3173d0c84aef4f) fix: Allow ints for sequence start/end/count. Fixes #3420 (#3425)
* [b82f900ae](https://github.com/argoproj/argo-workflows/commit/b82f900ae5e446d14a9899302c143c8e32447eab) Fixed typos (#3456)
* [23760119d](https://github.com/argoproj/argo-workflows/commit/23760119d4664f0825536d368b65cdde356e0ff3) feat: Workflow Semaphore Support (#3141) (see the sketch after this section)
* [81cba832e](https://github.com/argoproj/argo-workflows/commit/81cba832ed1d4f5b116dc9e43f1f3ad79c190c44) feat: Support WorkflowMetadata in WorkflowTemplate and ClusterWorkflowTemplate (#3364)
* [308c7083b](https://github.com/argoproj/argo-workflows/commit/308c7083bded1b6a1fb91bcd963e1e9b8d0b4152) fix(controller): Prevent panic on nil node. Fixes #3436 (#3437)
* [8ab06f532](https://github.com/argoproj/argo-workflows/commit/8ab06f532b24944e5e9c3ed33c4adc249203cad4) feat(controller): Add log message count as metrics. (#3362)
* [ee6c8760e](https://github.com/argoproj/argo-workflows/commit/ee6c8760e3d46dfdab0f8d3a63dbf1995322ad4b) fix: Ensure task dependencies run after onExit handler is fulfilled (#3435)
* [05b3590b5](https://github.com/argoproj/argo-workflows/commit/05b3590b5dc70963700b4a7a5cef4afd76b4943d) feat(controller): Add support for Docker workflow executor for Windows nodes (#3301)
* [676868f31](https://github.com/argoproj/argo-workflows/commit/676868f31da1bce361e89bebfa1eea81471784ac) fix(docs): Update kubectl proxy URL (#3433)
* [733e95f74](https://github.com/argoproj/argo-workflows/commit/733e95f742ff14fb7c303d8b1dbf30403e9e8983) fix: Add struct-wide RWMutext to metrics (#3421)
* [0463f2416](https://github.com/argoproj/argo-workflows/commit/0463f24165e360344b5ff743915d16a12fef0ba0) fix: Use a unique queue to visit nodes (#3418)
* [eddcac639](https://github.com/argoproj/argo-workflows/commit/eddcac6398e674aa24b59aea2e449492cf2c0c02) fix: Script steps fail with exceededQuota (#3407)
* [c631a545e](https://github.com/argoproj/argo-workflows/commit/c631a545e824682652569e49eb764844a7f5cb05) feat(ui): Add Swagger UI (#3358)
* [910f636dc](https://github.com/argoproj/argo-workflows/commit/910f636dcfad66c999aa859e11a31a9a772ccc79) fix: No panic on watch. Fixes #3411 (#3426)
* [b4da1bccc](https://github.com/argoproj/argo-workflows/commit/b4da1bccc7f961200b8fe8551e4b286d1d5d5a9f) fix(sso): Remove unused `groups` claim. Fixes #3411 (#3427)
* [330d4a0a2](https://github.com/argoproj/argo-workflows/commit/330d4a0a2085b986855f9d3f4c5e27fbbe261ca9) fix: panic on wait command if event is null (#3424)
* [03cbb8cf2](https://github.com/argoproj/argo-workflows/commit/03cbb8cf2c75f5b241ae543259ea9db02e9339fd) fix(ui): Render DAG with exit node (#3408)
* [3d50f9852](https://github.com/argoproj/argo-workflows/commit/3d50f9852b481692235a3f075c4c0966e6404104) feat: Expose certain queue metrics (#3371)
* [c7b35e054](https://github.com/argoproj/argo-workflows/commit/c7b35e054e3eee38f750c0eaf4a5431a56f80c49) fix: Ensure non-leaf DAG tasks have their onExit handler's run (#3403)
* [70111600d](https://github.com/argoproj/argo-workflows/commit/70111600d464bd7dd99014aa88b5f2cbab64a573) fix: Fix concurrency issues with metrics (#3401)
* [bc4faf5f7](https://github.com/argoproj/argo-workflows/commit/bc4faf5f739e9172b7968e198dc595f27d506f7b) fix: Fix bug parsing parameters (#3372)
* [4934ad227](https://github.com/argoproj/argo-workflows/commit/4934ad227f043a5554c9a4f717f09f70d2c18cbf) fix: Running pods are garbage collected in PodGC onSuccess
* [0541cfda6](https://github.com/argoproj/argo-workflows/commit/0541cfda611a656ab16dbfcd7bed858b7c8b2f3c) chore(ui): Remove unused interfaces for artifacts (#3377)
* [1db93c062](https://github.com/argoproj/argo-workflows/commit/1db93c062c4f7e417bf74afe253e9a44e5381802) perf: Optimize time-based filtering on large number of workflows (#3340)
* [2ab9495f0](https://github.com/argoproj/argo-workflows/commit/2ab9495f0f3d944243d845411bafe7ebe496642b) fix: Don't double-count metric events (#3350)
* [7bd3e7209](https://github.com/argoproj/argo-workflows/commit/7bd3e7209018d0d7716ca0dbd0ffb1863165892d) fix(ui): Confirmation of workflow actions (#3370)
* [488790b24](https://github.com/argoproj/argo-workflows/commit/488790b247191dd22babadd9592efae11f4fd245) Wellcome is using Argo in our Data Labs division (#3365)
* [e4b08abbc](https://github.com/argoproj/argo-workflows/commit/e4b08abbcfe6f3886e0cd28e8ea8c1860ef8c9e1) fix(server): Remove `context cancelled` error. Fixes #3073 (#3359)
* [74ba51622](https://github.com/argoproj/argo-workflows/commit/74ba516220423cae5960b7dd51c4a8d5a37012b5) fix: Fix UI bug in DAGs (#3368)
* [5e60decf9](https://github.com/argoproj/argo-workflows/commit/5e60decf96e85a4077cd70d1d4e8da299d1d963d) feat(crds)!: Adds CRD generation and enhanced UI resource editor. Closes #859 (#3075)
* [731a1b4a6](https://github.com/argoproj/argo-workflows/commit/731a1b4a670078b8ba8e2f36bdd433afe22f2631) fix(controller): Allow events to be sent to non-argo namespace. Fixes #3342 (#3345)
* [916e0db25](https://github.com/argoproj/argo-workflows/commit/916e0db25880cef3058e4c3c3f6d118e14312be1) Adding InVision to Users (#3352)
* [6caf10fad](https://github.com/argoproj/argo-workflows/commit/6caf10fad7b116f9e3a6aaee4eb02243e37f2779) fix: Ensure child pods respect maxDuration (#3280)
* [2b4b7340a](https://github.com/argoproj/argo-workflows/commit/2b4b7340a6afb8317e27e3d58c46fba3c3db8ff0) fix: Remove broken SSO from quick-starts (#3327)
* [26570fd51](https://github.com/argoproj/argo-workflows/commit/26570fd51ec2eebe86cd0f3bc05ab43272f957c5) fix(controller)!: Support nested items. Fixes #3288 (#3290)
* [769a964fc](https://github.com/argoproj/argo-workflows/commit/769a964fcf51f58c76f2d4900c736f4dd945bd7f) feat(controller): Label workflows with their source workflow template (#3328)
* [0785be24c](https://github.com/argoproj/argo-workflows/commit/0785be24caaf93d62f5b77b2ee142a0691992b86) fix(ui): runtime error from null savedOptions props (#3330)
* [200be0e1e](https://github.com/argoproj/argo-workflows/commit/200be0e1e34f9cf6689e9739e3e4aea7f5bf7fde) feat: Save pagination limit and selected phases/labels to local storage (#3322)
* [b5ed90fe8](https://github.com/argoproj/argo-workflows/commit/b5ed90fe8611a10df7982e3fb2e6670400acf2d2) feat: Allow to change priority when resubmitting workflows (#3293)
* [60c86c84c](https://github.com/argoproj/argo-workflows/commit/60c86c84c60ac38c5a876d8df5362b5896700d73) fix(ui): Compiler error from workflows toolbar (#3317)
* [baad42ea8](https://github.com/argoproj/argo-workflows/commit/baad42ea8fed83b2158721766e518b203664ebe1) feat(ui): Add ability to select multiple workflows from list and perform actions on them. Fixes #3185 (#3234)
* [b6118939b](https://github.com/argoproj/argo-workflows/commit/b6118939bf0948e856bb20955f6911743106af4d) fix(controller): Fix panic logging. (#3315)
* [e021d7c51](https://github.com/argoproj/argo-workflows/commit/e021d7c512f01721e2f25d39836829752226c290) Clean up unused constants (#3298)
* [8b12f433a](https://github.com/argoproj/argo-workflows/commit/8b12f433a2e32cc69714ee456ee0d83e904ff31c) feat(cli): Add --logs to `argo [submit|resubmit|retry]`. Closes #3183 (#3279)
* [07b450e81](https://github.com/argoproj/argo-workflows/commit/07b450e8134e1afe0b58c45b21dc0c13d91ecdb5) fix: Reapply Update if CronWorkflow resource changed (#3272)
* [d44d264c7](https://github.com/argoproj/argo-workflows/commit/d44d264c72649c540204ccb54e9a57550f48d1fc) Fixes validation of overridden ref template parameters. (#3286)
* [62e54fb68](https://github.com/argoproj/argo-workflows/commit/62e54fb68778030245bed87f0675694ef3c58b57) fix: Fix delete --complete (#3278)
* [824de95bf](https://github.com/argoproj/argo-workflows/commit/824de95bfb2de0e325f92c0544f42267242486e4) fix(git): Fixes Git when using auth or fetch. Fixes #2343 (#3248)
* [018fcc23d](https://github.com/argoproj/argo-workflows/commit/018fcc23dc9fad051d15db2f9a83c2710d50c828) Update releasing.md (#3283)

### Contributors

* 0x1D-1983
* Alex Collins
* Daisuke Taniwaki
* Galen Han
* Jeff Uren
* Markus Lippert
* Remington Breeze
* Saravanan Balasubramanian
* Simon Behar
* Snyk bot
* Trevor Foster
* Vlad Losev
* Weston Platter
* Yuan Tang
* candonov
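For context on the Workflow Semaphore Support entry above (#3141): `spec.synchronization.semaphore` limits how many Workflows run concurrently, with the limit read from a ConfigMap key. A minimal sketch, with hypothetical ConfigMap and key names:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: semaphore-config          # hypothetical ConfigMap name
data:
  workflow: "2"                   # at most two Workflows hold the semaphore at once
---
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: synchronized-
spec:
  entrypoint: main
  synchronization:
    semaphore:
      configMapKeyRef:
        name: semaphore-config
        key: workflow
  templates:
    - name: main
      container:
        image: alpine:3.12
        command: [sh, -c, "sleep 10"]
```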
## v2.9.5 (2020-08-06)

* [5759a0e19](https://github.com/argoproj/argo-workflows/commit/5759a0e198d333fa8c3e0aeee433d93808c0dc72) Update manifests to v2.9.5
* [53d20462f](https://github.com/argoproj/argo-workflows/commit/53d20462fe506955306cafccb86e969dfd4dd040) codegen
* [c0382fd97](https://github.com/argoproj/argo-workflows/commit/c0382fd97d58c66b55eacbe2d05d473ecc93a5d9) remove line
* [18cf4ea6c](https://github.com/argoproj/argo-workflows/commit/18cf4ea6c15264f4db053a5d4d7ae1b478216fc0) fix: Enforce metric Help must be the same for each metric Name (#3613)
* [7b4e98a8d](https://github.com/argoproj/argo-workflows/commit/7b4e98a8d9e50d829feff75ad593ca3ac231ab5a) fix: Fix 'malformed request: field selector' error (#3636)
* [0fceb6274](https://github.com/argoproj/argo-workflows/commit/0fceb6274ac26b01d30d806978b532a7f675ea5b) fix: Fix panic and provide better error message on watch endpoint (#3605)
* [8a7e9d3dc](https://github.com/argoproj/argo-workflows/commit/8a7e9d3dc23749bbe7ed415c5e45abcd2fc40a92) fix(controller): Fix bug in util/RecoverWorkflowNameFromSelectorString. Add error handling (#3596)
* [2ba243340](https://github.com/argoproj/argo-workflows/commit/2ba2433405643e845c521b9351fbfe14f9042195) fix: Re-introduce 1 second sleep to reconcile informer (#3684)
* [dca3b6ce2](https://github.com/argoproj/argo-workflows/commit/dca3b6ce275e2cc880ba92e58045e462cdf84671) fix(controller): Adds ALL_POD_CHANGES_SIGNIFICANT (#3689)
* [819bfdb63](https://github.com/argoproj/argo-workflows/commit/819bfdb63c3abc398998af727f4e3fa8923a9497) fix: Avoid overriding the Workflow parameter when it is merging with WorkflowTemplate parameter (#3651)
* [89e05bdb8](https://github.com/argoproj/argo-workflows/commit/89e05bdb884029e7ad681089b11e1c8e9a38a3a7) fix: Don't panic on invalid template creation (#3643)
* [0b8d78e16](https://github.com/argoproj/argo-workflows/commit/0b8d78e160800f23da9f793aee7fa57f601cd591) fix: Simplify the WorkflowTemplateRef field validation to support all fields in WorkflowSpec except `Templates` (#3632)

### Contributors

* Alex Collins
* Remington Breeze
* Saravanan Balasubramanian
* Simon Behar

## v2.9.4 (2020-07-24)

* [20d2ace3d](https://github.com/argoproj/argo-workflows/commit/20d2ace3d5344db68ce1bc2a250bbb1ba9862613) Update manifests to v2.9.4
* [41db55254](https://github.com/argoproj/argo-workflows/commit/41db552549490caa9de2f9fa66521eb20a3263f3) Fix build
* [587785590](https://github.com/argoproj/argo-workflows/commit/5877855904b23b5c139778c0ea6ffec1a337dc0b) Fix build
* [f047ddf3b](https://github.com/argoproj/argo-workflows/commit/f047ddf3b69f283ce72204377119d1724ea1059d) fix: Fix flaky TestRetryOmitted (#3552)
* [b6ad88e2c](https://github.com/argoproj/argo-workflows/commit/b6ad88e2cf8fdd4c457958131cd2aa236b8b3e03) fix: Fix when retrying Workflows with Omitted nodes (#3528)
* [795998201](https://github.com/argoproj/argo-workflows/commit/7959982012f8dbe18f8ed7e38cf6f88f466da00d) fix: Panic on CLI Watch command (#3532)
* [eaa815f1f](https://github.com/argoproj/argo-workflows/commit/eaa815f1f353c7e192b81119fa2b12da8481658b) Fixed Packer and Hydrator test
* [71c7f64e1](https://github.com/argoproj/argo-workflows/commit/71c7f64e15fb347e33accdca0afd853e791f6d37) Fixed test failure
* [f0e8a3326](https://github.com/argoproj/argo-workflows/commit/f0e8a3326ddd025aedf6d740a994c028445321d3) fix: Merge WorkflowTemplateRef with defaults workflow spec (#3480) (see the sketch after this section)

### Contributors

* Saravanan Balasubramanian
* Simon Behar
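Several entries above touch `workflowTemplateRef` (#3480 here, #3632 in v2.9.5), which lets a Workflow take its spec from a WorkflowTemplate and have it merged with defaults. A minimal sketch, assuming a WorkflowTemplate named `my-template` already exists in the namespace:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: from-template-
spec:
  # The entrypoint, templates, etc. are resolved from the referenced
  # WorkflowTemplate and merged with any defaults/overrides set here.
  workflowTemplateRef:
    name: my-template   # hypothetical WorkflowTemplate name
```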
## v2.9.3 (2020-07-14)

* [d597af5c1](https://github.com/argoproj/argo-workflows/commit/d597af5c13caf3b1d150da9cd27b0917db5b1644) Update manifests to v2.9.3
* [d1a2ffd9b](https://github.com/argoproj/argo-workflows/commit/d1a2ffd9b77e41657692ee2e70818dd51c1bd4e8) fix: Pass resolved arguments to onExit handler (#3482)
* [2b706247f](https://github.com/argoproj/argo-workflows/commit/2b706247fd81215e49edb539bd7d26ea62b69fd0) Revert "fix: Pass resolved arguments to onExit handler (#3477)"
* [a431f93cd](https://github.com/argoproj/argo-workflows/commit/a431f93cdabb01f4acf29a6a190737e259611ef2) fix: Pass resolved arguments to onExit handler (#3477)
* [52bb1471e](https://github.com/argoproj/argo-workflows/commit/52bb1471e22ed25f5a8a4819d622556155e3de36) fix: Metric emission with retryStrategy (#3470)
* [675ce293f](https://github.com/argoproj/argo-workflows/commit/675ce293f41200bad96d4a66a31923a2cbe3b46c) fix(controller): Correct fail workflow when pod is deleted with --force. Fixes #3097 (#3469)
* [194a21392](https://github.com/argoproj/argo-workflows/commit/194a21392e656af46952deedf39b276fc0ba774c) fix(controller): Respect the volumes of a workflowTemplateRef. Fixes … (#3451)
* [584cb402c](https://github.com/argoproj/argo-workflows/commit/584cb402c4057de79198dcb0e82de6337e6ea138) fix(controller): Port master fix for #3214
* [065d9b651](https://github.com/argoproj/argo-workflows/commit/065d9b65109bb37c6147c4f87c7468434cbc70ed) test(controller): Add memoization tests. See #3214 (#3455) (#3466)
* [b252b4085](https://github.com/argoproj/argo-workflows/commit/b252b4085f58d3210cbe81ec986097398e48257b) test(controller): Add memoization tests. See #3214 (#3455)
* [e3a8319be](https://github.com/argoproj/argo-workflows/commit/e3a8319be1b081e07252a241cd807486c27eddfa) fix(controller): Prevent panic on nil node. Fixes #3436 (#3437)

### Contributors

* Alex Collins
* Simon Behar

## v2.9.2 (2020-07-08)

* [65c2bd44e](https://github.com/argoproj/argo-workflows/commit/65c2bd44e45c11e0a0b03adeef8d6800b72cd551) merge Dockerfile from master
* [14942f2f9](https://github.com/argoproj/argo-workflows/commit/14942f2f940e1ee6f182a269a29691d4169d3160) Update manifests to v2.9.2
* [823f9c549](https://github.com/argoproj/argo-workflows/commit/823f9c5499bd60dc5b9df6ce0c12f7295f72d294) Fix botched conflict resolution
* [2b3ccd3a0](https://github.com/argoproj/argo-workflows/commit/2b3ccd3a0ad8810e861696a7b97e84489ae4ed2a) fix: Add struct-wide RWMutext to metrics (#3421)
* [8e9ba4940](https://github.com/argoproj/argo-workflows/commit/8e9ba49401851603a1c154992cb22a87ff8430a3) fix: Use a unique queue to visit nodes (#3418)
* [28f76572b](https://github.com/argoproj/argo-workflows/commit/28f76572bc80b8582210549b1a67987ec812e7c5) conflict resolved
* [dcc09c983](https://github.com/argoproj/argo-workflows/commit/dcc09c983414671ae303c0111e39cf544d787ed8) fix: No panic on watch. Fixes #3411 (#3426)
* [4a48e25fc](https://github.com/argoproj/argo-workflows/commit/4a48e25fcdb110ef788a1d63f20163ec88a330c2) fix(sso): Remove unused `groups` claim. Fixes #3411 (#3427)
* [1e736b23c](https://github.com/argoproj/argo-workflows/commit/1e736b23c92c9cb45b23ff44b144271d19ffe728) fix: panic on wait command if event is null (#3424)
* [c10da5ecf](https://github.com/argoproj/argo-workflows/commit/c10da5ecf7d0bb490b0ee4edaf985eeab7f42a2e) fix: Ensure non-leaf DAG tasks have their onExit handler's run (#3403)
* [25b150aa8](https://github.com/argoproj/argo-workflows/commit/25b150aa86a3539121fd72e4a942f250d4d263dc) fix(ui): Render DAG with exit node (#3408)
* [6378a587b](https://github.com/argoproj/argo-workflows/commit/6378a587bc6900b2074f35205039eec453fd8051) fix: Fix concurrency issues with metrics (#3401)

### Contributors

* Alex Collins
* Saravanan Balasubramanian
* Simon Behar

## v2.9.1 (2020-07-03)

* [6b967d08c](https://github.com/argoproj/argo-workflows/commit/6b967d08c0a142aaa278538f2407c28de467262e) Update manifests to v2.9.1
* [6bf5fb3c9](https://github.com/argoproj/argo-workflows/commit/6bf5fb3c9de77de1629f059459bdce4f304e8d55) fix: Running pods are garbage collected in PodGC onSuccess

### Contributors

* Alex Collins
* Saravanan Balasubramanian

## v2.9.0 (2020-07-01)

* [d67d3b1db](https://github.com/argoproj/argo-workflows/commit/d67d3b1dbc61ebc5789806794ccd7e2debd71ffc) Update manifests to v2.9.0
* [9c52c1be2](https://github.com/argoproj/argo-workflows/commit/9c52c1be2aaa317720b6e2c1bae20d7489f45f14) fix: Don't double-count metric events (#3350)
* [813122f76](https://github.com/argoproj/argo-workflows/commit/813122f765d47529cfe4e7eb25499ee98051abd6) fix: Fix UI bug in DAGs (#3368)
* [248643d3b](https://github.com/argoproj/argo-workflows/commit/248643d3b5ad4a93adef081afd73ee931ee76dae) fix: Ensure child pods respect maxDuration (#3280)
* [71d295849](https://github.com/argoproj/argo-workflows/commit/71d295849ba4ffa3a2e7e843c952f3330fb4160a) fix(controller): Allow events to be sent to non-argo namespace. Fixes #3342 (#3345)
* [52be71bc7](https://github.com/argoproj/argo-workflows/commit/52be71bc7ab5ddf56aab65570ee78a2c40b852b6) fix: Remove broken SSO from quick-starts (#3327)

### Contributors

* Alex Collins
* Simon Behar

## v2.9.0-rc4 (2020-06-26)

* [5b109bcb9](https://github.com/argoproj/argo-workflows/commit/5b109bcb9257653ecbae46e6315c8d65842de58a) Update manifests to v2.9.0-rc4
* [011f1368d](https://github.com/argoproj/argo-workflows/commit/011f1368d11abadc1f3bad323067007eea71b9bc) fix(controller): Fix panic logging. (#3315)
* [5395ad3f9](https://github.com/argoproj/argo-workflows/commit/5395ad3f9ad938e334f29dc27e4aa105c17f1c58) Clean up unused constants (#3298)
* [a2a1fba8b](https://github.com/argoproj/argo-workflows/commit/a2a1fba8bf981aff0a9467368fd87cc0c5325de6) fix: Reapply Update if CronWorkflow resource changed (#3272)
* [9af98a5bc](https://github.com/argoproj/argo-workflows/commit/9af98a5bc141872d2fd55db8182674fb950c9ce1) Fixes validation of overridden ref template parameters. (#3286)
* [a91cea5f0](https://github.com/argoproj/argo-workflows/commit/a91cea5f087153553760f2d1f63413c7e78ab4ba) fix: Fix delete --complete (#3278)
* [d5a4807ae](https://github.com/argoproj/argo-workflows/commit/d5a4807aefed6d1df0296aabd2e4e6a7a7de32f1) Update releasing.md (#3283)

### Contributors

* Alex Collins
* Saravanan Balasubramanian
* Simon Behar
* Vlad Losev

## v2.9.0-rc3 (2020-06-23)

* [2e95ff484](https://github.com/argoproj/argo-workflows/commit/2e95ff4843080e7fd673cf0a551a862e3e39d326) Update manifests to v2.9.0-rc3
* [2bcfafb56](https://github.com/argoproj/argo-workflows/commit/2bcfafb56230194fd2d23adcfa5a1294066ec91e) fix: Add {{workflow.status}} to workflow-metrics (#3271) (see the sketch after this section)
* [e6aab6051](https://github.com/argoproj/argo-workflows/commit/e6aab605122356a10cb21df3a08e1ddeac6d2593) fix(jqFilter)!: remove extra quotes around output parameter value (#3251)
* [f4580163f](https://github.com/argoproj/argo-workflows/commit/f4580163f4187f798f93b8d778415e8bec001dda) fix(ui): Allow render of templates without entrypoint. Fixes #2891 (#3274)
* [d1cb1992c](https://github.com/argoproj/argo-workflows/commit/d1cb1992cd22e9f69894532f214fa0e00312ff36) fixed archiveLabelSelector nil (#3270)
* [c7e4c1808](https://github.com/argoproj/argo-workflows/commit/c7e4c1808cf097857b8ee89d326ef9f32384fc1b) fix(ui): Update workflow drawer with new duration format (#3256)
* [f2381a544](https://github.com/argoproj/argo-workflows/commit/f2381a5448e9d49a7b6ed0c9583ac8cf9b257938) fix(controller): More structured logging. Fixes #3260 (#3262)
* [acba084ab](https://github.com/argoproj/argo-workflows/commit/acba084abb01b967c239952d49e8e3d7775cbf2c) fix: Avoid unnecessary nil check for annotations of resubmitted workflow (#3268)
* [55e13705a](https://github.com/argoproj/argo-workflows/commit/55e13705ae57f86ca6c5846eb5de3e80370bc1d4) feat: Append previous workflow name as label to resubmitted workflow (#3261)
* [2dae72449](https://github.com/argoproj/argo-workflows/commit/2dae724496a96ce2e0993daea0a3b6a473f784da) feat: Add mode to require Workflows to use workflowTemplateRef (#3149)
* [56694abe2](https://github.com/argoproj/argo-workflows/commit/56694abe27267c1cb855064b44bc7c32d61ca66c) Fixed onExit on workflowTemplateRef (#3263)
* [54dd72c24](https://github.com/argoproj/argo-workflows/commit/54dd72c2439b5a6ef389eab4cb39bd412db9fd42) update mysql yaml port (#3258)
* [fb5026324](https://github.com/argoproj/argo-workflows/commit/fb502632419409e528e23f1ef70e7f610812d175) feat: Configure ArchiveLabelSelector for Workflow Archive (#3249)
* [5467c8995](https://github.com/argoproj/argo-workflows/commit/5467c8995e07e5501d685384e44585fc1b02c6b8) fix(controller): set pod finish timestamp when it is deleted (#3230)
* [4bd33c6c6](https://github.com/argoproj/argo-workflows/commit/4bd33c6c6ce6dcb9f0c85dab40f162608d5f67a6) chore(cli): Add examples of @latest alias for relevant commands. Fixes #3225 (#3242)
* [17108df1c](https://github.com/argoproj/argo-workflows/commit/17108df1cea937f49f099ec26b7a25bd376b16a5) fix: Ensure subscription is closed in log viewer (#3247)

### Contributors

* Alex Collins
* Ben Ye
* Jie Zhang
* Pierre Houssin
* Remington Breeze
* Saravanan Balasubramanian
* Simon Behar
* Yuan Tang
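For context on the metrics entries above: #3271 makes `{{workflow.status}}` available to workflow-level metrics, and the related #3613 (v2.9.5 and v2.10.0-rc7) enforces that every metric sharing a `name` also shares its `help` text. A minimal sketch of a workflow-level counter, with a hypothetical metric name:

```yaml
spec:
  metrics:
    prometheus:
      - name: workflow_result_counter   # hypothetical metric name
        # `help` is required and must be identical everywhere this
        # metric name is emitted (#3613).
        help: "Count of workflows by result status"
        labels:
          - key: status
            value: "{{workflow.status}}"   # available per #3271
        counter:
          value: "1"
```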
## v2.9.0-rc2 (2020-06-16)

* [abf02c3ba](https://github.com/argoproj/argo-workflows/commit/abf02c3ba143cbd9f2d42f286b86fa80ed0ecb5b) Update manifests to v2.9.0-rc2
* [4db1c4c84](https://github.com/argoproj/argo-workflows/commit/4db1c4c8495d0b8e13c718207175273fe98555a2) fix: Support the TTLStrategy for WorkflowTemplateRef (#3239)
* [47f506937](https://github.com/argoproj/argo-workflows/commit/47f5069376f3c61b09ff02ff5729e5c3e6e58e45) feat(logging): Made more controller err/warn logging structured (#3240)
* [ef159f9ad](https://github.com/argoproj/argo-workflows/commit/ef159f9ad6be552de1abf58c3dc4dc6911c49733) feat: Tick CLI Workflow watch even if there are no new events (#3219)
* [ff1627b71](https://github.com/argoproj/argo-workflows/commit/ff1627b71789c42f604c0f83a9a3328d7e6b8248) fix(events): Adds config flag. Reduce number of dupe events emitted. (#3205)
* [eae8f6814](https://github.com/argoproj/argo-workflows/commit/eae8f68144acaf5c2ec0145ef0d136097cca7fcc) feat: Validate CronWorkflows before execution (#3223)
* [4470a8a29](https://github.com/argoproj/argo-workflows/commit/4470a8a29bca9e16ac7e5d7d8c8a2310d0200efa) fix(ui/server): Fix broken label filter functionality on UI due to bug on server. Fix #3226 (#3228)
* [e5e6456be](https://github.com/argoproj/argo-workflows/commit/e5e6456be37b52856205c4f7600a05ffef6daab1) feat(cli): Add --latest flag for argo get command as per #3128 (#3179)
* [34608594b](https://github.com/argoproj/argo-workflows/commit/34608594b98257c4ae47a280831d462bab7c53b4) fix(ui): Correctly update workflow list when workflow are modified/deleted (#3220)
* [a7d8546cf](https://github.com/argoproj/argo-workflows/commit/a7d8546cf9515ea70d686b8c669bf0a1d9b7538d) feat(controller): Improve throughput of many workflows. Fixes #2908 (#2921)
* [15885d3ed](https://github.com/argoproj/argo-workflows/commit/15885d3edc6d4754bc66f950251450eea8f29170) feat(sso): Allow reading SSO clientID from a secret. (#3207)
* [723e9d5f4](https://github.com/argoproj/argo-workflows/commit/723e9d5f40448ae425631fac8af2863a1f1ff1f5) fix: Ensure image name is present in containers (#3215)

### Contributors

* Alex Collins
* Remington Breeze
* Saravanan Balasubramanian
* Simon Behar
* Vlad Losev

## v2.9.0-rc1 (2020-06-10)

* [c930d2ec6](https://github.com/argoproj/argo-workflows/commit/c930d2ec6a5ab2a2473951c4500272181bc759be) Update manifests to v2.9.0-rc1
* [0ee5e1125](https://github.com/argoproj/argo-workflows/commit/0ee5e11253282eb5c36a5163086c20306cc09019) feat: Only process significant pod changes (#3181)
* [c89a81f3a](https://github.com/argoproj/argo-workflows/commit/c89a81f3ad3a76e22b98570a6045fd8eb358dbdb) feat: Add '--schedule' flag to 'argo cron create' (#3199)
* [591f649a3](https://github.com/argoproj/argo-workflows/commit/591f649a306edf826b667d0069ee04cb345dcd26) refactor: Refactor assesDAGPhase logic (#3035)
* [8e1d56cb7](https://github.com/argoproj/argo-workflows/commit/8e1d56cb78f8e039f4dbeea991bdaa1935738130) feat(controller): Add default name for artifact repository ref. (#3060)
* [f1cdba18b](https://github.com/argoproj/argo-workflows/commit/f1cdba18b3ef476e11f02e50a69fc33924158be7) feat(controller): Add `--qps` and `--burst` flags to controller (#3180)
* [b86949f0e](https://github.com/argoproj/argo-workflows/commit/b86949f0e9523e10c69e0f6b10b0f35413a20520) fix: Ensure stable desc/hash for metrics (#3196)
* [04c77f490](https://github.com/argoproj/argo-workflows/commit/04c77f490b00ffc05f74a941f1c9ccf76a5bf789) fix(server): Allow field selection for workflow-event endpoint (fixes #3163) (#3165)
* [a130d488a](https://github.com/argoproj/argo-workflows/commit/a130d488ab69cf4d4d543c7348a45e4cd34f972e) feat(ui): Add drawer with more details for each workflow in Workflow List (#3151)
* [fa84e2032](https://github.com/argoproj/argo-workflows/commit/fa84e203239b35976210a441387d6480d951f034) fix: Do not use alphabetical order if index exists (#3174)
* [138af5977](https://github.com/argoproj/argo-workflows/commit/138af5977b81e619681eb2cfa20fd3891c752510) fix(cli): Sort expanded nodes by index. Closes #3145 (#3146)
* [c42e4d3ae](https://github.com/argoproj/argo-workflows/commit/c42e4d3aeaf4093581d0a5d92b4d7750be205225) feat(metrics): Add node-level resources duration as Argo variable for metrics. Closes #3110 (#3161)
* [edfa5b93f](https://github.com/argoproj/argo-workflows/commit/edfa5b93fb58c0b243e1f019b92f02e846f7b83d) feat(metrics): Report controller error counters via metrics. Closes #3034 (#3144)
* [8831e4ead](https://github.com/argoproj/argo-workflows/commit/8831e4ead39acfe3d49801271a95907a3b737d49) feat(argo-server): Add support for SSO. See #1813 (#2745)
* [b62184c2e](https://github.com/argoproj/argo-workflows/commit/b62184c2e3715fd7ddd9077e11513db25a512c93) feat(cli): More `argo list` and `argo delete` options (#3117)
* [c6565d7c3](https://github.com/argoproj/argo-workflows/commit/c6565d7c3c8c4b40c6725a1f682186e04e0a8f36) fix(controller): Maybe bug with nil woc.wfSpec. Fixes #3121 (#3160)
* [70b56f25b](https://github.com/argoproj/argo-workflows/commit/70b56f25baf78d67253a2f29bd4057279b0e9558) enhancement(ui): Add workflow labels column to workflow list. Fixes #2782 (#3143)
* [a0062adfe](https://github.com/argoproj/argo-workflows/commit/a0062adfe895ee39572db3aa6f259913279c6db3) feat(ui): Add Alibaba Cloud OSS related models in UI (#3140)
* [1469991ce](https://github.com/argoproj/argo-workflows/commit/1469991ce34333697df07ca750adb247b21cc3a9) fix: Update container delete grace period to match Kubernetes default (#3064)
* [df725bbdd](https://github.com/argoproj/argo-workflows/commit/df725bbddac2f3a216010b069363f0344a2f5a80) fix(ui): Input artifacts labelled in UI. Fixes #3098 (#3131)
* [c0d59cc28](https://github.com/argoproj/argo-workflows/commit/c0d59cc283a62f111123728f70c24df5954d98e4) feat: Persist DAG rendering options in local storage (#3126)
* [8715050b4](https://github.com/argoproj/argo-workflows/commit/8715050b441f0fb5c84ae0a0a19695c89bf2e7b9) fix(ui): Fix label error (#3130)
* [1814ea2e4](https://github.com/argoproj/argo-workflows/commit/1814ea2e4a6702eacd567aefd1194bd6aec212ed) fix(item): Support ItemValue.Type == List. Fixes #2660 (#3129)
* [12b72546e](https://github.com/argoproj/argo-workflows/commit/12b72546eb49b8af5b4374577107f30484a6e975) fix: Panic on invalid WorkflowTemplateRef (#3127)
* [09092147c](https://github.com/argoproj/argo-workflows/commit/09092147cf26939e775848d75f687d5c8fc15aa9) fix(ui): Display error message instead of DAG when DAG cannot be rendered. Fixes #3091 (#3125)
* [69c9e5f05](https://github.com/argoproj/argo-workflows/commit/69c9e5f053195e46871176c6a31d646144532c3a) fix: Remove unnecessary panic (#3123)
* [2f3aca898](https://github.com/argoproj/argo-workflows/commit/2f3aca8988cee483f5fac116a8e99cdec7fd89cc) add AppDirect to the list of users (#3124)
* [257355e4c](https://github.com/argoproj/argo-workflows/commit/257355e4c54b8ca37e056e73718a112441faddb4) feat: Add 'submit --from' to CronWorkflow and WorkflowTemplate in UI. Closes #3112 (#3116)
* [6e5dd2e19](https://github.com/argoproj/argo-workflows/commit/6e5dd2e19a3094f88e6f927f786f866eccc5f500) Add Alibaba OSS to the list of supported artifacts (#3108)
* [1967b45b1](https://github.com/argoproj/argo-workflows/commit/1967b45b1465693b71e3a0ccac9563886641694c) support sso (#3079)
* [9229165f8](https://github.com/argoproj/argo-workflows/commit/9229165f83011b3d5b867ac511793f8934bdcfab) feat(ui): Add cost optimisation nudges. (#3089)
* [e88124dbf](https://github.com/argoproj/argo-workflows/commit/e88124dbf64128388cf0e6fa6d30b2f756e57d23) fix(controller): Do not panic if woc.orig is not hydrated. Fixes #3118 (#3119)
* [132b947ad](https://github.com/argoproj/argo-workflows/commit/132b947ad6ba5a5b81e281c469f08cb97748e42d) fix: Differentiate between Fulfilled and Completed (#3083)
* [4de997468](https://github.com/argoproj/argo-workflows/commit/4de9974681034d7bb7223d2131eba1cd0e5d254d) feat: Added Label selector and Field selector in Argo list (#3088)
* [bb2ce9f77](https://github.com/argoproj/argo-workflows/commit/bb2ce9f77894982f5bcae4e772795d0e679bf405) fix: Graceful error handling of malformatted log lines in watch (#3071)
* [4fd27c314](https://github.com/argoproj/argo-workflows/commit/4fd27c314810ae43b39a5c2d36cef2dbbf5691af) build(swagger): Fix Swagger build problems (#3084)
* [fa69c1bb7](https://github.com/argoproj/argo-workflows/commit/fa69c1bb7157e19755eea669bf44434e2bedd157) feat: Add CronWorkflowConditions to report errors (#3055)
* [50ad3cec2](https://github.com/argoproj/argo-workflows/commit/50ad3cec2b002b81e30a5d6975e7dc044a83b301) adds millisecond-level timestamps to argoexec (#2950)
* [6464bd199](https://github.com/argoproj/argo-workflows/commit/6464bd199eff845da66d59d263f2d04479663020) fix(controller): Implement offloading for workflow updates that are re-applied. Fixes #2856 (#2941)
* [6df0b2d35](https://github.com/argoproj/argo-workflows/commit/6df0b2d3538cd1525223c8d85581662ece172cf9) feat: Support Top level workflow template reference (#2912)
* [0709ad28c](https://github.com/argoproj/argo-workflows/commit/0709ad28c3dbd4696404aa942478a7505e9e9a67) feat: Enhanced filters for argo {watch,get,submit} (#2450)
* [2b038ed2e](https://github.com/argoproj/argo-workflows/commit/2b038ed2e61781e5c4b8a796aba4c4afe4850305) feat: Enhanced depends logic (#2673) (see the sketch after this section)
* [4c3387b27](https://github.com/argoproj/argo-workflows/commit/4c3387b273d802419a1552345dfb95dd05b8555b) fix: Linters should error if nothing was validated (#3011)
* [51dd05b5f](https://github.com/argoproj/argo-workflows/commit/51dd05b5f16e0554bdd33511f8332f3198604690) fix(artifacts): Explicit archive strategy. Fixes #2140 (#3052)
* [ada2209ef](https://github.com/argoproj/argo-workflows/commit/ada2209ef94e2380c4415cf19a8e321324650405) Revert "fix(artifacts): Allow tar check to be ignored. Fixes #2140 (#3024)" (#3047)
* [38a995b74](https://github.com/argoproj/argo-workflows/commit/38a995b749b83a76b5f1f2542df959898489210b) fix(executor): Properly handle empty resource results, like for a missing get (#3037)
* [a1ac8bcf5](https://github.com/argoproj/argo-workflows/commit/a1ac8bcf548c4f8fcff6b7df25aa61ad9e4c15ed) fix(artifacts): Allow tar check to be ignored. Fixes #2140 (#3024)
* [f12d79cad](https://github.com/argoproj/argo-workflows/commit/f12d79cad9d4a9b2169f634183b6c7837c9e4615) fix(controller)!: Correctly format workflow.creationTimestamp as RFC3339. Fixes #2974 (#3023)
* [d10e949a0](https://github.com/argoproj/argo-workflows/commit/d10e949a061de541f5312645dfa19c5732a302ff) fix: Consider metric nodes that were created and completed in the same operation (#3033)
* [202d4ab31](https://github.com/argoproj/argo-workflows/commit/202d4ab31a2883d4f2448c309c30404f67761727) fix(executor): Optional input artifacts. Fixes #2990 (#3019)
* [f17e946c4](https://github.com/argoproj/argo-workflows/commit/f17e946c4d006cda4e161380fb5a0ba52dcebfd1) fix(executor): Save script results before artifacts in case of error. Fixes #1472 (#3025)
* [3d216ae6d](https://github.com/argoproj/argo-workflows/commit/3d216ae6d5ad96b996ce40c42793a2031a392bb1) fix: Consider missing optional input/output artifacts with same name (#3029)
* [3717dd636](https://github.com/argoproj/argo-workflows/commit/3717dd636949e4a78e8a6ddee4320e6a98cc3c81) fix: Improve robustness of releases. Fixes #3004 (#3009)
* [9f86a4e94](https://github.com/argoproj/argo-workflows/commit/9f86a4e941ecca4399267f7780fbb2e7ddcd2199) feat(ui): Enable CSP, HSTS, X-Frame-Options. Fixes #2760, #1376, #2761 (#2971)
* [cb71d585c](https://github.com/argoproj/argo-workflows/commit/cb71d585c73c72513aead057d570c279ba46e74b) refactor(metrics)!: Refactor Metric interface (#2979)
* [052e6c519](https://github.com/argoproj/argo-workflows/commit/052e6c5197a6e8b4dfb14d18c2b923ca93fcb84c) Fix isTarball to handle the small gzipped file (#3014)
* [cdcba3c4d](https://github.com/argoproj/argo-workflows/commit/cdcba3c4d6849668238180903e59f37affdff01d) fix(ui): Displays command args correctly pre-formatted. (#3018)
* [cc0fe433a](https://github.com/argoproj/argo-workflows/commit/cc0fe433aebc0397c648ff4ddc8c1f99df042568) fix(events): Correct event API Version. Fixes #2994 (#2999)
* [d5d6f750b](https://github.com/argoproj/argo-workflows/commit/d5d6f750bf9324e8277fc0f05d8214b5dee255cd) feat(controller)!: Updates the resource duration calculation. Fixes #2934 (#2937)
* [fa3801a5d](https://github.com/argoproj/argo-workflows/commit/fa3801a5d89d58208f07977b73a8686e3aa2c3c9) feat(ui): Render 2000+ nodes DAG acceptably. (#2959)
* [f952df517](https://github.com/argoproj/argo-workflows/commit/f952df517bae1f423063d61e7542c4f0c4c667e1) fix(executor/pns): remove sleep before sigkill (#2995)
* [2a9ee21f4](https://github.com/argoproj/argo-workflows/commit/2a9ee21f47dbd36ba1d2020d0939c73fc198b333) feat(ui): Add Suspend and Resume to CronWorkflows in UI (#2982)
* [60d5fdc7f](https://github.com/argoproj/argo-workflows/commit/60d5fdc7f91b675055ab0b1c7f450fa6feb0fac5) fix: Begin counting maxDuration from first child start (#2976)
* [d8cb66e78](https://github.com/argoproj/argo-workflows/commit/d8cb66e785c170030bd503ca4626ab4e6e4f8c6c) feat: Add Argo variable {{retries}} to track retry attempt (#2911)
* [3c4422326](https://github.com/argoproj/argo-workflows/commit/3c4422326dceea456df94a71270df80e9cbf7177) fix: Remove duplicate node event. Fixes #2961 (#2964)
* [d8ab13f24](https://github.com/argoproj/argo-workflows/commit/d8ab13f24031eae58354b9ac1c59bad69968cbe6) fix: Consider Shutdown when assessing DAG Phase for incomplete Retry node (#2966)
* [8a511e109](https://github.com/argoproj/argo-workflows/commit/8a511e109dc55d9f9c7b69614f110290c2536858) fix: Nodes with pods deleted out-of-band should be Errored, not Failed (#2855)
* [5f01c4a59](https://github.com/argoproj/argo-workflows/commit/5f01c4a5945a9d89d5194efbbaaf1d4d2c40532d) Upgraded to Node 14.0.0 (#2816)
* [849d876c8](https://github.com/argoproj/argo-workflows/commit/849d876c835982bbfa814714e713b4d19b35148d) Fixes error with unknown flag: --show-all (#2960)
* [93bf6609c](https://github.com/argoproj/argo-workflows/commit/93bf6609cf407d6cd374a6dd3bc137b1c82e88df) fix: Don't update backoff message to save operations (#2951)
* [3413a5dfa](https://github.com/argoproj/argo-workflows/commit/3413a5dfa7c29711d9bf0d227437a10bf0de9d3b) fix(cli): Remove info logging from watches. Fixes #2955 (#2958)
* [fe9f90191](https://github.com/argoproj/argo-workflows/commit/fe9f90191fac2fb7909c8e0b31c5f3b5a31236c4) fix: Display Workflow finish time in UI (#2896)
* [c8bd0bb82](https://github.com/argoproj/argo-workflows/commit/c8bd0bb82e174cca8d733e7b75748273172efa37) fix(ui): Change default pagination to all and sort workflows (#2943)
* [e3ed686e1](https://github.com/argoproj/argo-workflows/commit/e3ed686e13eacf0174b3e1088fe3cf2eb7706b39) fix(cli): Re-establish watch on EOF (#2944)
* [673553729](https://github.com/argoproj/argo-workflows/commit/673553729e12d4ad83387eba68b3cbfb0aea8fe4) fix(swagger)!: Fixes invalid K8S definitions in `swagger.json`. Fixes #2888 (#2907)
* [023f23389](https://github.com/argoproj/argo-workflows/commit/023f233896ac90fdf1529f747c56ab19028b6a9c) fix(argo-server)!: Implement missing instanceID code. Fixes #2780 (#2786)
* [7b0739e0b](https://github.com/argoproj/argo-workflows/commit/7b0739e0b846cff7d2bc3340e88859ab655d25ff) Fix typo (#2939)
* [20d69c756](https://github.com/argoproj/argo-workflows/commit/20d69c75662653523dc6276e7e57084ec1c7334f) Detect ctrl key when a link is clicked (#2935)
* [f32cec310](https://github.com/argoproj/argo-workflows/commit/f32cec31027b7112a9a51069c2ad7b1cfbedd960) fix default null value for timestamp column - MySQL 5.7 (#2933)
* [99858ea53](https://github.com/argoproj/argo-workflows/commit/99858ea53d79e964530f4a3840936d5da79585d9) feat(controller): Remove the excessive logging of node data (#2925)
* [03ad694c4](https://github.com/argoproj/argo-workflows/commit/03ad694c42a782dc9f45f7ff0ba94b32cbbfa2f1) feat(cli): Refactor `argo list --chunk-size` and add `argo archive list --chunk-size`. Fixes #2820 (#2854)
* [a06cb5e0e](https://github.com/argoproj/argo-workflows/commit/a06cb5e0e02d7b480d20713e9c67f83d09fa2b24) fix: remove doubled entry in server cluster role deployment (#2904)
* [c71116dde](https://github.com/argoproj/argo-workflows/commit/c71116ddedafde0f2931fbd489b9b17b8bd81e65) feat: Windows Container Support. Fixes #1507 and #1383 (#2747)
* [3afa7b2f1](https://github.com/argoproj/argo-workflows/commit/3afa7b2f1b4ecb9e64b2c9dee1e91dcf548f82c3) fix(ui): Use LogsViewer for container logs (#2825)
* [7d8818ca2](https://github.com/argoproj/argo-workflows/commit/7d8818ca2a335f5cb200d9b088305d032cacd020) fix(controller): Workflow stop and resume by node didn't properly support offloaded nodes. Fixes #2543 (#2548)
* [db52e7bac](https://github.com/argoproj/argo-workflows/commit/db52e7bac649a7b101f846e7f7354d10a45c9e62) fix(controller): Add mutex to nameEntryIDMap in cron controller. Fix #2638 (#2851)
* [9a33aa2d3](https://github.com/argoproj/argo-workflows/commit/9a33aa2d3c0ffedf33625bd3339c2006937c0953) docs(users): Adding Habx to the users list (#2781)
* [9e4ac9b3c](https://github.com/argoproj/argo-workflows/commit/9e4ac9b3c8c7028c9759278931a76c5f26481e53) feat(cli): Tolerate deleted workflow when running `argo delete`. Fixes #2821 (#2877)
* [a0035dd58](https://github.com/argoproj/argo-workflows/commit/a0035dd58609d744a6fa304e51d61474f25c817d) fix: ConfigMap syntax (#2889)
* [56143eb1e](https://github.com/argoproj/argo-workflows/commit/56143eb1e1e80275da2742135ef147e563cae737) feat(ui): Add pagination to workflow list. Fixes #1080 and #976 (#2863)
* [e378ca470](https://github.com/argoproj/argo-workflows/commit/e378ca470f1a97d624d3aceb3c53b55155fd02a9) fix: Cannot create WorkflowTemplate with un-supplied inputs (#2869)
* [c3e30c505](https://github.com/argoproj/argo-workflows/commit/c3e30c5052b9544d363c4c73315be5136b593f9a) fix(swagger): Generate correct Swagger for inline objects. Fixes #2835 (#2837)
* [c0143d347](https://github.com/argoproj/argo-workflows/commit/c0143d3478c6ff2ec5138f7c6b272fc8e36c6734) feat: Add metric retention policy (#2836)
* [f03cda61a](https://github.com/argoproj/argo-workflows/commit/f03cda61a73243eea225fe4d0a49f2ada0523d0d) Update getting-started.md (#2872)

### Contributors

* Adam Gilat
* Alex Collins
* Alex Stein
* Daisuke Taniwaki
* Daniel Sutton
* Florent Clairambault
* Huan-Cheng Chang
* Kannappan Sirchabesan
* Leonardo Luz
* Markus Lippert
* Matt Brant
* Mike Seddon
* Pradip Caulagi
* Remington Breeze
* Romain GUICHARD
* Saravanan Balasubramanian
* Sascha Grunert
* Simon Behar
* Stephen Steiner
* William
* Youngjoon Lee
* Yuan Tang
* dmayle
* mark9white
* shibataka000
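For context on the enhanced depends logic above (#2673): DAG tasks gain a `depends` expression that can branch on task results rather than just completion. A minimal sketch, with hypothetical task and template names:

```yaml
templates:
  - name: main
    dag:
      tasks:
        - name: build
          template: echo
        - name: test
          template: echo
          depends: "build"   # plain dependency, like the old `dependencies` list
        - name: cleanup
          template: echo
          # Runs only when build succeeded but test failed or errored.
          depends: "build.Succeeded && (test.Failed || test.Errored)"
  - name: echo
    container:
      image: alpine:3.12
      command: [echo, ok]
```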
Fixes #2660 (#3129) - * [3b840201b](https://github.com/argoproj/argo-workflows/commit/3b840201b2be6402d247ee12b9993061317653b7) Fix test - * [41689c55a](https://github.com/argoproj/argo-workflows/commit/41689c55ac388c6634cf46ee1154f31df556e59e) fix: Graceful error handling of malformatted log lines in watch (#3071) - * [79aeca1f3](https://github.com/argoproj/argo-workflows/commit/79aeca1f3faa62678115e92c0ecb0b0e7670392a) fix: Linters should error if nothing was validated (#3011) - * [c977d8bba](https://github.com/argoproj/argo-workflows/commit/c977d8bbab61b282375dcac598eabc558751b386) fix(executor): Properly handle empty resource results, like for a missing get (#3037) - * [1a01c8042](https://github.com/argoproj/argo-workflows/commit/1a01c804212a069e3b82bf0e1fceb12141e101f6) fix: Consider metric nodes that were created and completed in the same operation (#3033) - * [6065b7ed7](https://github.com/argoproj/argo-workflows/commit/6065b7ed7688b3fc4fb9c46b449a8dab50da0a21) fix: Consider missing optional input/output artifacts with same name (#3029) - * [acb0f1c16](https://github.com/argoproj/argo-workflows/commit/acb0f1c1679ee6ec686bb5ff266bc20c4344f3e2) fix: Cannot create WorkflowTemplate with un-supplied inputs (#2869) - * [5b04ccce7](https://github.com/argoproj/argo-workflows/commit/5b04ccce7199e02f6054c47c9d17f071af9d6c1d) fix(controller)!: Correctly format workflow.creationTimepstamp as RFC3339. Fixes #2974 (#3023) - * [319ee46d3](https://github.com/argoproj/argo-workflows/commit/319ee46d3927b2cfe1c7e2aec38e01e24ebd3b4f) fix(events): Correct event API Version. Fixes #2994 (#2999) - -### Contributors - - * Alex Collins - * Saravanan Balasubramanian - * Simon Behar - * dmayle - -## v2.8.0 (2020-05-11) - - * [8f6961747](https://github.com/argoproj/argo-workflows/commit/8f696174746ed01b9bf1941ad03da62d312df641) Update manifests to v2.8.0 - -### Contributors - - * Alex Collins - -## v2.8.0-rc4 (2020-05-06) - - * [ee0dc575d](https://github.com/argoproj/argo-workflows/commit/ee0dc575dc7d2187e0e97e768c7b58538958608b) Update manifests to v2.8.0-rc4 - * [3a85610a4](https://github.com/argoproj/argo-workflows/commit/3a85610a42e4ca4ed4e506fd2017791464db9c59) fix(cli): Remove info logging from watches. Fixes #2955 (#2958) - * [29c7780dc](https://github.com/argoproj/argo-workflows/commit/29c7780dc9311dc734a4f09f683253648ce75dd0) make codegen - * [265666bf7](https://github.com/argoproj/argo-workflows/commit/265666bf7b62d421e939a373ee0c676103d631cd) fix(cli): Re-establish watch on EOF (#2944) - * [fef4e9689](https://github.com/argoproj/argo-workflows/commit/fef4e968900365a79fd623efa054671b66dc8f1e) fix(swagger)!: Fixes invalid K8S definitions in `swagger.json`. Fixes #2888 (#2907) - * [249309aa7](https://github.com/argoproj/argo-workflows/commit/249309aa7c6d483cb622589afa417cb3b7f4965f) fix(swagger): Generate correct Swagger for inline objects. Fixes #2835 (#2837) - * [ad28a9c95](https://github.com/argoproj/argo-workflows/commit/ad28a9c955562bbf3f3cb3346118e7c39c84ffe0) fix(controller): Workflow stop and resume by node didn't properly support offloaded nodes. Fixes #2543 (#2548) - * [d9fca8f08](https://github.com/argoproj/argo-workflows/commit/d9fca8f08ffc3a16ee085352831f9b208131661d) fix(controller): Add mutex to nameEntryIDMap in cron controller. 

### Contributors

 * Alex Collins
 * mark9white
 * shibataka000

## v2.8.0-rc3 (2020-04-28)

 * [2f153b215](https://github.com/argoproj/argo-workflows/commit/2f153b215666b3dc30c65931faeedba749207110) Update manifests to v2.8.0-rc3
 * [d66224e12](https://github.com/argoproj/argo-workflows/commit/d66224e12613c36f8fa91956509fad9fc450af74) fix: Don't error when deleting already-deleted WFs (#2866)
 * [d7f8e0c47](https://github.com/argoproj/argo-workflows/commit/d7f8e0c4742b62d9271b6272a8f87c53a4fddea2) fix(CLI): Re-establish workflow watch on disconnect. Fixes #2796 (#2830)
 * [31358d6e2](https://github.com/argoproj/argo-workflows/commit/31358d6e255e28f20803575f5ee0fdf2015ecb68) feat(CLI): Add -v and --verbose to Argo CLI (#2814)
 * [90743353f](https://github.com/argoproj/argo-workflows/commit/90743353fcaf46dae04872935e95ce858e1792b3) feat: Expose workflow.serviceAccountName as global variable (#2838)
 * [f07f7bf61](https://github.com/argoproj/argo-workflows/commit/f07f7bf61147b3444255117c26bfd38261220e95) note that tar.gz'ing output artifacts is optional (#2797)
 * [b956ec65f](https://github.com/argoproj/argo-workflows/commit/b956ec65f372194e0f110e672a2ad50bd51a10d8) fix: Add Step node outputs to global scope (#2826)
 * [52ff43b54](https://github.com/argoproj/argo-workflows/commit/52ff43b54a76f934ae3b491c74e2350fbd2298f2) fix: Artifact panic on unknown artifact. Fixes #2824 (#2829)
 * [554fd06c9](https://github.com/argoproj/argo-workflows/commit/554fd06c9daf7ce1147f949d397e489d508c58ba) fix: Enforce metric naming validation (#2819)

### Contributors

 * Alex Collins
 * Michael Crenshaw
 * Mike Seddon
 * Simon Behar

## v2.8.0-rc2 (2020-04-23)

 * [4126d22b6](https://github.com/argoproj/argo-workflows/commit/4126d22b6f49e347ae1a75dd3ad6f484bee30f11) Update manifests to v2.8.0-rc2
 * [ce6b23e92](https://github.com/argoproj/argo-workflows/commit/ce6b23e92e193ceafd28b81e6f6bafc7cf644c21) revert
 * [0dbd78ff2](https://github.com/argoproj/argo-workflows/commit/0dbd78ff223e592f8761f1334f952e97c9e6ac48) feat: Add TLS support. Closes #2764 (#2766)
 * [510e11b63](https://github.com/argoproj/argo-workflows/commit/510e11b639e0b797cc4253d84e96fb070691b7ab) fix: Allow empty strings in valueFrom.default (#2805)
 * [399591c96](https://github.com/argoproj/argo-workflows/commit/399591c96ed588cfbc96d78268ce35812fcd465b) fix: Don't configure Sonar on CI for release branches
 * [d7f41ac8d](https://github.com/argoproj/argo-workflows/commit/d7f41ac8df15b8ed1e68b2e4f44d64418e4c4000) fix: Print correct version in logs. (#2806)
 * [e0f2697e2](https://github.com/argoproj/argo-workflows/commit/e0f2697e252e7b62842af3b56f924f324f2c48ec) fix(controller): Include global params when using withParam (#2757)
 * [1ea286eb2](https://github.com/argoproj/argo-workflows/commit/1ea286eb237ed86bfde5a4c954927b335ab588f2) fix: ClusterWorkflowTemplate RBAC for argo server (#2753)
 * [1f14f2a5f](https://github.com/argoproj/argo-workflows/commit/1f14f2a5f6054a88f740c6799d443216f694f08f) feat(archive): Implement data retention. Closes #2273 (#2312)
 * [d0cc7764f](https://github.com/argoproj/argo-workflows/commit/d0cc7764fe477465ac2c76de9cc406bbf2aac807) feat: Display argo-server version in `argo version` and in UI. (#2740)
 * [8de572813](https://github.com/argoproj/argo-workflows/commit/8de572813ee9f028cf8e06834f45a3592bc73f14) feat(controller): adds Kubernetes node name to workflow node detail in web UI and CLI output. Implements #2540 (#2732)
 * [52fa5fdee](https://github.com/argoproj/argo-workflows/commit/52fa5fdee9f021b73eca30a199c65a3760462bd9) MySQL config fix (#2681)
 * [43d9eebb4](https://github.com/argoproj/argo-workflows/commit/43d9eebb479242ef23e84135bbe4b9dd252dea46) fix: Rename Submittable API endpoint to `submit` (#2778)
 * [69333a87b](https://github.com/argoproj/argo-workflows/commit/69333a87b0ae411972f7f25b196db989500bbe0c) Fix template scope tests (#2779)
 * [905e0b993](https://github.com/argoproj/argo-workflows/commit/905e0b99312e579dcd8aa8036c2ee57df6fa7a29) fix: Naming error in Makefile (#2774)
 * [7cb2fd177](https://github.com/argoproj/argo-workflows/commit/7cb2fd17765aad691eda25ca4c5acecb89f84394) fix: allow non path output params (#2680)

### Contributors

 * Alex Collins
 * Alex Stein
 * Daisuke Taniwaki
 * Fabio Rigato
 * Saravanan Balasubramanian
 * Simon Behar

## v2.8.0-rc1 (2020-04-20)

 * [4a73f45c3](https://github.com/argoproj/argo-workflows/commit/4a73f45c38a07e9e517c39ed5611d386bcf518bd) Update manifests to v2.8.0-rc1
 * [1c8318eb9](https://github.com/argoproj/argo-workflows/commit/1c8318eb92d17fa2263675cabce5134d3f1e37a2) fix: Add compatiblity mode to templateReference (#2765)
 * [7975952b0](https://github.com/argoproj/argo-workflows/commit/7975952b0aa3ac84ea4559b302236598d1d47954) fix: Consider expanded tasks in getTaskFromNode (#2756)
 * [bc421380c](https://github.com/argoproj/argo-workflows/commit/bc421380c9dfce1b8a25950d2bdc6a71b2e74a2d) fix: Fix template resolution in UI (#2754)
 * [391c0f78a](https://github.com/argoproj/argo-workflows/commit/391c0f78a496dbe0334686dfcabde8c9af8a474f) Make phase and templateRef available for unsuspend and retry selectors (#2723)
 * [a6fa3f71f](https://github.com/argoproj/argo-workflows/commit/a6fa3f71fa6bf742cb2fa90292180344f3744def) fix: Improve cookie security. Fixes #2759 (#2763)
 * [57f0183cd](https://github.com/argoproj/argo-workflows/commit/57f0183cd194767af8f9bcb5fb84ab083c1661c3) Fix typo on the documentation. It causes error unmarshaling JSON: while (#2730)
 * [c6ef1ff19](https://github.com/argoproj/argo-workflows/commit/c6ef1ff19e1c3f74b4ef146be37e74bd0b748cd7) feat(manifests): add name on workflow-controller-metrics service port (#2744)
 * [06c4bd60c](https://github.com/argoproj/argo-workflows/commit/06c4bd60cf2dc85362b3370acd44e4bc3977dcbc) fix: Make ClusterWorkflowTemplate optional for namespaced Installation (#2670)
 * [4ea43e2d6](https://github.com/argoproj/argo-workflows/commit/4ea43e2d63385211cc0a29c2aa1b237797a62f71) fix: Children of onExit nodes are also onExit nodes (#2722)
 * [3f1b66672](https://github.com/argoproj/argo-workflows/commit/3f1b6667282cf3d1b7944f7fdc075ef0f1b8ff36) feat: Add Kustomize as supported install option. Closes #2715 (#2724)
 * [691459ed3](https://github.com/argoproj/argo-workflows/commit/691459ed3591f72251dc230982d7b03dc3d6f9db) fix: Error pending nodes w/o Pods unless resubmitPendingPods is set (#2721)
 * [3c8149fab](https://github.com/argoproj/argo-workflows/commit/3c8149fabfcb84bc57d1973c10fe6dbce96232a0) Fix typo (#2741)
 * [98f60e798](https://github.com/argoproj/argo-workflows/commit/98f60e7985ebd77d42ff99c6d6e1276048fb07f6) feat: Added Workflow SubmitFromResource API (#2544)
 * [6253997a7](https://github.com/argoproj/argo-workflows/commit/6253997a7e25f3ad9fd3c322ea9ca9ad0b710c83) fix: Reset all conditions when resubmitting (#2702)
 * [e7c67de30](https://github.com/argoproj/argo-workflows/commit/e7c67de30df90ba7bbd649a2833dc6efed8a18de) fix: Maybe fix watch. Fixes #2678 (#2719)
 * [cef6dfb6a](https://github.com/argoproj/argo-workflows/commit/cef6dfb6a25445624f864863da45c36380049e6d) fix: Print correct version string. (#2713)
 * [e9589d28a](https://github.com/argoproj/argo-workflows/commit/e9589d28a5dbc7cb620f206bd1fee457a8b29dfe) feat: Increase pod workers and workflow workers both to 32 by default. (#2705)
 * [54f5be361](https://github.com/argoproj/argo-workflows/commit/54f5be361f597d45c97469095a2e5cb5678436a8) style: Camelcase "clusterScope" (#2720)
 * [db6d1416a](https://github.com/argoproj/argo-workflows/commit/db6d1416a11dbd9d963a2df6740908a1d8086ff6) fix: Flakey TestNestedClusterWorkflowTemplate testcase failure (#2613)
 * [b4fd4475c](https://github.com/argoproj/argo-workflows/commit/b4fd4475c2661f12a92ba48a71b52067536044fe) feat(ui): Add a YAML panel to view the workflow manifest. (#2700)
 * [65d413e5d](https://github.com/argoproj/argo-workflows/commit/65d413e5d68b2f1667ef09f3c5938a07c3442fe8) build(ui): Fix compression of UI package. (#2704)
 * [4129528d4](https://github.com/argoproj/argo-workflows/commit/4129528d430be282099e94d7e98d61e40d9c78ba) fix: Don't use docker cache when building release images (#2707)
 * [9d93e971a](https://github.com/argoproj/argo-workflows/commit/9d93e971a66d8f50ad92ff9e15175c6bbfe292c4) Update getting-started.md (#2697)
 * [2737c0abf](https://github.com/argoproj/argo-workflows/commit/2737c0abf77f1555c9a9a59e564d0f1242d2656e) feat: Allow to pass optional flags to resource template (#1779)
 * [c1a2fc7ca](https://github.com/argoproj/argo-workflows/commit/c1a2fc7ca8be7b9286ec01a12a185d8d4360b9f6) Update running-locally.md - fixing incorrect protoc install (#2689)
 * [a1226c461](https://github.com/argoproj/argo-workflows/commit/a1226c4616ad327400b37be19703e65a31919248) fix: Enhanced WorkflowTemplate and ClusterWorkflowTemplate validation to support Global Variables (#2644)
 * [c21cc2f31](https://github.com/argoproj/argo-workflows/commit/c21cc2f31fead552cbab5f4664d20d56cf291619) fix a typo (#2669)
 * [9430a513f](https://github.com/argoproj/argo-workflows/commit/9430a513fe7b5587048a5e74d3c9abc9e36e4304) fix: Namespace-related validation in UI (#2686)
 * [f3eeca6e3](https://github.com/argoproj/argo-workflows/commit/f3eeca6e3b72f27f86678de840d1b6b7497e9473) feat: Add exit code as output variable (#2111)
 * [9f95e23a4](https://github.com/argoproj/argo-workflows/commit/9f95e23a4dc9104da2218c66c66c4475285dfc3e) fix: Report metric emission errors via Conditions (#2676)
 * [c67f5ff55](https://github.com/argoproj/argo-workflows/commit/c67f5ff55b8e41b465e481d7a38d54d551c07ee4) fix: Leaf task with continueOn should not fail DAG (#2668)
 * [9c6351fa6](https://github.com/argoproj/argo-workflows/commit/9c6351fa643f76a7cf36eef3b80cff9bf5880463) feat: Allow step restart on workflow retry. Closes #2334 (#2431)
 * [e2d0aa23a](https://github.com/argoproj/argo-workflows/commit/e2d0aa23ab4ee9b91b018bb556959c60981586e2) fix: Consider offloaded and compressed node in retry and resume (#2645)
 * [4a3ca930e](https://github.com/argoproj/argo-workflows/commit/4a3ca930ef1d944dfd8659d5886d8abc7f6ce42f) fix: Correctly emit events. Fixes #2626 (#2629)
 * [41f91e18a](https://github.com/argoproj/argo-workflows/commit/41f91e18a4f65d8a6626782ebc8920ca02b3cc86) fix: Add DAG as default in UI filter and reorder (#2661)
 * [f138ada68](https://github.com/argoproj/argo-workflows/commit/f138ada68ba0b3c46f546bfef574e212833759ac) fix: DAG should not fail if its tasks have continueOn (#2656)
 * [4c452d5f7](https://github.com/argoproj/argo-workflows/commit/4c452d5f7287179b6a7967fc7d60fb0837bd36ca) fix: Don't attempt to resolve artifacts if task is going to be skipped (#2657)
 * [2cb596da3](https://github.com/argoproj/argo-workflows/commit/2cb596da3dac3c5683ed44e7a363c014e73a38a5) Storage region should be specified (#2538)
 * [4c1b07772](https://github.com/argoproj/argo-workflows/commit/4c1b077725a22d183ecdb24f2f147fee0a6e320c) fix: Sort log entries. (#2647)
 * [268fc4619](https://github.com/argoproj/argo-workflows/commit/268fc46197ac411339c78018f05d76e102447eef) docs: Added doc generator code (#2632)
 * [d58b7fc39](https://github.com/argoproj/argo-workflows/commit/d58b7fc39620fb24e40bb4f55f69c4e0fb5fc017) fix: Add input paremeters to metric scope (#2646)
 * [cc3af0b83](https://github.com/argoproj/argo-workflows/commit/cc3af0b83381e2d4a8da1959c36fd0a466c414ff) fix: Validating Item Param in Steps Template (#2608)
 * [6c685c5ba](https://github.com/argoproj/argo-workflows/commit/6c685c5baf281116340b7b0708f8a29764d72c47) fix: allow onExit to run if wf exceeds activeDeadlineSeconds. Fixes #2603 (#2605)
 * [ffc43ce97](https://github.com/argoproj/argo-workflows/commit/ffc43ce976973c7c20d6c58d7b27a28969ae206f) feat: Added Client validation on Workflow/WFT/CronWF/CWFT (#2612)
 * [24655cd93](https://github.com/argoproj/argo-workflows/commit/24655cd93246e2a25dc858238116f7acec45ea42) feat(UI): Move Workflow parameters to top of submit (#2640)
 * [0a3b159ab](https://github.com/argoproj/argo-workflows/commit/0a3b159ab87bd313896174f8464ffd277b14264c) Use error equals (#2636)
 * [a78ecb7fe](https://github.com/argoproj/argo-workflows/commit/a78ecb7fe040c0040fb12731997351a02e0808a0) docs(users): Add CoreWeave and ConciergeRender (#2641)
 * [14be46707](https://github.com/argoproj/argo-workflows/commit/14be46707f4051db71e9495472e842fbb1eb5ea0) fix: Fix logs part 2 (#2639)
 * [4da6f4f3e](https://github.com/argoproj/argo-workflows/commit/4da6f4f3ee75b2e50206381dad1809d5a21c6cce) feat: Add 'outputs.result' to Container templates (#2584)
 * [212c6d75f](https://github.com/argoproj/argo-workflows/commit/212c6d75fa7e5e8d568e80992d1924a2c51cd631) fix: Support minimal mysql version 5.7.8 (#2633)
 * [8facaceeb](https://github.com/argoproj/argo-workflows/commit/8facaceeb3515d804c3fd276b1802dbd6cf773e8) refactor: Refactor Template context interfaces (#2573)
 * [812813a28](https://github.com/argoproj/argo-workflows/commit/812813a288608e196006d4b8369702d020e61dc4) fix: fix test cases (#2631)
 * [ed028b25f](https://github.com/argoproj/argo-workflows/commit/ed028b25f6c925a02596f90d722283856b003ff8) fix: Fix logging problems. See #2589 (#2595)
 * [d95926fe4](https://github.com/argoproj/argo-workflows/commit/d95926fe40e48932c25a0f70c671ad99f4149505) fix: Fix WorkflowTemplate icons to be more cohesive (#2607)
 * [5a1ac2035](https://github.com/argoproj/argo-workflows/commit/5a1ac20352ab6042958f49a59d0f5227329f654c) fix: Fixes panic in toWorkflow method (#2604)
 * [232bb115e](https://github.com/argoproj/argo-workflows/commit/232bb115eba8e2667653fdbdc9831bee112daa85) fix(error handling): use Errorf instead of New when throwing errors with formatted text (#2598)
 * [eeb2f97be](https://github.com/argoproj/argo-workflows/commit/eeb2f97be5c8787180af9f32f2d5e8baee63ed2f) fix(controller): dag continue on failed. Fixes #2596 (#2597)
 * [21c737793](https://github.com/argoproj/argo-workflows/commit/21c7377932825cd30f67a840d30853f4a48951fa) fix: Fixes lint errors (#2594)
 * [59f746e1a](https://github.com/argoproj/argo-workflows/commit/59f746e1a551180d11e57676f8a2a384b3741599) feat: UI enhancement for Cluster Workflow Template (#2525)
 * [0801a4284](https://github.com/argoproj/argo-workflows/commit/0801a4284a948bbeced83852af27a019e7b33535) fix(cli): Show lint errors of all files (#2552)
 * [79217bc89](https://github.com/argoproj/argo-workflows/commit/79217bc89e892ee82bdd5018b2bba65425924d36) feat(archive): allow specifying a compression level (#2575)
 * [88d261d7f](https://github.com/argoproj/argo-workflows/commit/88d261d7fa72faea19745de588c19de45e7fab88) fix: Use outputs of last child instead of retry node itslef (#2565)
 * [5c08292e4](https://github.com/argoproj/argo-workflows/commit/5c08292e4ee388c1c5ca5291c601d50b2b3374e7) style: Correct the confused logic (#2577)
 * [3d1461445](https://github.com/argoproj/argo-workflows/commit/3d14614459d50b96838fcfd83809ee29499e2917) fix: Fix bug in deleting pods. Fixes #2571 (#2572)
 * [cb739a689](https://github.com/argoproj/argo-workflows/commit/cb739a6897591969b959bd2feebd8ded97b9cb33) feat: Cluster scoped workflow template (#2451)
 * [c63e3d40b](https://github.com/argoproj/argo-workflows/commit/c63e3d40be50479ca3c9a7325bfeb5fd9d31fa7c) feat: Show workflow duration in the index UI page (#2568)
 * [ffbb3b899](https://github.com/argoproj/argo-workflows/commit/ffbb3b899912f7af888d8216bd2ab55bc7106880) fix: Fixes empty/missing CM. Fixes #2285 (#2562)
 * [49801e32f](https://github.com/argoproj/argo-workflows/commit/49801e32f1624ba20926f1b07a6ddafa2f162301) chore(docker): upgrade base image for executor image (#2561)
 * [c4efb8f8b](https://github.com/argoproj/argo-workflows/commit/c4efb8f8b6e28a591794c018f5e61f55dd7d75e3) Add Riskified to the user list (#2558)
 * [8b92d33eb](https://github.com/argoproj/argo-workflows/commit/8b92d33eb2f2de3b593459140576ea8eaff8fb4b) feat: Create K8S events on node completion. Closes #2274 (#2521)

### Contributors

 * Adam Gilat
 * Alex Collins
 * Alex Stein
 * CWen
 * Derek Wang
 * Dustin Specker
 * Gabriele Santomaggio
 * Heikki Kesa
 * Marek Čermák
 * Michael Crenshaw
 * Niklas Hansson
 * Omer Kahani
 * Peng Li
 * Peter Salanki
 * Romain Di Giorgio
 * Saravanan Balasubramanian
 * Simon Behar
 * Song Juchao
 * Vardan Manucharyan
 * Wei Yan
 * lueenavarro
 * mark9white
 * tunoat

## v2.7.7 (2020-05-06)

 * [54154c61e](https://github.com/argoproj/argo-workflows/commit/54154c61eb4fe9f052b04328fb00128568dc20d0) Update manifests to v2.7.7
 * [1254dd440](https://github.com/argoproj/argo-workflows/commit/1254dd440816dfb376b815032d02e1094850c5df) fix(cli): Re-establish watch on EOF (#2944)
 * [42d622b63](https://github.com/argoproj/argo-workflows/commit/42d622b63bc2517e24217b580e5ee4f1e3abb015) fix(controller): Add mutex to nameEntryIDMap in cron controller. Fix #2638 (#2851)
 * [51ce1063d](https://github.com/argoproj/argo-workflows/commit/51ce1063db2595221743eb42c274ed95d922bd48) fix: Print correct version in logs. (#2806)

### Contributors

 * Alex Collins
 * shibataka000

## v2.7.6 (2020-04-28)

 * [70facdb67](https://github.com/argoproj/argo-workflows/commit/70facdb67207dbe115a9029e365f8e974e6156bc) Update manifests to v2.7.6
 * [15f0d741d](https://github.com/argoproj/argo-workflows/commit/15f0d741d64af5de3672ff7860c008152823654b) Fix TestGlobalScope
 * [3a906e655](https://github.com/argoproj/argo-workflows/commit/3a906e655780276b0b016ff751a9deb27fe5e77c) Fix build
 * [b6022a9bd](https://github.com/argoproj/argo-workflows/commit/b6022a9bdde84d6cebe914c4015ce0255d0e9587) fix(controller): Include global params when using withParam (#2757)
 * [728287e89](https://github.com/argoproj/argo-workflows/commit/728287e8942b30acf02bf8ca60b5ec66e1a21058) fix: allow non path output params (#2680)
 * [83fa94065](https://github.com/argoproj/argo-workflows/commit/83fa94065dc60254a4b6873d5621eabd7f711498) fix: Add Step node outputs to global scope (#2826)
 * [462f6af0c](https://github.com/argoproj/argo-workflows/commit/462f6af0c4aa08d535a1ee1982be87e94f62acf1) fix: Enforce metric naming validation (#2819)
 * [ed9f87c55](https://github.com/argoproj/argo-workflows/commit/ed9f87c55c30e7807a2c40e32942aa13e9036f12) fix: Allow empty strings in valueFrom.default (#2805)
 * [4d1690c43](https://github.com/argoproj/argo-workflows/commit/4d1690c437a686ad24c8d62dec5ea725e233876d) fix: Children of onExit nodes are also onExit nodes (#2722)
 * [d40036c3b](https://github.com/argoproj/argo-workflows/commit/d40036c3b28dbdcc2799e23c92a6c002f8d64514) fix(CLI): Re-establish workflow watch on disconnect. Fixes #2796 (#2830)
 * [f1a331a1c](https://github.com/argoproj/argo-workflows/commit/f1a331a1c1639a6070bab51fb473cd37601fc474) fix: Artifact panic on unknown artifact. Fixes #2824 (#2829)

### Contributors

 * Alex Collins
 * Daisuke Taniwaki
 * Simon Behar

## v2.7.5 (2020-04-20)

 * [ede163e1a](https://github.com/argoproj/argo-workflows/commit/ede163e1af83cfce29b519038be8127664421329) Update manifests to v2.7.5
 * [ab18ab4c0](https://github.com/argoproj/argo-workflows/commit/ab18ab4c07c0881af30a0e7900922d9fdad4d546) Hard-code build opts
 * [ca77a5e62](https://github.com/argoproj/argo-workflows/commit/ca77a5e62e40d6d877700295cd37b51ebe8e0d6c) Resolve conflicts
 * [dacfa20fe](https://github.com/argoproj/argo-workflows/commit/dacfa20fec70adfc6777b1d24d8b44c302d3bf46) fix: Error pending nodes w/o Pods unless resubmitPendingPods is set (#2721)
 * [e014c6e0c](https://github.com/argoproj/argo-workflows/commit/e014c6e0ce67140f3d63a2a29206f304155386b6) Run make manifests
 * [ee107969d](https://github.com/argoproj/argo-workflows/commit/ee107969da597ef383185b96eaf6d9aca289a7f6) fix: Improve cookie security. Fixes #2759 (#2763)
 * [e8cd8d776](https://github.com/argoproj/argo-workflows/commit/e8cd8d7765fedd7f381845d28804f5aa172f4d62) fix: Consider expanded tasks in getTaskFromNode (#2756)
 * [ca5cdc47a](https://github.com/argoproj/argo-workflows/commit/ca5cdc47aab8d7c7acadec678df3edf159615641) fix: Reset all conditions when resubmitting (#2702)
 * [80dd96af7](https://github.com/argoproj/argo-workflows/commit/80dd96af702d9002af480f3659a35914c4d71d14) feat: Add Kustomize as supported install option. Closes #2715 (#2724)
 * [306a1189b](https://github.com/argoproj/argo-workflows/commit/306a1189b1a6b734a55d9c5a1ec83ce39c939f8d) fix: Maybe fix watch. Fixes #2678 (#2719)
 * [5b05519d1](https://github.com/argoproj/argo-workflows/commit/5b05519d15874faf357da6e2e85ba97bd86d7a29) fix: Print correct version string. (#2713)

### Contributors

 * Alex Collins
 * Simon Behar

## v2.7.4 (2020-04-16)

 * [50b209ca1](https://github.com/argoproj/argo-workflows/commit/50b209ca14c056fb470ebb8329e255304dd5be90) Update manifests to v2.7.4
 * [a8ecd5139](https://github.com/argoproj/argo-workflows/commit/a8ecd513960c2810a7789e43f958517f0884ebd7) chore(docker): upgrade base image for executor image (#2561)

### Contributors

 * Dustin Specker
 * Simon Behar

## v2.7.3 (2020-04-15)

 * [66bd04252](https://github.com/argoproj/argo-workflows/commit/66bd0425280c801c06f21cf9a4bed46ee6f1e660) go mod tidy
 * [a8cd8b834](https://github.com/argoproj/argo-workflows/commit/a8cd8b83473ed3825392b9b4c6bd0090e9671e2a) Update manifests to v2.7.3
 * [b879f5c62](https://github.com/argoproj/argo-workflows/commit/b879f5c629f0cf5aeaa928f5b483c71ecbdedd55) fix: Don't use docker cache when building release images (#2707)
 * [60fe5bd3c](https://github.com/argoproj/argo-workflows/commit/60fe5bd3cd9d205246dd96f1f06f2ff818853dc6) fix: Report metric emission errors via Conditions (#2676)
 * [04f79f2bb](https://github.com/argoproj/argo-workflows/commit/04f79f2bbde4e650a37a45ca87cd047cd0fdbaa9) fix: Leaf task with continueOn should not fail DAG (#2668)

### Contributors

 * Alex Collins
 * Simon Behar

## v2.7.2 (2020-04-10)

 * [c52a65aa6](https://github.com/argoproj/argo-workflows/commit/c52a65aa62426f5e874e1d3f1058af15c43eb35f) Update manifests to v2.7.2
 * [180f9e4d1](https://github.com/argoproj/argo-workflows/commit/180f9e4d103782c910ea7a06c463d5de1b0a4ec4) fix: Consider offloaded and compressed node in retry and resume (#2645)
 * [a28fc4fbe](https://github.com/argoproj/argo-workflows/commit/a28fc4fbea0e315e75d1fbddc052aeab7f011e51) fix: allow onExit to run if wf exceeds activeDeadlineSeconds. Fixes #2603 (#2605)
 * [6983e56b2](https://github.com/argoproj/argo-workflows/commit/6983e56b26f805a152deee256c408325294945c2) fix: Support minimal mysql version 5.7.8 (#2633)
 * [f99fa50fb](https://github.com/argoproj/argo-workflows/commit/f99fa50fbf46a60f1b99e7b2916a92cacd52a40a) fix: Add DAG as default in UI filter and reorder (#2661)
 * [0a2c0d1a0](https://github.com/argoproj/argo-workflows/commit/0a2c0d1a0e9010a612834154784f54379aa6d87c) fix: DAG should not fail if its tasks have continueOn (#2656)
 * [b7a8f6e69](https://github.com/argoproj/argo-workflows/commit/b7a8f6e69bbba6c312df7df188ac78a1a83c6278) fix: Don't attempt to resolve artifacts if task is going to be skipped (#2657)
 * [910db6655](https://github.com/argoproj/argo-workflows/commit/910db665513cba47bbbbb4d8810936db2a6d5038) fix: Add input paremeters to metric scope (#2646)
 * [05e5ce6db](https://github.com/argoproj/argo-workflows/commit/05e5ce6db97418b248dec274ec5c3dd13585442b) fix: Sort log entries. (#2647)
 * [b35f23372](https://github.com/argoproj/argo-workflows/commit/b35f2337221e77f5deaad79c8b376cb41eeb1fb4) fix: Fix logs part 2 (#2639)
 * [733ace4dd](https://github.com/argoproj/argo-workflows/commit/733ace4dd989b124dfaae99fc784f3d10d1ccb34) fix: Fix logging problems. See #2589 (#2595)
 * [e99309b8e](https://github.com/argoproj/argo-workflows/commit/e99309b8eb80f94773816e9134f153529cfa8e63) remove file

### Contributors

 * Alex Collins
 * Derek Wang
 * Simon Behar
 * mark9white

## v2.7.1 (2020-04-07)

 * [2a3f59c10](https://github.com/argoproj/argo-workflows/commit/2a3f59c10ae260a460b6ad97a0cadd8667d4b488) Update manifests to v2.7.1
 * [25f673dfa](https://github.com/argoproj/argo-workflows/commit/25f673dfad7a32c2337c3696d639e8762f6f6eb8) fix: Fixes panic in toWorkflow method (#2604)
 * [8c799b1f0](https://github.com/argoproj/argo-workflows/commit/8c799b1f002da0088b37159265aa78db43257894) make codegen
 * [d02c46200](https://github.com/argoproj/argo-workflows/commit/d02c46200d0856bdfb8980325e3d7ed7b07c2d2a) fix(error handling): use Errorf instead of New when throwing errors with formatted text (#2598)
 * [c0d50ca2e](https://github.com/argoproj/argo-workflows/commit/c0d50ca2ef43d3d5f9ae37e7f594db43dde9d361) fix(controller): dag continue on failed. Fixes #2596 (#2597)
 * [12ac33877](https://github.com/argoproj/argo-workflows/commit/12ac33877dbb64a74ef910de2e4182eb18ff5395) fix: Fixes lint errors (#2594)
 * [fd49ef2d0](https://github.com/argoproj/argo-workflows/commit/fd49ef2d04051f7a04c61ac41be1e5d2079b5725) fix(cli): Show lint errors of all files (#2552)
 * [e697dbc5e](https://github.com/argoproj/argo-workflows/commit/e697dbc5ec29c5d6e370f5ebf89b12b94c7a6ac2) fix: Use outputs of last child instead of retry node itslef (#2565)
 * [7623a4f36](https://github.com/argoproj/argo-workflows/commit/7623a4f3640c68e6893238a78ca30ca2f2790f8c) style: Correct the confused logic (#2577)
 * [f619f8ff1](https://github.com/argoproj/argo-workflows/commit/f619f8ff1f7cfa19062ef1dca77177efa8338076) fix: Fix bug in deleting pods. Fixes #2571 (#2572)
 * [4c623bee7](https://github.com/argoproj/argo-workflows/commit/4c623bee7ff51feaf3a6012258eb062043f0941d) feat: Show workflow duration in the index UI page (#2568)
 * [f97be738b](https://github.com/argoproj/argo-workflows/commit/f97be738b25ba7b29064198801a366d86593c8ae) fix: Fixes empty/missing CM. Fixes #2285 (#2562)
 * [2902e144d](https://github.com/argoproj/argo-workflows/commit/2902e144ddba2f8c5a93cdfc8e2437c04705065b) feat: Add node type and phase filter to UI (#2555)
 * [fb74ba1ce](https://github.com/argoproj/argo-workflows/commit/fb74ba1ce27b96473411c2c5cfe9a86972af589e) fix: Separate global scope processing from local scope building (#2528)

### Contributors

 * Alex Collins
 * Heikki Kesa
 * Niklas Hansson
 * Peng Li
 * Simon Behar
 * Vardan Manucharyan
 * Wei Yan

## v2.7.0 (2020-03-31)

 * [4d1175eb6](https://github.com/argoproj/argo-workflows/commit/4d1175eb68f6578ed5d599f877be9b4855d33ce9) Update manifests to v2.7.0
 * [618b6dee4](https://github.com/argoproj/argo-workflows/commit/618b6dee4de973b3f3ef1d1164a44b9cb176355e) fix: Fixes --kubeconfig flag. Fixes #2492 (#2553)

### Contributors

 * Alex Collins

## v2.7.0-rc4 (2020-03-30)

 * [479fa48a9](https://github.com/argoproj/argo-workflows/commit/479fa48a963b16903e11475b947b6a860d7a68ba) Update manifests to v2.7.0-rc4
 * [15a3c9903](https://github.com/argoproj/argo-workflows/commit/15a3c990359c40d791be64a34736e2a1ffa40178) feat: Report SpecWarnings in status.conditions (#2541)
 * [93b6be619](https://github.com/argoproj/argo-workflows/commit/93b6be619523ec3d9d8c52c75d9fa540e0272c7f) fix(archive): Fix bug that prevents listing archive workflows. Fixes … (#2523)
 * [b4c9c54f7](https://github.com/argoproj/argo-workflows/commit/b4c9c54f79d902f2372192f017192fa519800fd8) fix: Omit config key in configure artifact document. (#2539)
 * [864bf1e56](https://github.com/argoproj/argo-workflows/commit/864bf1e56812b0ea1434b3952073a3e15dd9f046) fix: Show template on its own field in CLI (#2535)
 * [5e1e78295](https://github.com/argoproj/argo-workflows/commit/5e1e78295df4df0205a47adcedde6f1d5915af95) fix: Validate CronWorkflow before creation (#2532)
 * [c92413393](https://github.com/argoproj/argo-workflows/commit/c92413393404bd4caeb00606b3ba8775eeadf231) fix: Fix wrong assertions (#2531)
 * [67fe04bb7](https://github.com/argoproj/argo-workflows/commit/67fe04bb78ac7b402bb6ef5b58d5cca33ecd74db) Revert "fix: fix template scope tests (#2498)" (#2526)
 * [30542be7a](https://github.com/argoproj/argo-workflows/commit/30542be7a121cf8774352bf987ee658b5d8b96c8) chore(docs): Update docs for useSDKCreds (#2518)
 * [e2cc69880](https://github.com/argoproj/argo-workflows/commit/e2cc6988018e50956c05ed20c665ead01766278d) feat: More control over resuming suspended nodes Fixes #1893 (#1904)
 * [b1ad163ac](https://github.com/argoproj/argo-workflows/commit/b1ad163ac17312d103c03bf6a88069f1b055ea7d) fix: fix template scope tests (#2498)

### Contributors

 * Alex Collins
 * Daisuke Taniwaki
 * Ejiah
 * Simon Behar
 * Zach Aller
 * mark9white

## v2.7.0-rc3 (2020-03-25)

 * [2bb0a7a4f](https://github.com/argoproj/argo-workflows/commit/2bb0a7a4fd7bbf3da12ac449c3d20f8d5baf0995) Update manifests to v2.7.0-rc3
 * [661d1b674](https://github.com/argoproj/argo-workflows/commit/661d1b6748b25488b288811dc5c0089b49b75a52) Increase client gRPC max size to match server (#2514)
 * [d8aa477f7](https://github.com/argoproj/argo-workflows/commit/d8aa477f7f5089505df5fd26560f53f508f5b29f) fix: Fix potential panic (#2516)
 * [1afb692ee](https://github.com/argoproj/argo-workflows/commit/1afb692eeb6a63cb0539cbc6762d8219b2b2dd00) fix: Allow runtime resolution for workflow parameter names (#2501)
 * [243ea338d](https://github.com/argoproj/argo-workflows/commit/243ea338de767a39947f5fb4450321083a6f9c67) fix(controller): Ensure we copy any executor securityContext when creating wait containers; fixes #2512 (#2510)
 * [6e8c7badc](https://github.com/argoproj/argo-workflows/commit/6e8c7badcfa3f2eb7d5cb76f229e0570f3325f61) feat: Extend workflowDefaults to full Workflow and clean up docs and code (#2508)
 * [06cfc1294](https://github.com/argoproj/argo-workflows/commit/06cfc1294a5a913a8b23bc4337ffa019717c4af2) feat: Native Google Cloud Storage support for artifact. Closes #1911 (#2484)
 * [999b1e1d9](https://github.com/argoproj/argo-workflows/commit/999b1e1d9a6c9d69def35fd43d01b03c75748e62) fix: Read ConfigMap before starting servers (#2507)
 * [e5bd6a7ed](https://github.com/argoproj/argo-workflows/commit/e5bd6a7ed35a4d5ed75023719814541423affc48) fix(controller): Updates GetTaskAncestry to skip visited nod. Fixes #1907 (#1908)
 * [e636000bc](https://github.com/argoproj/argo-workflows/commit/e636000bc457d654d487e065c1bcacd15ed75a74) feat: Updated arm64 support patch (#2491)
 * [559cb0059](https://github.com/argoproj/argo-workflows/commit/559cb00596acbcc9a6a9cce001ca25fdcc561b2b) feat(ui): Report resources duration in UI. Closes #2460 (#2489)
 * [09291d9d5](https://github.com/argoproj/argo-workflows/commit/09291d9d59e1fe51b1622b90ac18c6a5985b6a85) feat: Add default field in parameters.valueFrom (#2500)
 * [33cd4f2b8](https://github.com/argoproj/argo-workflows/commit/33cd4f2b86e8b0993563d70c6b0d6f0f91b14535) feat(config): Make configuration mangement easier. Closes #2463 (#2464)

### Contributors

 * Alex Collins
 * Derek Wang
 * Simon Behar
 * StoneHuang
 * Xin Wang
 * mark9white
 * vatine

## v2.7.0-rc2 (2020-03-23)

 * [240d7ad92](https://github.com/argoproj/argo-workflows/commit/240d7ad9298c60a69d4ce056e3d83ef9283a83ec) Update manifests to v2.7.0-rc2
 * [487ed4258](https://github.com/argoproj/argo-workflows/commit/487ed425840dc5698a4ef3a3c8f214b6c08949cc) feat: Logging the Pod Spec in controller log (#2476)
 * [96c80e3e2](https://github.com/argoproj/argo-workflows/commit/96c80e3e2c6eb6867e360dde3dea97047b963c2f) fix(cli): Rearrange the order of chunk size argument in list command. Closes #2420 (#2485)
 * [53a10564a](https://github.com/argoproj/argo-workflows/commit/53a10564aebc6ee17eb8e3e121b4c36b2a334b87) feat(usage): Report resource duration. Closes #1066 (#2219)
 * [063d9bc65](https://github.com/argoproj/argo-workflows/commit/063d9bc657b00e23ce7722d5d08ca69347fe7205) Revert "feat: Add support for arm64 platform (#2364)" (#2482)
 * [735d25e9d](https://github.com/argoproj/argo-workflows/commit/735d25e9d719b409a7517685bcb4148278bef5a1) fix: Build image with SHA tag when a git tag is not available (#2479)
 * [e1c9f7afc](https://github.com/argoproj/argo-workflows/commit/e1c9f7afcb4f685f615235ae1d0b6000add93635) fix ParallelSteps child type so replacements happen correctly; fixes argoproj-labs/argo-client-gen#5 (#2478)
 * [55c315db2](https://github.com/argoproj/argo-workflows/commit/55c315db2e87fe28dcc26f49f4ee969bae9c7ea1) feat: Add support for IRSA and aws default provider chain. (#2468)
 * [c724c7c1a](https://github.com/argoproj/argo-workflows/commit/c724c7c1afca646e09c0cb82acf8b59f8c413780) feat: Add support for arm64 platform (#2364)
 * [315dc164d](https://github.com/argoproj/argo-workflows/commit/315dc164dcd24d0443b49ac95d49eb06b2c2a64f) feat: search archived wf by startat. Closes #2436 (#2473)

### Contributors

 * Alex Collins
 * Derek Wang
 * Huan-Cheng Chang
 * Michael Crenshaw
 * Saravanan Balasubramanian
 * Simon Behar
 * Xin Wang
 * Zach Aller

## v2.7.0-rc1 (2020-03-18)

 * [55702224c](https://github.com/argoproj/argo-workflows/commit/55702224cdb1da698b84fdcfb7ae1199afde8eee) Update manifests to v2.7.0-rc1
 * [23d230bd5](https://github.com/argoproj/argo-workflows/commit/23d230bd54e04af264a0977545db365a2c0d6a6d) feat(ui): add Env to Node Container Info pane. Closes #2471 (#2472)
 * [10a0789b9](https://github.com/argoproj/argo-workflows/commit/10a0789b9477b1b6c1b7adda71101925989d02de) fix: ParallelSteps swagger.json (#2459)
 * [a59428e72](https://github.com/argoproj/argo-workflows/commit/a59428e72c092e12b17c2bd8f22ee2e86eec043f) fix: Duration must be a string. Make it a string. (#2467)
 * [47bc6f3b7](https://github.com/argoproj/argo-workflows/commit/47bc6f3b7450895aa35f9275b326077bb08453b5) feat: Add `argo stop` command (#2352)
 * [14478bc07](https://github.com/argoproj/argo-workflows/commit/14478bc07f42ae9ee362cc1531b1cf00d923211d) feat(ui): Add the ability to have links to logging facility in UI. Closes #2438 (#2443)
 * [a85f62c5e](https://github.com/argoproj/argo-workflows/commit/a85f62c5e8ee1a51f5fa8fd715ebdf4140d2483d) feat: Custom, step-level, and usage metrics (#2254)
 * [64ac02980](https://github.com/argoproj/argo-workflows/commit/64ac02980ea641d92f22328442e5a12893600d67) fix: Deprecate template.{template,templateRef,arguments} (#2447)
 * [6cb79e4e5](https://github.com/argoproj/argo-workflows/commit/6cb79e4e5414277932e5cf755761cec4cda7e1b7) fix: Postgres persistence SSL Mode (#1866) (#1867)
 * [2205c0e16](https://github.com/argoproj/argo-workflows/commit/2205c0e162c93645a5ae1d883aec6ae33fec3c8f) fix(controller): Updates to add condition to workflow status. Fixes #2421 (#2453)
 * [9d96ab2ff](https://github.com/argoproj/argo-workflows/commit/9d96ab2ffd6cec9fc65f0182234e103664ab9cd5) fix: make dir if needed (#2455)
 * [3448ccf91](https://github.com/argoproj/argo-workflows/commit/3448ccf91cbff2e3901a99e23e57a0e1ad97044c) fix: Delete PVCs unless WF Failed/Errored (#2449)
 * [782bc8e7c](https://github.com/argoproj/argo-workflows/commit/782bc8e7c5d1fd102f1a16d07f209aed3bfdc689) fix: Don't error when optional artifacts are not found (#2445)
 * [32fc2f782](https://github.com/argoproj/argo-workflows/commit/32fc2f78212d031f99f1dfc5ad3a3642617ce7e7) feat: Support workflow templates submission. Closes #2007 (#2222)
 * [050a143d7](https://github.com/argoproj/argo-workflows/commit/050a143d7639ad38dc01a685edce536917409a37) fix(archive): Fix edge-cast error for archiving. Fixes #2427 (#2434)
 * [9455c1b88](https://github.com/argoproj/argo-workflows/commit/9455c1b88d85f80091aa4fd2c8d4dc53b6cc73f8) doc: update CHANGELOG.md (#2425)
 * [1baa7ee4e](https://github.com/argoproj/argo-workflows/commit/1baa7ee4ec7149afe789d73ed6e64abfe13387a7) feat(ui): cache namespace selection. Closes #2439 (#2441)
 * [91d29881f](https://github.com/argoproj/argo-workflows/commit/91d29881f41642273fe0494bef70f2b9c41350e2) feat: Retry pending nodes (#2385)
 * [30332b14f](https://github.com/argoproj/argo-workflows/commit/30332b14fb1043e22a66db594f1af252c5932853) fix: Allow numbers in steps.args.params.value (#2414)
 * [e9a06dde2](https://github.com/argoproj/argo-workflows/commit/e9a06dde297e9f907d10ec88da93fbb90df5ebaf) feat: instanceID support for argo server. Closes #2004 (#2365)
 * [3f8be0cd4](https://github.com/argoproj/argo-workflows/commit/3f8be0cd48963958c493e7669a1d03bb719b375a) fix "Unable to retry workflow" on argo-server (#2409)
 * [135088284](https://github.com/argoproj/argo-workflows/commit/135088284acd1ced004374d20928c017fbf9cac7) fix: Check child node status before backoff in retry (#2407)
 * [b59419c9f](https://github.com/argoproj/argo-workflows/commit/b59419c9f58422f60c7d5185c89b4d55ac278660) fix: Build with the correct version if you check out a specific version (#2423)
 * [184c36530](https://github.com/argoproj/argo-workflows/commit/184c3653085bc8821bdcd65f5476fbe24f24b00e) fix: Remove lazy workflow template (#2417)
 * [20d6e27bd](https://github.com/argoproj/argo-workflows/commit/20d6e27bdf11389f23b2efe1be4ef737f333221d) Update CONTRIBUTING.md (#2410)
 * [f2ca045e1](https://github.com/argoproj/argo-workflows/commit/f2ca045e1cad03d5ec7566ff7200fd8ca575ec5d) feat: Allow WF metadata spec on Cron WF (#2400)
 * [068a43362](https://github.com/argoproj/argo-workflows/commit/068a43362b2088f53d408623bc7ab078e0e7a9d0) fix: Correctly report version. Fixes #2374 (#2402)
 * [e19a398c8](https://github.com/argoproj/argo-workflows/commit/e19a398c810fada879facd624a7663501306e1ef) Update pull_request_template.md (#2401)
 * [175b164c3](https://github.com/argoproj/argo-workflows/commit/175b164c33aee7fe2873df60915a881502ec9163) Change font family for class yaml (#2394)
 * [d11947558](https://github.com/argoproj/argo-workflows/commit/d11947558bc758e5102238162036650890731ec6) fix: Don't display Retry Nodes in UI if len(children) == 1 (#2390)
 * [1d21d3f56](https://github.com/argoproj/argo-workflows/commit/1d21d3f5600feca4b63e3dc4b1d94d2830fa6e24) fix(doc strings): Fix bug related documentation/clean up of default configurations #2331 (#2388)
 * [42200fad4](https://github.com/argoproj/argo-workflows/commit/42200fad45b4925b8f4aac48a580e6e369de2ad4) fix(controller): Mount volumes defined in script templates. Closes #1722 (#2377)
 * [96af36d85](https://github.com/argoproj/argo-workflows/commit/96af36d85d70d4721b1ac3e6e0ef14db65e7aec3) fix: duration must be a string (#2380)
 * [7bf081926](https://github.com/argoproj/argo-workflows/commit/7bf0819267543808d80acaa5f39f40c1fdba511e) fix: Say no logs were outputted when pod is done (#2373)
 * [847c3507d](https://github.com/argoproj/argo-workflows/commit/847c3507dafdd3ff2cd1acca4669c1a54a680ee2) fix(ui): Removed tailLines from EventSource (#2330)
 * [3890a1243](https://github.com/argoproj/argo-workflows/commit/3890a12431bfacc83cc75d862f956ddfbc1d2a37) feat: Allow for setting default configurations for workflows, Fixes #1923, #2044 (#2331)
 * [81ab53859](https://github.com/argoproj/argo-workflows/commit/81ab538594ad0428a97e99f34b18041f31a1c753) Update readme (#2379)
 * [918102733](https://github.com/argoproj/argo-workflows/commit/91810273318ab3ea84ecf73b9d0a6f1ba7f43c2a) feat: Log version (structured) on component start-up (#2375)
 * [5b6b82578](https://github.com/argoproj/argo-workflows/commit/5b6b8257890d3c7aa93d8e98b10090add08a22e1) fix(docker): fix streaming of combined stdout/stderr (#2368)
 * [974383130](https://github.com/argoproj/argo-workflows/commit/9743831306714cc85b762487ac070f77e25f85d6) fix: Restart server ConfigMap watch when closed (#2360)
 * [12386fc60](https://github.com/argoproj/argo-workflows/commit/12386fc6029f5533921c75797455efc62e4cc9ce) fix: rerun codegen after merging OSS artifact support (#2357)
 * [40586ed5c](https://github.com/argoproj/argo-workflows/commit/40586ed5c3a539d2e13f8a34509a40367563874a) fix: Always validate templates (#2342)
 * [897db8943](https://github.com/argoproj/argo-workflows/commit/897db89434079fa3b3b902253d1c624c39af1422) feat: Add support for Alibaba Cloud OSS artifact (#1919)
 * [7e2dba036](https://github.com/argoproj/argo-workflows/commit/7e2dba03674219ec35e88b2ce785fdf120f855fd) feat(ui): Circles for nodes (#2349)
 * [7ae4ec78f](https://github.com/argoproj/argo-workflows/commit/7ae4ec78f627b620197a323b190fa33c31ffcbcc) docker: remove NopCloser from the executor. (#2345)
 * [5895b3642](https://github.com/argoproj/argo-workflows/commit/5895b3642a691629b6c8aa145cf17627a227665f) feat: Expose workflow.paramteres with JSON string of all params (#2341)
 * [a9850b43b](https://github.com/argoproj/argo-workflows/commit/a9850b43b16e05d9f74f52c789a8475d493f4c92) Fix the default (#2346)
 * [c3763d34e](https://github.com/argoproj/argo-workflows/commit/c3763d34ed02bc63d166e8ef4f2f724786a2cf7c) fix: Simplify completion detection logic in DAGs (#2344)
 * [d8a9ea09b](https://github.com/argoproj/argo-workflows/commit/d8a9ea09be395241664d929e8dbca7d02aecb049) fix(auth): Fixed returning expired Auth token for GKE (#2327)
 * [6fef04540](https://github.com/argoproj/argo-workflows/commit/6fef0454073fb60b4dd6216accef07f5195ec7e9) fix: Add timezone support to startingDeadlineSeconds (#2335)
 * [a66c8802c](https://github.com/argoproj/argo-workflows/commit/a66c8802c7d0dbec9b13d408b91655e41531a97a) feat: Allow Worfklows to be submitted as files from UI (#2340)
 * [8672b97f1](https://github.com/argoproj/argo-workflows/commit/8672b97f134dacb553592c367399229891aaf5c8) fix(Dockerfile): Using `--no-install-recommends` (Optimization) (#2329)
 * [c3fe1ae1b](https://github.com/argoproj/argo-workflows/commit/c3fe1ae1b3ad662bc94a4b46e72f20c957dd4475) fix(ui): fixed worflow UI refresh. Fixes ##2337 (#2338)
 * [d7690e32f](https://github.com/argoproj/argo-workflows/commit/d7690e32faf2ac5842468831daf1443283703c25) feat(ui): Adds ability zoom and hide successful steps. POC (#2319)
 * [e9e13d4cb](https://github.com/argoproj/argo-workflows/commit/e9e13d4cbbc0f456c2d1dafbb1a95739127f6ab4) feat: Allow retry strategy on non-leaf nodes, eg for step groups. Fixes #1891 (#1892)
 * [62e6db826](https://github.com/argoproj/argo-workflows/commit/62e6db826ea4e0a02ac839bc59ec5f70ce3b9b29) feat: Ability to include or exclude fields in the response (#2326)
 * [52ba89ad4](https://github.com/argoproj/argo-workflows/commit/52ba89ad4911fd4c7b13fd6dbc7f019971354ea0) fix(swagger): Fix the broken swagger. (#2317)
 * [1c77e864a](https://github.com/argoproj/argo-workflows/commit/1c77e864ac004f9cc6aff0e204ea9fd4b056c84b) fix(swagger): Fix the broken swagger. (#2317)
 * [aa0523469](https://github.com/argoproj/argo-workflows/commit/aa05234694bc79e649e02adcc9790778cef0154d) feat: Support workflow level poddisruptionbudge for workflow pods #1728 (#2286)
 * [5dcb84bb5](https://github.com/argoproj/argo-workflows/commit/5dcb84bb549429ba5f46a21873e873a2c1c5bf67) chore(cli): Clean-up code. Closes #2117 (#2303)
 * [e49dd8c4f](https://github.com/argoproj/argo-workflows/commit/e49dd8c4f9f69551be7e31c2044fef043d2992b2) chore(cli): Migrate `argo logs` to use API client. See #2116 (#2177)
 * [5c3d9cf93](https://github.com/argoproj/argo-workflows/commit/5c3d9cf93079ecbbfb024ea273d6e57e56c2506d) chore(cli): Migrate `argo wait` to use API client. See #2116 (#2282)
 * [baf03f672](https://github.com/argoproj/argo-workflows/commit/baf03f672728a6ed8b2aeb986d84ce35e9d7717a) fix(ui): Provide a link to archived logs. Fixes #2300 (#2301)

### Contributors

 * Aaron Curtis
 * Alex Collins
 * Antoine Dao
 * Antonio Macías Ojeda
 * Daisuke Taniwaki
 * Derek Wang
 * EDGsheryl
 * Huan-Cheng Chang
 * Michael Crenshaw
 * Mingjie Tang
 * Niklas Hansson
 * Pascal VanDerSwalmen
 * Pratik Raj
 * Roman Galeev
 * Saradhi Sreegiriraju
 * Saravanan Balasubramanian
 * Simon Behar
 * Theodore Messinezis
 * Tristan Colgate-McFarlane
 * fsiegmund
 * mark9white
 * tkilpela

## v2.6.4 (2020-04-15)

 * [e6caf9845](https://github.com/argoproj/argo-workflows/commit/e6caf9845976c9c61e5dc66842c30fd41bde952b) Update manifests to v2.6.4
 * [5aeb3ecf3](https://github.com/argoproj/argo-workflows/commit/5aeb3ecf3b58708722243692017ef562636a2d14) fix: Don't use docker cache when building release images (#2707)

### Contributors

 * Alex Collins
 * Simon Behar

## v2.6.3 (2020-03-16)

 * [2e8ac609c](https://github.com/argoproj/argo-workflows/commit/2e8ac609cba1ad3d69c765dea19bc58ea4b8a8c3) Update manifests to v2.6.3
 * [9633bad1d](https://github.com/argoproj/argo-workflows/commit/9633bad1d0b9084a1094b8524cac06b7407268e7) fix: Delete PVCs unless WF Failed/Errored (#2449)
 * [a0b933a0e](https://github.com/argoproj/argo-workflows/commit/a0b933a0ed03a8ee89087f7d24305aa161872290) fix: Don't error when optional artifacts are not found (#2445)
 * [d1513e68b](https://github.com/argoproj/argo-workflows/commit/d1513e68b17af18469930556762e880d656d2584) fix: Allow numbers in steps.args.params.value (#2414)
 * [9c608e50a](https://github.com/argoproj/argo-workflows/commit/9c608e50a51bfb2101482144086f35c157fc5204) fix: Check child node status before backoff in retry (#2407)
 * [8ad643c40](https://github.com/argoproj/argo-workflows/commit/8ad643c402bb68ee0f549966e2ed55633af98fd2) fix: Say no logs were outputted when pod is done (#2373)
 * [60fcfe902](https://github.com/argoproj/argo-workflows/commit/60fcfe902a8f376bef096a3dcd58466ba0f7a164) fix(ui): Removed tailLines from EventSource (#2330)
 * [6ec81d351](https://github.com/argoproj/argo-workflows/commit/6ec81d351f6dfb8a6441d4793f5b8203c4a1b0bd) fix "Unable to retry workflow" on argo-server (#2409)
 * [642ccca24](https://github.com/argoproj/argo-workflows/commit/642ccca249598e754fa99cdbf51f5d8a452d4e76) fix: Build with the correct version if you check out a specific version (#2423)

### Contributors

 * Alex Collins
 * EDGsheryl
 * Simon Behar
 * tkilpela

## v2.6.2 (2020-03-12)

 * [be0a0bb46](https://github.com/argoproj/argo-workflows/commit/be0a0bb46ba50ed4d48ab2fd74c81216d4558b56) Update manifests to v2.6.2
 * [09ec9a0df](https://github.com/argoproj/argo-workflows/commit/09ec9a0df76b7234f50e4a6ccecdd14c2c27fc02) fix(docker): fix streaming of combined stdout/stderr (#2368)
 * [64b6f3a48](https://github.com/argoproj/argo-workflows/commit/64b6f3a48865e466f8efe58d923187ab0fbdd550) fix: Correctly report version. Fixes #2374 (#2402)

### Contributors

 * Alex Collins

## v2.6.1 (2020-03-04)

 * [842739d78](https://github.com/argoproj/argo-workflows/commit/842739d7831cc5b417c4f524ed85288408a32bbf) Update manifests to v2.6.1
 * [64c6aa43e](https://github.com/argoproj/argo-workflows/commit/64c6aa43e34a25674180cbd5073a72f634df99cd) fix: Restart server ConfigMap watch when closed (#2360)
 * [9ff429aa4](https://github.com/argoproj/argo-workflows/commit/9ff429aa4eea32330194968fda2a2386aa252644) fix: Always validate templates (#2342)
 * [51c3ad335](https://github.com/argoproj/argo-workflows/commit/51c3ad3357fa621fddb77f154f1411a817d1623f) fix: Simplify completion detection logic in DAGs (#2344)
 * [3de7e5139](https://github.com/argoproj/argo-workflows/commit/3de7e5139b55f754624acd50da3852874c82fd76) fix(auth): Fixed returning expired Auth token for GKE (#2327)
 * [fa2a30233](https://github.com/argoproj/argo-workflows/commit/fa2a302336afab94d357c379c4849d772edc1915) fix: Add timezone support to startingDeadlineSeconds (#2335)
 * [a9b6a254a](https://github.com/argoproj/argo-workflows/commit/a9b6a254ab2312737bef9756159a05e31b52d781) fix(ui): fixed worflow UI refresh. Fixes ##2337 (#2338)
 * [793c072ed](https://github.com/argoproj/argo-workflows/commit/793c072edba207ae12bd07d7b47e827cec8d914e) docker: remove NopCloser from the executor. (#2345)

### Contributors

 * Alex Collins
 * Derek Wang
 * Saravanan Balasubramanian
 * Simon Behar
 * Tristan Colgate-McFarlane
 * fsiegmund

## v2.6.0 (2020-02-28)

 * [5d3bdd566](https://github.com/argoproj/argo-workflows/commit/5d3bdd56607eea962183a9e45009e3d08fafdf9b) Update manifests to v2.6.0

### Contributors

 * Alex Collins

## v2.6.0-rc3 (2020-02-25)

 * [fc24de462](https://github.com/argoproj/argo-workflows/commit/fc24de462b9b7aa5882ee2ecc2051853c919da37) Update manifests to v2.6.0-rc3
 * [b59471655](https://github.com/argoproj/argo-workflows/commit/b5947165564246a3c55375500f3fc1aea4dc6966) feat: Create API clients (#2218)
 * [214c45153](https://github.com/argoproj/argo-workflows/commit/214c451535ebeb6e68f1599c2c0a4a4d174ade25) fix(controller): Get correct Step or DAG name. Fixes #2244 (#2304)
 * [c4d264661](https://github.com/argoproj/argo-workflows/commit/c4d2646612d190ec73f38ec840259110a9ce89e0) fix: Remove active wf from Cron when deleted (#2299)
 * [0eff938d6](https://github.com/argoproj/argo-workflows/commit/0eff938d62764abffcfdc741dfaca5fd6c8ae53f) fix: Skip empty withParam steps (#2284)
 * [636ea443c](https://github.com/argoproj/argo-workflows/commit/636ea443c38869beaccfff19f4b72dd23755b2ff) chore(cli): Migrate `argo terminate` to use API client. See #2116 (#2280)
 * [d0a9b528e](https://github.com/argoproj/argo-workflows/commit/d0a9b528e383a1b9ea737e0f919c93969d3d393b) chore(cli): Migrate `argo template` to use API client. Closes #2115 (#2296)
 * [f69a6c5fa](https://github.com/argoproj/argo-workflows/commit/f69a6c5fa487d3b6c2d5383aa588695d6dcdb6de) chore(cli): Migrate `argo cron` to use API client. Closes #2114 (#2295)
 * [80b9b590e](https://github.com/argoproj/argo-workflows/commit/80b9b590ebca1dbe69c5c7df0dd1c2f1feae5eea) chore(cli): Migrate `argo retry` to use API client. See #2116 (#2277)

### Contributors

 * Alex Collins
 * Derek Wang
 * Simon Behar

## v2.6.0-rc2 (2020-02-21)

 * [9f7ef614f](https://github.com/argoproj/argo-workflows/commit/9f7ef614fb8a4291d64c6a4374910edb67678da9) Update manifests to v2.6.0-rc2
 * [cdbc61945](https://github.com/argoproj/argo-workflows/commit/cdbc61945e09ae4dab8a56a085d050a0c358b896) fix(sequence): broken in 2.5. Fixes #2248 (#2263)
 * [0d3955a7f](https://github.com/argoproj/argo-workflows/commit/0d3955a7f617c58f74c2892894036dfbdebaa5aa) refactor(cli): 2x simplify migration to API client. See #2116 (#2290)
 * [df8493a1c](https://github.com/argoproj/argo-workflows/commit/df8493a1c05d3bac19a8f95f608d5543ba96ac82) fix: Start Argo server with out Configmap #2285 (#2293)
 * [51cdf95b1](https://github.com/argoproj/argo-workflows/commit/51cdf95b18c8532f0bdb72c7ca20d56bdafc3a60) doc: More detail for namespaced installation (#2292)
 * [a73026976](https://github.com/argoproj/argo-workflows/commit/a730269767bdd10c4a9c5901c7e73f6bb25429c2) build(swagger): Fix argo-server swagger so version does not change. (#2291)
 * [47b4fc284](https://github.com/argoproj/argo-workflows/commit/47b4fc284df3cff9dfb4ea6622a0236bf1613096) fix(cli): Reinstate `argo wait`. Fixes #2281 (#2283)
 * [1793887b9](https://github.com/argoproj/argo-workflows/commit/1793887b95446d341102b81523931403e30ef0f7) chore(cli): Migrate `argo suspend` and `argo resume` to use API client. See #2116 (#2275)
 * [1f3d2f5a0](https://github.com/argoproj/argo-workflows/commit/1f3d2f5a0c9d772d7b204b13529f56bc33703a45) chore(cli): Update `argo resubmit` to support client API. See #2116 (#2276)
 * [c33f6cda3](https://github.com/argoproj/argo-workflows/commit/c33f6cda39a3be40cc2e829c4c8d0b4c54704896) fix(archive): Fix bug in migrating cluster name. Fixes #2272 (#2279)
 * [fb0acbbff](https://github.com/argoproj/argo-workflows/commit/fb0acbbffb0a7c754223e516f55a40b957277fe4) fix: Fixes double logging in UI. Fixes #2270 (#2271)
 * [acf37c2db](https://github.com/argoproj/argo-workflows/commit/acf37c2db0d69def2045a6fc0f37a2b9db0c41fe) fix: Correctly report version. Fixes #2264 (#2268)
 * [b30f1af65](https://github.com/argoproj/argo-workflows/commit/b30f1af6528046a3af29c82ac1e29d9d300eec22) fix: Removes Template.Arguments as this is never used. Fixes #2046 (#2267)

### Contributors

 * Alex Collins
 * Derek Wang
 * Saravanan Balasubramanian
 * mark9white

## v2.6.0-rc1 (2020-02-19)

 * [bd89f9cbe](https://github.com/argoproj/argo-workflows/commit/bd89f9cbe1bd0ab4d70fa0fa919278fb8266956d) Update manifests to v2.6.0-rc1
 * [79b09ed43](https://github.com/argoproj/argo-workflows/commit/79b09ed43550bbf958c631386f8514b2d474062c) fix: Removed duplicate Watch Command (#2262)
 * [b5c47266c](https://github.com/argoproj/argo-workflows/commit/b5c47266c4e33ba8739277ea43fe4b8023542367) feat(ui): Add filters for archived workflows (#2257)
 * [d30aa3357](https://github.com/argoproj/argo-workflows/commit/d30aa3357738a272e1864d9f352f3c160c1608fc) fix(archive): Return correct next page info. Fixes #2255 (#2256)
 * [8c97689e5](https://github.com/argoproj/argo-workflows/commit/8c97689e5d9d956a0dd9493c4c53088a6e8a87fa) fix: Ignore bookmark events for restart. Fixes #2249 (#2253)
 * [63858eaa9](https://github.com/argoproj/argo-workflows/commit/63858eaa919c430bf0683dc33d81c94d4237b45b) fix(offloading): Change offloaded nodes datatype to JSON to support 1GB. Fixes #2246 (#2250)
 * [4d88374b7](https://github.com/argoproj/argo-workflows/commit/4d88374b70e272eb454395f066c371ad2977abef) Add Cartrack into officially using Argo (#2251)
 * [d309d5c1a](https://github.com/argoproj/argo-workflows/commit/d309d5c1a134502a11040757ff85230f7199510f) feat(archive): Add support to filter list by labels. Closes #2171 (#2205)
 * [79f13373f](https://github.com/argoproj/argo-workflows/commit/79f13373fd8c4d0e9c9ff56f2133fa6009d1ed07) feat: Add a new symbol for suspended nodes. Closes #1896 (#2240)
 * [82b48821a](https://github.com/argoproj/argo-workflows/commit/82b48821a83e012ac7ea5740d45addb046e3c8ee) Fix presumed typo (#2243)
 * [af94352f6](https://github.com/argoproj/argo-workflows/commit/af94352f6c93e4bdbb69a1fc92b5d596c647d1a0) feat: Reduce API calls when changing filters. Closes #2231 (#2232)
 * [a58cbc7dd](https://github.com/argoproj/argo-workflows/commit/a58cbc7dd12fe919614768ca0fa4714853091b7f) BasisAI uses Argo (#2241)
 * [68e3c9fd9](https://github.com/argoproj/argo-workflows/commit/68e3c9fd9f597b6b4599dc7e9dbc5d71252ac5cf) feat: Add Pod Name to UI (#2227)
 * [eef850726](https://github.com/argoproj/argo-workflows/commit/eef85072691a9302e4168a072cfdffed6908a5d6) fix(offload): Fix bug which deleted completed workflows. Fixes #2233 (#2234)
 * [4e4565cdb](https://github.com/argoproj/argo-workflows/commit/4e4565cdbb5d2e5c215af1b8b2f03695b45c2bba) feat: Label workflow-created pvc with workflow name (#1890)
 * [8bd5ecbc1](https://github.com/argoproj/argo-workflows/commit/8bd5ecbc16f1063ef332ca3445ed9a9b953efa4f) fix: display error message when deleting archived workflow fails. (#2235)
 * [ae381ae57](https://github.com/argoproj/argo-workflows/commit/ae381ae57e5d2d3226114c773264595b3d672c39) feat: This add support to enable debug logging for all CLI commands (#2212)
 * [1b1927fc6](https://github.com/argoproj/argo-workflows/commit/1b1927fc6fa519b7bf277e4273f4c7cede16ed64) feat(swagger): Adds a make api/argo-server/swagger.json (#2216)
 * [5d7b4c8c2](https://github.com/argoproj/argo-workflows/commit/5d7b4c8c2d5819116b060f1ee656571b77b873bd) Update README.md (#2226)
 * [2981e6ff4](https://github.com/argoproj/argo-workflows/commit/2981e6ff4c053b898a425d366fa696c8530ffeb0) fix: Enforce UnknownField requirement in WorkflowStep (#2210)
 * [affc235cd](https://github.com/argoproj/argo-workflows/commit/affc235cd07bb01ee0ef8bb226b7a4c6470dc1e7) feat: Add failed node info to exit handler (#2166)
 * [af1f6d600](https://github.com/argoproj/argo-workflows/commit/af1f6d60078c5562b2c9d538d2b104c277c82593) fix: UI Responsive design on filter box (#2221)
 * [a445049ca](https://github.com/argoproj/argo-workflows/commit/a445049ca3f67b499b9bef95c9e43075c8e10250) fix: Fixed race condition in kill container method. Fixes #1884 (#2208)
 * [2672857f2](https://github.com/argoproj/argo-workflows/commit/2672857f2fbaabf727e354b040b1af2431ea70e5) feat: upgrade to Go 1.13. Closes #1375 (#2097)
 * [7466efa99](https://github.com/argoproj/argo-workflows/commit/7466efa99adfeeb3833b02c5afa7a33cdf8f87bc) feat: ArtifactRepositoryRef ConfigMap is now taken from the workflow namespace (#1821)
 * [f2bd74bca](https://github.com/argoproj/argo-workflows/commit/f2bd74bca116f1b1ad9990aef9dbad98e0068900) fix: Remove quotes from UI (#2213)
 * [62f466806](https://github.com/argoproj/argo-workflows/commit/62f4668064e71046532505a11c67a675aa29afcf) fix(offloading): Correctly deleted offloaded data. Fixes #2206 (#2207)
 * [e30b77fcd](https://github.com/argoproj/argo-workflows/commit/e30b77fcd5b140074065491988985779b800c4d7) feat(ui): Add label filter to workflow list page. Fixes #802 (#2196)
 * [930ced392](https://github.com/argoproj/argo-workflows/commit/930ced39241b427a521b609c403e7a39f6cc8c48) fix(ui): fixed workflow filtering and ordering. Fixes #2201 (#2202)
 * [881123129](https://github.com/argoproj/argo-workflows/commit/8811231299434e89ee9279e400db3445d83fec39) fix: Correct login instructions. (#2198)
 * [d6f5953d7](https://github.com/argoproj/argo-workflows/commit/d6f5953d73d3940e0151011b7c32446c4c1c0ec4) Update ReadMe for EBSCO (#2195)
 * [b024c46c8](https://github.com/argoproj/argo-workflows/commit/b024c46c8fec8a682802c1d6667a79fede959ae4) feat: Add ability to submit CronWorkflow from CLI (#2003)
 * [f6600fa49](https://github.com/argoproj/argo-workflows/commit/f6600fa499470ea7bd9fe68303759257c329d7ae) fix: Namespace and phase selection in UI (#2191)
 * [c4a24dcab](https://github.com/argoproj/argo-workflows/commit/c4a24dcab016e82a4f1dc764dc67e0d8d324ded3) fix(k8sapi-executor): Fix KillContainer impl (#2160)
 * [d22a5fe69](https://github.com/argoproj/argo-workflows/commit/d22a5fe69c2d5a1fd4c268822cf5e2cd76893a18) Update cli_with_server_test.go (#2189)
 * [b9c828ad3](https://github.com/argoproj/argo-workflows/commit/b9c828ad3a8fe6e92263aafd5eb14f21a284f3fc) fix(archive): Only delete offloaded data we do not need. Fixes #2170 and #2156 (#2172)
 * [73cb5418f](https://github.com/argoproj/argo-workflows/commit/73cb5418f13e359612bb6844ef1747c9e7e6522c) feat: Allow CronWorkflows to have instanceId (#2081)
 * [9efea660b](https://github.com/argoproj/argo-workflows/commit/9efea660b611f02a1eeaa5dc5be857686ed82de2) Sort list and add Greenhouse (#2182)
 * [cae399bae](https://github.com/argoproj/argo-workflows/commit/cae399bae466266bef0351efae77162615f9790f) fix: Fixed the Exec Provider token bug (#2181)
 * [fc476b2a4](https://github.com/argoproj/argo-workflows/commit/fc476b2a4f09c12c0eb4a669b5cc1a18adca206e) fix(ui): Retry workflow event stream on connection loss. Fixes #2179 (#2180)
 * [65058a279](https://github.com/argoproj/argo-workflows/commit/65058a2798fd31ebd4fb99afc41da6a9171ca5be) fix: Correctly create code from changed protos. (#2178)
 * [fcfe1d436](https://github.com/argoproj/argo-workflows/commit/fcfe1d43693c98f0e6c5fe3e2b02ac6a4a9836e6) feat: Implemented open default browser in local mode (#2122)
 * [f6cee5525](https://github.com/argoproj/argo-workflows/commit/f6cee552532702089e62e5fece4dae77e4c99336) fix: Specify download .tgz extension (#2164)
 * [8a1e611a0](https://github.com/argoproj/argo-workflows/commit/8a1e611a03da8374567c9654f8baf29b66c83c6e) feat: Update archived workdflow column to be JSON. Closes #2133 (#2152)
 * [f591c471c](https://github.com/argoproj/argo-workflows/commit/f591c471c336e99c206094d21567fe01c978bf3c) fix!: Change `argo token` to `argo auth token`. Closes #2149 (#2150)
 * [409a51547](https://github.com/argoproj/argo-workflows/commit/409a5154726dd16475b3aaf97f05f191cdb65808) fix: Add certs to argocli image. Fixes #2129 (#2143)
 * [b094802a0](https://github.com/argoproj/argo-workflows/commit/b094802a03406328699bffad6deeceb5bdb61777) fix: Allow download of artifacs in server auth-mode. Fixes #2129 (#2147)
 * [520fa5407](https://github.com/argoproj/argo-workflows/commit/520fa54073ab20a9bcd2f115f65f50d9761dc230) fix: Correct SQL syntax. (#2141)
(#2141) - * [059cb9b18](https://github.com/argoproj/argo-workflows/commit/059cb9b1879361b77a293b3156bc9dfab2cefe71) fix: logs UI should fall back to archive (#2139) - * [4cda9a05b](https://github.com/argoproj/argo-workflows/commit/4cda9a05bf8cee20027132e4b3428ca9654bed5a) fix: route all unknown web content requests to index.html (#2134) - * [14d8b5d39](https://github.com/argoproj/argo-workflows/commit/14d8b5d3913c2a6b320c564d6fc11c1d90769a97) fix: archiveLogs needs to copy stderr (#2136) - * [91319ee49](https://github.com/argoproj/argo-workflows/commit/91319ee49f1fefec13233cb843b46f42cf5a9830) fixed ui navigation issues with basehref (#2130) - * [badfd1833](https://github.com/argoproj/argo-workflows/commit/badfd18335ec1b26d395ece0ad65d12aeb11beec) feat: Add support to delete by using labels. Depended on by #2116 (#2123) - * [a75ac1b48](https://github.com/argoproj/argo-workflows/commit/a75ac1b487a50bad19b3c58262fb3b170640ab4a) fix: mark CLI common.go vars and funcs as DEPRECATED (#2119) - * [be21a0f17](https://github.com/argoproj/argo-workflows/commit/be21a0f17ed851032a16cfa90934a04662da6d2d) feat(server): Restart server when config changes. Fixes #2090 (#2092) - * [b2bd25bc2](https://github.com/argoproj/argo-workflows/commit/b2bd25bc2ba15f1ffa39bade75b09af5e3bb81a4) fix: Disable webpack dot rule (#2112) - * [865b4f3a2](https://github.com/argoproj/argo-workflows/commit/865b4f3a2b51cc08cf4a80423933a97f876af4a2) addcompany (#2109) - * [213e3a9d9](https://github.com/argoproj/argo-workflows/commit/213e3a9d9ec43b9f05fe7c5cf11d3f704a8649dd) fix: Fix Resource Deletion Bug (#2084) - * [ab1de233b](https://github.com/argoproj/argo-workflows/commit/ab1de233b47ec7c284fd20705b9efa00626877f7) refactor(cli): Introduce v1.Interface for CLI. Closes #2107 (#2048) - * [7a19f85ca](https://github.com/argoproj/argo-workflows/commit/7a19f85caa8760f28ffae6227a529823a0867218) feat: Implemented Basic Auth scheme (#2093) - * [7611b9f6c](https://github.com/argoproj/argo-workflows/commit/7611b9f6c6359680a4d450116ee893e4dc174811) fix(ui): Add support for bash href. Fixes ##2100 (#2105) - * [516d05f81](https://github.com/argoproj/argo-workflows/commit/516d05f81a86c586bc19aad7836f35bb85130025) fix: Namespace redirects no longer error and are snappier (#2106) - * [16aed5c8e](https://github.com/argoproj/argo-workflows/commit/16aed5c8ec0256fc78d95149435c37dac1db087a) fix: Skip running --token testing if it is not on CI (#2104) - * [aece7e6eb](https://github.com/argoproj/argo-workflows/commit/aece7e6ebdf2478dd7efa5706490c5c7abe858e6) Parse container ID in correct way on CRI-O. Fixes #2095 (#2096) - * [b6a2be896](https://github.com/argoproj/argo-workflows/commit/b6a2be89689222470288339570aa0a719e775002) feat: support arg --token when talking to argo-server (#2027) (#2089) - * [492842aa1](https://github.com/argoproj/argo-workflows/commit/492842aa17cc447d68f1181c02990bfa7a78913a) docs(README): Add Capital One to user list (#2094) - * [d56a0e12a](https://github.com/argoproj/argo-workflows/commit/d56a0e12a283aaa5398e03fe423fed83d60ca370) fix(controller): Fix template resolution for step groups. Fixes #1868 (#1920) - * [b97044d2a](https://github.com/argoproj/argo-workflows/commit/b97044d2a47a79fab26fb0e3142c82e88a582f64) fix(security): Fixes an issue that allowed you to list archived workf… (#2079) - -### Contributors - - * Aaron Curtis - * Alex Collins - * Alexey Volkov - * Daisuke Taniwaki - * Derek Wang - * Dineshmohan Rajaveeran - * Huan-Cheng Chang - * Jialu Zhu - * Juan C. 
Muller - * Nasrudin Bin Salim - * Nick Groszewski - * Rafał Bigaj - * Roman Galeev - * Saravanan Balasubramanian - * Simon Behar - * Tom Wieczorek - * Tristan Colgate-McFarlane - * fsiegmund - * mdvorakramboll - * tkilpela - -## v2.5.3-rc4 (2020-01-27) - - -### Contributors - - -## v2.5.2 (2020-02-24) - - * [4b25e2ac1](https://github.com/argoproj/argo-workflows/commit/4b25e2ac1d495991261e97c86d211d658423ab7f) Update manifests to v2.5.2 - * [6092885c9](https://github.com/argoproj/argo-workflows/commit/6092885c91c040435cba7134e30e8c1c92574c7b) fix(archive): Fix bug in migrating cluster name. Fixes #2272 (#2279) - -### Contributors - - * Alex Collins - -## v2.5.1 (2020-02-20) - - * [fb496a244](https://github.com/argoproj/argo-workflows/commit/fb496a244383822af5d4c71431062cebd6de0ee4) Update manifests to v2.5.1 - * [61114d62e](https://github.com/argoproj/argo-workflows/commit/61114d62ec7b01c1cd9c68dd1917732673ddbca2) fix: Fixes double logging in UI. Fixes #2270 (#2271) - * [4737c8a26](https://github.com/argoproj/argo-workflows/commit/4737c8a26c30ca98e3ef2ea6147e8bcee45decbb) fix: Correctly report version. Fixes #2264 (#2268) - * [e096feaf3](https://github.com/argoproj/argo-workflows/commit/e096feaf330b7ebf8c2be31c5f0f932a1670158c) fix: Removed duplicate Watch Command (#2262) - -### Contributors - - * Alex Collins - * tkilpela - -## v2.5.0 (2020-02-18) - - * [11d2232ed](https://github.com/argoproj/argo-workflows/commit/11d2232edfc4ac1176cc1ed4a47c77aeec48aeb7) Update manifests to v2.5.0 - * [661f8a111](https://github.com/argoproj/argo-workflows/commit/661f8a1113a2a02eb521a6a5e5286d38b42e5f84) fix: Ignore bookmark events for restart. Fixes #2249 (#2253) - * [6c1a6601b](https://github.com/argoproj/argo-workflows/commit/6c1a6601b151efb4a9ada9a9c997130e319daf3f) fix(offloading): Change offloaded nodes datatype to JSON to support 1GB. Fixes #2246 (#2250) - -### Contributors - - * Alex Collins - -## v2.5.0-rc12 (2020-02-13) - - * [befd3594f](https://github.com/argoproj/argo-workflows/commit/befd3594f1d54e9e1bedd08d781025d43e6bed5b) Update manifests to v2.5.0-rc12 - * [4670c99ec](https://github.com/argoproj/argo-workflows/commit/4670c99ec819dcc91c807def6c2b4e7128e2b987) fix(offload): Fix bug which deleted completed workflows. Fixes #2233 (#2234) - -### Contributors - - * Alex Collins - -## v2.5.0-rc11 (2020-02-11) - - * [47d9a41a9](https://github.com/argoproj/argo-workflows/commit/47d9a41a902c18797e36c9371e3ab7a3e261605b) Update manifests to v2.5.0-rc11 - * [04917cde0](https://github.com/argoproj/argo-workflows/commit/04917cde047098c1fdf07965a01e07c97d2e36af) fix: Remove quotes from UI (#2213) - * [2705a1141](https://github.com/argoproj/argo-workflows/commit/2705a114195aa7dfc2617f2ebba54fbf603b1fd2) fix(offloading): Correctly deleted offloaded data. Fixes #2206 (#2207) - * [930ced392](https://github.com/argoproj/argo-workflows/commit/930ced39241b427a521b609c403e7a39f6cc8c48) fix(ui): fixed workflow filtering and ordering. Fixes #2201 (#2202) - * [881123129](https://github.com/argoproj/argo-workflows/commit/8811231299434e89ee9279e400db3445d83fec39) fix: Correct login instructions. 
(#2198) - -### Contributors - - * Alex Collins - * fsiegmund - -## v2.5.0-rc10 (2020-02-07) - - * [b557eeb98](https://github.com/argoproj/argo-workflows/commit/b557eeb981f0e7ac3b12f4e861ff9ca099338143) Update manifests to v2.5.0-rc10 - * [d6f5953d7](https://github.com/argoproj/argo-workflows/commit/d6f5953d73d3940e0151011b7c32446c4c1c0ec4) Update ReadMe for EBSCO (#2195) - * [b024c46c8](https://github.com/argoproj/argo-workflows/commit/b024c46c8fec8a682802c1d6667a79fede959ae4) feat: Add ability to submit CronWorkflow from CLI (#2003) - * [f6600fa49](https://github.com/argoproj/argo-workflows/commit/f6600fa499470ea7bd9fe68303759257c329d7ae) fix: Namespace and phase selection in UI (#2191) - * [c4a24dcab](https://github.com/argoproj/argo-workflows/commit/c4a24dcab016e82a4f1dc764dc67e0d8d324ded3) fix(k8sapi-executor): Fix KillContainer impl (#2160) - * [d22a5fe69](https://github.com/argoproj/argo-workflows/commit/d22a5fe69c2d5a1fd4c268822cf5e2cd76893a18) Update cli_with_server_test.go (#2189) - -### Contributors - - * Alex Collins - * Dineshmohan Rajaveeran - * Saravanan Balasubramanian - * Simon Behar - * Tom Wieczorek - -## v2.5.0-rc9 (2020-02-06) - - * [bea41b498](https://github.com/argoproj/argo-workflows/commit/bea41b498fd3ece93e0d2f344b58ca31e1f28080) Update manifests to v2.5.0-rc9 - * [b9c828ad3](https://github.com/argoproj/argo-workflows/commit/b9c828ad3a8fe6e92263aafd5eb14f21a284f3fc) fix(archive): Only delete offloaded data we do not need. Fixes #2170 and #2156 (#2172) - * [73cb5418f](https://github.com/argoproj/argo-workflows/commit/73cb5418f13e359612bb6844ef1747c9e7e6522c) feat: Allow CronWorkflows to have instanceId (#2081) - * [9efea660b](https://github.com/argoproj/argo-workflows/commit/9efea660b611f02a1eeaa5dc5be857686ed82de2) Sort list and add Greenhouse (#2182) - * [cae399bae](https://github.com/argoproj/argo-workflows/commit/cae399bae466266bef0351efae77162615f9790f) fix: Fixed the Exec Provider token bug (#2181) - * [fc476b2a4](https://github.com/argoproj/argo-workflows/commit/fc476b2a4f09c12c0eb4a669b5cc1a18adca206e) fix(ui): Retry workflow event stream on connection loss. Fixes #2179 (#2180) - * [65058a279](https://github.com/argoproj/argo-workflows/commit/65058a2798fd31ebd4fb99afc41da6a9171ca5be) fix: Correctly create code from changed protos. (#2178) - * [fcfe1d436](https://github.com/argoproj/argo-workflows/commit/fcfe1d43693c98f0e6c5fe3e2b02ac6a4a9836e6) feat: Implemented open default browser in local mode (#2122) - * [f6cee5525](https://github.com/argoproj/argo-workflows/commit/f6cee552532702089e62e5fece4dae77e4c99336) fix: Specify download .tgz extension (#2164) - * [8a1e611a0](https://github.com/argoproj/argo-workflows/commit/8a1e611a03da8374567c9654f8baf29b66c83c6e) feat: Update archived workdflow column to be JSON. Closes #2133 (#2152) - * [f591c471c](https://github.com/argoproj/argo-workflows/commit/f591c471c336e99c206094d21567fe01c978bf3c) fix!: Change `argo token` to `argo auth token`. Closes #2149 (#2150) - -### Contributors - - * Alex Collins - * Juan C. Muller - * Saravanan Balasubramanian - * Simon Behar - * fsiegmund - -## v2.5.0-rc8 (2020-02-03) - - * [392de8144](https://github.com/argoproj/argo-workflows/commit/392de814471abb3ca6c12ad7243c72c1a52591ff) Update manifests to v2.5.0-rc8 - * [409a51547](https://github.com/argoproj/argo-workflows/commit/409a5154726dd16475b3aaf97f05f191cdb65808) fix: Add certs to argocli image. 
Fixes #2129 (#2143) - * [b094802a0](https://github.com/argoproj/argo-workflows/commit/b094802a03406328699bffad6deeceb5bdb61777) fix: Allow download of artifacs in server auth-mode. Fixes #2129 (#2147) - * [520fa5407](https://github.com/argoproj/argo-workflows/commit/520fa54073ab20a9bcd2f115f65f50d9761dc230) fix: Correct SQL syntax. (#2141) - * [059cb9b18](https://github.com/argoproj/argo-workflows/commit/059cb9b1879361b77a293b3156bc9dfab2cefe71) fix: logs UI should fall back to archive (#2139) - * [4cda9a05b](https://github.com/argoproj/argo-workflows/commit/4cda9a05bf8cee20027132e4b3428ca9654bed5a) fix: route all unknown web content requests to index.html (#2134) - * [14d8b5d39](https://github.com/argoproj/argo-workflows/commit/14d8b5d3913c2a6b320c564d6fc11c1d90769a97) fix: archiveLogs needs to copy stderr (#2136) - * [91319ee49](https://github.com/argoproj/argo-workflows/commit/91319ee49f1fefec13233cb843b46f42cf5a9830) fixed ui navigation issues with basehref (#2130) - * [badfd1833](https://github.com/argoproj/argo-workflows/commit/badfd18335ec1b26d395ece0ad65d12aeb11beec) feat: Add support to delete by using labels. Depended on by #2116 (#2123) - -### Contributors - - * Alex Collins - * Tristan Colgate-McFarlane - * fsiegmund - -## v2.5.0-rc7 (2020-01-31) - - * [40e7ca37c](https://github.com/argoproj/argo-workflows/commit/40e7ca37cf5834e5ad8f799ea9ede61f7549a7d9) Update manifests to v2.5.0-rc7 - * [a75ac1b48](https://github.com/argoproj/argo-workflows/commit/a75ac1b487a50bad19b3c58262fb3b170640ab4a) fix: mark CLI common.go vars and funcs as DEPRECATED (#2119) - * [be21a0f17](https://github.com/argoproj/argo-workflows/commit/be21a0f17ed851032a16cfa90934a04662da6d2d) feat(server): Restart server when config changes. Fixes #2090 (#2092) - * [b2bd25bc2](https://github.com/argoproj/argo-workflows/commit/b2bd25bc2ba15f1ffa39bade75b09af5e3bb81a4) fix: Disable webpack dot rule (#2112) - * [865b4f3a2](https://github.com/argoproj/argo-workflows/commit/865b4f3a2b51cc08cf4a80423933a97f876af4a2) addcompany (#2109) - * [213e3a9d9](https://github.com/argoproj/argo-workflows/commit/213e3a9d9ec43b9f05fe7c5cf11d3f704a8649dd) fix: Fix Resource Deletion Bug (#2084) - * [ab1de233b](https://github.com/argoproj/argo-workflows/commit/ab1de233b47ec7c284fd20705b9efa00626877f7) refactor(cli): Introduce v1.Interface for CLI. Closes #2107 (#2048) - * [7a19f85ca](https://github.com/argoproj/argo-workflows/commit/7a19f85caa8760f28ffae6227a529823a0867218) feat: Implemented Basic Auth scheme (#2093) - -### Contributors - - * Alex Collins - * Jialu Zhu - * Saravanan Balasubramanian - * Simon Behar - -## v2.5.0-rc6 (2020-01-30) - - * [7b7fcf01a](https://github.com/argoproj/argo-workflows/commit/7b7fcf01a2c7819aa7da8d4ab6e5ae93e5b81436) Update manifests to v2.5.0-rc6 - * [7611b9f6c](https://github.com/argoproj/argo-workflows/commit/7611b9f6c6359680a4d450116ee893e4dc174811) fix(ui): Add support for bash href. Fixes ##2100 (#2105) - * [516d05f81](https://github.com/argoproj/argo-workflows/commit/516d05f81a86c586bc19aad7836f35bb85130025) fix: Namespace redirects no longer error and are snappier (#2106) - * [16aed5c8e](https://github.com/argoproj/argo-workflows/commit/16aed5c8ec0256fc78d95149435c37dac1db087a) fix: Skip running --token testing if it is not on CI (#2104) - * [aece7e6eb](https://github.com/argoproj/argo-workflows/commit/aece7e6ebdf2478dd7efa5706490c5c7abe858e6) Parse container ID in correct way on CRI-O. 
Fixes #2095 (#2096) - -### Contributors - - * Alex Collins - * Derek Wang - * Rafał Bigaj - * Simon Behar - -## v2.5.0-rc5 (2020-01-29) - - * [4609f3d70](https://github.com/argoproj/argo-workflows/commit/4609f3d70fef44c35634c743b15060d7865e0879) Update manifests to v2.5.0-rc5 - * [b6a2be896](https://github.com/argoproj/argo-workflows/commit/b6a2be89689222470288339570aa0a719e775002) feat: support arg --token when talking to argo-server (#2027) (#2089) - * [492842aa1](https://github.com/argoproj/argo-workflows/commit/492842aa17cc447d68f1181c02990bfa7a78913a) docs(README): Add Capital One to user list (#2094) - * [d56a0e12a](https://github.com/argoproj/argo-workflows/commit/d56a0e12a283aaa5398e03fe423fed83d60ca370) fix(controller): Fix template resolution for step groups. Fixes #1868 (#1920) - * [b97044d2a](https://github.com/argoproj/argo-workflows/commit/b97044d2a47a79fab26fb0e3142c82e88a582f64) fix(security): Fixes an issue that allowed you to list archived workf… (#2079) - -### Contributors - - * Alex Collins - * Daisuke Taniwaki - * Derek Wang - * Nick Groszewski - -## v2.5.0-rc4 (2020-01-27) - - * [2afcb0f27](https://github.com/argoproj/argo-workflows/commit/2afcb0f27cd32cf5a6600f8d4826ace578f9ee20) Update manifests to v2.5.0-rc4 - * [c4f49cf07](https://github.com/argoproj/argo-workflows/commit/c4f49cf074ad874996145674d635165f6256ca15) refactor: Move server code (cmd/server/ -> server/) (#2071) - * [2542454c1](https://github.com/argoproj/argo-workflows/commit/2542454c1daf61bc3826fa370c21799059904093) fix(controller): Do not crash if cm is empty. Fixes #2069 (#2070) - -### Contributors - - * Alex Collins - * Simon Behar - -## v2.5.0-rc3 (2020-01-27) - - * [091c2f7e8](https://github.com/argoproj/argo-workflows/commit/091c2f7e892bed287cf701cafe9bee0ccf5c0ce8) lint - * [30775fac8](https://github.com/argoproj/argo-workflows/commit/30775fac8a92cf7bdf84ada11746a7643d464885) Update manifests to v2.5.0-rc3 - * [85fa9aafa](https://github.com/argoproj/argo-workflows/commit/85fa9aafa70a98ce999157bb900971f24bd81101) fix: Do not expect workflowChange to always be defined (#2068) - * [6f65bc2b7](https://github.com/argoproj/argo-workflows/commit/6f65bc2b77ddcf4616c78d6db4955bf839a0c21a) fix: "base64 -d" not always available, using "base64 --decode" (#2067) - * [5328389aa](https://github.com/argoproj/argo-workflows/commit/5328389aac14da059148ad840a9a72c322947e9e) adds "verify-manifests" target - * [ef1c403e3](https://github.com/argoproj/argo-workflows/commit/ef1c403e3f49cf06f9bbed2bfdcc7d89548031cb) fix: generate no-db manifests - * [6f2c88028](https://github.com/argoproj/argo-workflows/commit/6f2c880280d490ba746a86d828ade61d8b58c7a5) feat(ui): Use cookies in the UI. Closes #1949 (#2058) - * [4592aec68](https://github.com/argoproj/argo-workflows/commit/4592aec6805ce1110edcb7dc4e3e7454a2042441) fix(api): Change `CronWorkflowName` to `Name`. 
Fixes #1982 (#2033) - * [4676a9465](https://github.com/argoproj/argo-workflows/commit/4676a9465ac4c2faa856f971706766f46e08edef) try and improve the release tasks - * [e26c11af7](https://github.com/argoproj/argo-workflows/commit/e26c11af747651c6642cf0abd3cbc4ccac7b95de) fix: only run archived wf testing when persistence is enabled (#2059) - * [b3cab5dfb](https://github.com/argoproj/argo-workflows/commit/b3cab5dfbb5e5973b1dc448946d16ee0cd690d6a) fix: Fix permission test cases (#2035) - -### Contributors - - * Alex Collins - * Derek Wang - * Simon Behar - -## v2.5.0-rc2 (2020-01-24) - - * [243eecebc](https://github.com/argoproj/argo-workflows/commit/243eecebc96fe2c8905cf4a5a7870e7d6c7c60e8) make manifests - * [8663652a7](https://github.com/argoproj/argo-workflows/commit/8663652a75717ea77f983a9602ccf32aa187b137) make manifesets - * [6cf64a21b](https://github.com/argoproj/argo-workflows/commit/6cf64a21bbe4ab1abd210844298a28b5803d6f59) Update Makefile - * [216d14ad1](https://github.com/argoproj/argo-workflows/commit/216d14ad10d0e8508a58ebe54383880f5d513160) fixed makefile - * [ba2f7891a](https://github.com/argoproj/argo-workflows/commit/ba2f7891ae8021ac2d235080aa35cd6391226989) merge conflict - * [8752f026c](https://github.com/argoproj/argo-workflows/commit/8752f026c569e4fe29ed9cc1539ee537a8e9fcef) merge conflict - * [50777ed88](https://github.com/argoproj/argo-workflows/commit/50777ed8868745db8051970b51e69fb1a930acf2) fix: nil pointer in GC (#2055) - * [b408e7cd2](https://github.com/argoproj/argo-workflows/commit/b408e7cd28b95a08498f6e30fcbef385d0ff89f5) fix: nil pointer in GC (#2055) - * [7ed058c3c](https://github.com/argoproj/argo-workflows/commit/7ed058c3c30d9aea2a2cf6cc44893dfbeb886419) fix: offload Node Status in Get and List api call (#2051) - * [4ac115606](https://github.com/argoproj/argo-workflows/commit/4ac115606bf6f0b3c5c837020efd41bf90789a00) fix: offload Node Status in Get and List api call (#2051) - * [aa6a536de](https://github.com/argoproj/argo-workflows/commit/aa6a536deae7d67ae7dd2995d94849bc1861e21e) fix(persistence): Allow `argo server` to run without persistence (#2050) - * [71ba82382](https://github.com/argoproj/argo-workflows/commit/71ba823822c190bfdb71073604bb987bb938cff4) Update README.md (#2045) - * [c79530526](https://github.com/argoproj/argo-workflows/commit/c795305268d5793e6672252ae6ff7fb6a54f23fd) fix(persistence): Allow `argo server` to run without persistence (#2050) - -### Contributors - - * Alex Collins - * Ed Lee - * Saravanan Balasubramanian - -## v2.5.0-rc1 (2020-01-23) - - * [b0ee44ac1](https://github.com/argoproj/argo-workflows/commit/b0ee44ac19604abe0de447027d8aea5ce32c68ea) fixed git push - * [e4cfefee7](https://github.com/argoproj/argo-workflows/commit/e4cfefee7af541a73d1f6cd3b5c132ae5c52ed24) revert cmd/server/static/files.go - * [ecdb8b093](https://github.com/argoproj/argo-workflows/commit/ecdb8b09337ef1a9bf04681619774a10b6f07607) v2.5.0-rc1 - * [6638936df](https://github.com/argoproj/argo-workflows/commit/6638936df69f2ab9016091a06f7dd2fd2c8945ea) Update manifests to 2.5.0-rc1 - * [c3e02d818](https://github.com/argoproj/argo-workflows/commit/c3e02d81844ad486111a1691333b18f921d6bf7b) Update Makefile - * [43656c6e6](https://github.com/argoproj/argo-workflows/commit/43656c6e6d82fccf06ff2c267cdc634d0345089c) Update Makefile - * [b49d82d71](https://github.com/argoproj/argo-workflows/commit/b49d82d71d07e0cdcedb7d1318d0eb53f19ce8cd) Update manifests to v2.5.0-rc1 - * 
[38bc90ac7](https://github.com/argoproj/argo-workflows/commit/38bc90ac7fe91d99823b37e825fda11f33598cb2) Update Makefile - * [1db74e1a2](https://github.com/argoproj/argo-workflows/commit/1db74e1a2658fa7de925cd4c81fbfd98f648cd99) fix(archive): upsert archive + ci: Pin images on CI, add readiness probes, clean-up logging and other tweaks (#2038) - * [c46c68367](https://github.com/argoproj/argo-workflows/commit/c46c6836706dce54aea4a13deee864bd3c6cb906) feat: Allow workflow-level parameters to be modified in the UI when submitting a workflow (#2030) - * [faa9dbb59](https://github.com/argoproj/argo-workflows/commit/faa9dbb59753a068c64a1aa5923e3e359c0866d8) fix(Makefile): Rename staticfiles make target. Fixes #2010 (#2040) - * [1a96007fe](https://github.com/argoproj/argo-workflows/commit/1a96007fed6a57d14a0e364000b54a364293438b) fix: Redirect to correct page when using managed namespace. Fixes #2015 (#2029) - * [787263142](https://github.com/argoproj/argo-workflows/commit/787263142162b62085572660f5e6497279f82ab1) fix(api): Updates proto message naming (#2034) - * [4a1307c89](https://github.com/argoproj/argo-workflows/commit/4a1307c89e58f554af8e0cdc44e5e66e4623dfb4) feat: Adds support for MySQL. Fixes #1945 (#2013) - * [5c98a14ec](https://github.com/argoproj/argo-workflows/commit/5c98a14ecdc78a5be48f34c455d90782157c4cbe) feat(controller): Add audit logs to workflows. Fixes #1769 (#1930) - * [2982c1a82](https://github.com/argoproj/argo-workflows/commit/2982c1a82cd6f1e7fb755a948d7a165aa0aeebc0) fix(validate): Allow placeholder in values taken from inputs. Fixes #1984 (#2028) - * [3293c83f6](https://github.com/argoproj/argo-workflows/commit/3293c83f6170ad4dc022067bb37f12d07d2834c1) feat: Add version to offload nodes. Fixes #1944 and #1946 (#1974) - * [f8569ae91](https://github.com/argoproj/argo-workflows/commit/f8569ae913053c8ba4cd9ca72c9c237dd83200c0) feat: Auth refactoring to support single version token (#1998) - * [eb360d60e](https://github.com/argoproj/argo-workflows/commit/eb360d60ea81e8deefbaf41bcb76921acd08b16f) Fix README (#2023) - * [ef1bd3a32](https://github.com/argoproj/argo-workflows/commit/ef1bd3a32c434c565defc7b325463e8d831262f2) fix typo (#2021) - * [f25a45deb](https://github.com/argoproj/argo-workflows/commit/f25a45deb4a7179044034da890884432e750d98a) feat(controller): Exposes container runtime executor as CLI option. (#2014) - * [3b26af7dd](https://github.com/argoproj/argo-workflows/commit/3b26af7dd4cc3d08ee50f3bc2f389efd516b9248) Enable s3 trace support. Bump version to v2.5.0. Tweak proto id to match Workflow (#2009) - * [5eb15bb54](https://github.com/argoproj/argo-workflows/commit/5eb15bb5409f54f1a4759dde2479b7569e5f81e4) fix: Fix workflow level timeouts (#1369) - * [5868982bc](https://github.com/argoproj/argo-workflows/commit/5868982bcddf3b9c9ddb98151bf458f6868dce81) fix: Fixes the `test` job on master (#2008) - * [29c850728](https://github.com/argoproj/argo-workflows/commit/29c850728fa701d62078910e1641588c959c28c5) fix: Fixed grammar on TTLStrategy (#2006) - * [2f58d202c](https://github.com/argoproj/argo-workflows/commit/2f58d202c21910500ecc4abdb9e23270c9791d0a) fix: v2 token bug (#1991) - * [ed36d92f9](https://github.com/argoproj/argo-workflows/commit/ed36d92f99ea65e06dc78b82923d74c57130dfc3) feat: Add quick start manifests to Git. Change auth-mode to default to server. 
Fixes #1990 (#1993) - * [91331a894](https://github.com/argoproj/argo-workflows/commit/91331a894d713f085207e30406e72b8f65ad0227) fix: No longer delete the argo ns as this is dangerous (#1995) - * [1a777cc66](https://github.com/argoproj/argo-workflows/commit/1a777cc6662b0c95ccf3de12c1a328c4cb12bc78) feat(cron): Added timezone support to cron workflows. Closes #1931 (#1986) - * [48b85e570](https://github.com/argoproj/argo-workflows/commit/48b85e5705a235257b5926d0714eeb173b4347cb) fix: WorkflowTempalteTest fix (#1992) - * [51dab8a4a](https://github.com/argoproj/argo-workflows/commit/51dab8a4a79e5180d795ef10586e31ecf4075214) feat: Adds `argo server` command. Fixes #1966 (#1972) - * [dd704dd65](https://github.com/argoproj/argo-workflows/commit/dd704dd6557e972c8dc3c9816996305a23c80f37) feat: Renders namespace in UI. Fixes #1952 and #1959 (#1965) - * [14d58036f](https://github.com/argoproj/argo-workflows/commit/14d58036faa444ee49a4905a632db7e0a5ab60ba) feat(server): Argo Server. Closes #1331 (#1882) - * [f69655a09](https://github.com/argoproj/argo-workflows/commit/f69655a09c82236d91703fbce2ee1a07fc3641be) fix: Added decompress in retry, resubmit and resume. (#1934) - * [1e7ccb53e](https://github.com/argoproj/argo-workflows/commit/1e7ccb53e8604654c073f6578ae024fd341f048a) updated jq version to 1.6 (#1937) - * [c51c1302f](https://github.com/argoproj/argo-workflows/commit/c51c1302f48cec5b9c6009b9b7e50962d338c679) feat: Enhancement for namespace installation mode configuration (#1939) - * [6af100d54](https://github.com/argoproj/argo-workflows/commit/6af100d5470137cc17c019546f3cad2acf5e4a31) feat: Add suspend and resume to CronWorkflows CLI (#1925) - * [232a465d0](https://github.com/argoproj/argo-workflows/commit/232a465d00b6104fe4801b773b0b3ceffdafb116) feat: Added onExit handlers to Step and DAG (#1716) - * [e4107bb83](https://github.com/argoproj/argo-workflows/commit/e4107bb831af9eb4b99753f7e324ec33042cdc55) Updated Readme.md for companies using Argo: (#1916) - * [7e9b2b589](https://github.com/argoproj/argo-workflows/commit/7e9b2b58915c5cb51276e21c81344e010472cbae) feat: Support for scheduled Workflows with CronWorkflow CRD (#1758) - * [5d7e91852](https://github.com/argoproj/argo-workflows/commit/5d7e91852b09ca2f3f912a8f1efaa6c28e07b524) feat: Provide values of withItems maps as JSON in {{item}}. 
Fixes #1905 (#1906) - * [de3ffd78b](https://github.com/argoproj/argo-workflows/commit/de3ffd78b9c16ed09065aeb16e966904e964a572) feat: Enhanced Different TTLSecondsAfterFinished depending on if job is in Succeeded, Failed or Error, Fixes (#1883) - * [83ae2df41](https://github.com/argoproj/argo-workflows/commit/83ae2df4130468a95b720ce33c9b9e27e7005b17) fix: Decrease docker build time by ignoring node_modules (#1909) - * [59a190697](https://github.com/argoproj/argo-workflows/commit/59a190697286bf19ee4a5c398c1af590a2419003) feat: support iam roles for service accounts in artifact storage (#1899) - * [6526b6cc5](https://github.com/argoproj/argo-workflows/commit/6526b6cc5e4671317fa0bc8c62440364c37a9700) fix: Revert node creation logic (#1818) - * [160a79404](https://github.com/argoproj/argo-workflows/commit/160a794046299c9d0420ae1710641814f30a9b7f) fix: Update Gopkg.lock with dep ensure -update (#1898) - * [ce78227ab](https://github.com/argoproj/argo-workflows/commit/ce78227abe5a3c901e5b7a7dd823fb2551dff584) fix: quick fail after pod termination (#1865) - * [cd3bd235f](https://github.com/argoproj/argo-workflows/commit/cd3bd235f550fbc24c31d1763fde045c9c321fbe) refactor: Format Argo UI using prettier (#1878) - * [b48446e09](https://github.com/argoproj/argo-workflows/commit/b48446e09e29d4f18f6a0cf0e6ff1166770286b1) fix: Fix support for continueOn failed for DAG. Fixes #1817 (#1855) - * [482569615](https://github.com/argoproj/argo-workflows/commit/482569615734d7cb5e24c90d399f3ec98fb2ed96) fix: Fix template scope (#1836) - * [eb585ef73](https://github.com/argoproj/argo-workflows/commit/eb585ef7381c4c9547eb9c2e922e175c0556da03) fix: Use dynamically generated placeholders (#1844) - * [54f44909a](https://github.com/argoproj/argo-workflows/commit/54f44909a0e68bc24209e9e83999421b814e80c9) feat: Always archive logs if in config. 
Closes #1790 (#1860) - * [f5f40728c](https://github.com/argoproj/argo-workflows/commit/f5f40728c4be2d852e8199a5754aee39ed72399f) fix: Minor comment fix (#1872) - * [72fad7ec0](https://github.com/argoproj/argo-workflows/commit/72fad7ec0cf3aa463bd9c2c8c8f961738408cf93) Update docs (#1870) - * [788898954](https://github.com/argoproj/argo-workflows/commit/788898954f7eff5b096f7597e74fc68104d8bf78) Move Workflows UI from https://github.com/argoproj/argo-ui (#1859) - * [87f26c8de](https://github.com/argoproj/argo-workflows/commit/87f26c8de2adc9563a3811aacc1eb31475a84f0b) fix: Move ISSUE_TEMPLATE/ under .github/ (#1858) - * [bd78d1597](https://github.com/argoproj/argo-workflows/commit/bd78d1597e82bf2bf0193e4bf49b6386c68e8222) fix: Ensure timer channel is empty after stop (#1829) - * [afc63024d](https://github.com/argoproj/argo-workflows/commit/afc63024de79c2e211a1ed0e0ede87b99825c63f) Code duplication (#1482) - * [68b72a8fd](https://github.com/argoproj/argo-workflows/commit/68b72a8fd1773ba5f1afb4ec6ba9bf8a4d2b7ad4) add CCRi to list of users in README (#1845) - * [941f30aaf](https://github.com/argoproj/argo-workflows/commit/941f30aaf4e51e1eec13e842a0b8d46767929cec) Add Sidecar Technologies to list of who uses Argo (#1850) - * [a08048b6d](https://github.com/argoproj/argo-workflows/commit/a08048b6de84ff7355728b85851aa84b08be0851) Adding Wavefront to the users list (#1852) - * [cb0598ea8](https://github.com/argoproj/argo-workflows/commit/cb0598ea82bd676fefd98e2040752cfa06516a98) Fixed Panic if DB context has issue (#1851) - * [e5fb88485](https://github.com/argoproj/argo-workflows/commit/e5fb884853d2ad0d1f32022723e211b902841945) fix: Fix a couple of nil derefs (#1847) - * [b3d458504](https://github.com/argoproj/argo-workflows/commit/b3d458504b319b3b02b82a872a5e13c59cb3128f) Add HOVER to the list of who uses Argo (#1825) - * [99db30d67](https://github.com/argoproj/argo-workflows/commit/99db30d67b42cbd9c7fa35bbdd35a57040c2f222) InsideBoard uses Argo (#1835) - * [ac8efcf40](https://github.com/argoproj/argo-workflows/commit/ac8efcf40e45750ae3c78f696f160049ea85dc8e) Red Hat uses Argo (#1828) - * [41ed3acfb](https://github.com/argoproj/argo-workflows/commit/41ed3acfb68c1200ea5f03643120cac81f7d3df6) Adding Fairwinds to the list of companies that use Argo (#1820) - * [5274afb97](https://github.com/argoproj/argo-workflows/commit/5274afb97686a4d2a58c50c3b23dd2b680b881e6) Add exponential back-off to retryStrategy (#1782) - * [e522e30ac](https://github.com/argoproj/argo-workflows/commit/e522e30acebc17793540ac4270d14747b2617b26) Handle operation level errors PVC in Retry (#1762) - * [f2e6054e9](https://github.com/argoproj/argo-workflows/commit/f2e6054e9376f2d2be1d928ee79746b8b49937df) Do not resolve remote templates in lint (#1787) - * [3852bc3f3](https://github.com/argoproj/argo-workflows/commit/3852bc3f3311e9ac174976e9a3e8f625b87888eb) SSL enabled database connection for workflow repository (#1712) (#1756) - * [f2676c875](https://github.com/argoproj/argo-workflows/commit/f2676c875e0af8e43b8967c669a33871bc02995c) Fix retry node name issue on error (#1732) - * [d38a107c8](https://github.com/argoproj/argo-workflows/commit/d38a107c84b91ad476f4760d984450efda296fdc) Refactoring Template Resolution Logic (#1744) - * [23e946045](https://github.com/argoproj/argo-workflows/commit/23e9460451566e04b14acd336fccf54b0623efc4) Error occurred on pod watch should result in an error on the wait container (#1776) - * [57d051b52](https://github.com/argoproj/argo-workflows/commit/57d051b52de7c9b78d926f0be7b158adb08803c8) Added hint 
when using certain tokens in when expressions (#1810) - * [0e79edff4](https://github.com/argoproj/argo-workflows/commit/0e79edff4b879558a19132035446fca2fbe3f2ca) Make kubectl print status and start/finished time (#1766) - * [723b3c15e](https://github.com/argoproj/argo-workflows/commit/723b3c15e55d2f8dceb86f1ac0a6dc7d1a58f10b) Fix code-gen docs (#1811) - * [711bb1148](https://github.com/argoproj/argo-workflows/commit/711bb11483a0ccb46600795c636c98d9c3a7f16c) Fix withParam node naming issue (#1800) - * [4351a3360](https://github.com/argoproj/argo-workflows/commit/4351a3360f6b20298a28a06be545bc349b22b9e4) Minor doc fix (#1808) - * [efb748fe3](https://github.com/argoproj/argo-workflows/commit/efb748fe35c6f24c736db8e002078abd02b57141) Fix some issues in examples (#1804) - * [a3e312899](https://github.com/argoproj/argo-workflows/commit/a3e31289915e4d129a743b9284442775ef41a15c) Add documentation for executors (#1778) - * [1ac75b390](https://github.com/argoproj/argo-workflows/commit/1ac75b39040e6f292ee322122a157e05f55f1f73) Add to linter (#1777) - * [3bead0db3](https://github.com/argoproj/argo-workflows/commit/3bead0db3d2777638992ba5e11a2de1c65be162c) Add ability to retry nodes after errors (#1696) - * [b50845e22](https://github.com/argoproj/argo-workflows/commit/b50845e22e8910d27291bab30f0c3dbef1fe5dad) Support no-headers flag (#1760) - * [7ea2b2f8c](https://github.com/argoproj/argo-workflows/commit/7ea2b2f8c10c3004c3c13a49d200df704895f93c) Minor rework of suspened node (#1752) - * [9ab1bc88f](https://github.com/argoproj/argo-workflows/commit/9ab1bc88f58c551208ce5e76eea0c6fb83359710) Update README.md (#1768) - * [e66fa328e](https://github.com/argoproj/argo-workflows/commit/e66fa328e396fe35dfad8ab1e3088ab088aea8be) Fixed lint issues (#1739) - * [63e12d098](https://github.com/argoproj/argo-workflows/commit/63e12d0986cb4b138715b8f2b9c483de5547f64e) binary up version (#1748) - * [1b7f9becd](https://github.com/argoproj/argo-workflows/commit/1b7f9becdfc47688018e6d71ac417fb7278637ab) Minor typo fix (#1754) - * [4c002677e](https://github.com/argoproj/argo-workflows/commit/4c002677e360beb9d6e4398618bafdce025cda42) fix blank lines (#1753) - * [fae738268](https://github.com/argoproj/argo-workflows/commit/fae7382686d917d78e3909d1f6db79c272a1aa11) Fail suspended steps after deadline (#1704) - * [b2d7ee62e](https://github.com/argoproj/argo-workflows/commit/b2d7ee62e903c062b62da35dc390e38c05ba1591) Fix typo in docs (#1745) - * [f25924486](https://github.com/argoproj/argo-workflows/commit/f2592448636bc35b7f9ec0fdc48b92135ba9852f) Removed uneccessary debug Println (#1741) - * [846d01edd](https://github.com/argoproj/argo-workflows/commit/846d01eddc271f330e00414d1ea2277ac390651b) Filter workflows in list based on name prefix (#1721) - * [8ae688c6c](https://github.com/argoproj/argo-workflows/commit/8ae688c6cbcc9494195431be7754fe69eb33a9f4) Added ability to auto-resume from suspended state (#1715) - * [fb617b63a](https://github.com/argoproj/argo-workflows/commit/fb617b63a09679bb74427cd5d13192b1fd8f48cf) unquote strings from parameter-file (#1733) - * [341203417](https://github.com/argoproj/argo-workflows/commit/34120341747e0261425b49a5600c42efbb1812a3) example for pod spec from output of previous step (#1724) - * [12b983f4c](https://github.com/argoproj/argo-workflows/commit/12b983f4c00bda3f9bedd14a316b0beade6158ed) Add gonum.org/v1/gonum/graph to Gopkg.toml (#1726) - * [327fcb242](https://github.com/argoproj/argo-workflows/commit/327fcb242b20107c859142b3dd68745b3440e5eb) Added Protobuf extension (#1601) - * 
[602e5ad8e](https://github.com/argoproj/argo-workflows/commit/602e5ad8e4002f7df0bd02014505cbc7de3fd37c) Fix invitation link. (#1710) - * [eb29ae4c8](https://github.com/argoproj/argo-workflows/commit/eb29ae4c89b89d4d4192a5f8c08d44ad31fa4cd2) Fixes bugs in demo (#1700) - * [ebb25b861](https://github.com/argoproj/argo-workflows/commit/ebb25b861b1b452207582b6dea0060bf418037ff) `restartPolicy` -> `retryStrategy` in examples (#1702) - * [167d65b15](https://github.com/argoproj/argo-workflows/commit/167d65b15ac0d3483071e0506f3e98a92a034183) Fixed incorrect `pod.name` in retry pods (#1699) - * [e0818029d](https://github.com/argoproj/argo-workflows/commit/e0818029d190cfd616527cccf208b5a9866224e1) fixed broke metrics endpoint per #1634 (#1695) - * [36fd09a13](https://github.com/argoproj/argo-workflows/commit/36fd09a1321fd145b36b4f9067b61fabad363926) Apply Strategic merge patch against the pod spec (#1687) - * [d35464670](https://github.com/argoproj/argo-workflows/commit/d35464670439b660c7c9ab0bcd9d3686ffe08687) Fix retry node processing (#1694) - * [dd517e4c2](https://github.com/argoproj/argo-workflows/commit/dd517e4c2db59b4c704ed7aeaed8505a757a60f7) Print multiple workflows in one command (#1650) - * [09a6cb4e8](https://github.com/argoproj/argo-workflows/commit/09a6cb4e81c1d9f5c8c082c9e96ce783fa20796f) Added status of previous steps as variables (#1681) - * [ad3dd4d4a](https://github.com/argoproj/argo-workflows/commit/ad3dd4d4a41b58e30983e8a93f06c1526c8aa9a0) Fix issue that workflow.priority substitution didn't pass validation (#1690) - * [095d67f8d](https://github.com/argoproj/argo-workflows/commit/095d67f8d0f1d309529c8a400cb16d0a0e2765b9) Store locally referenced template properly (#1670) - * [30a91ef00](https://github.com/argoproj/argo-workflows/commit/30a91ef002e7c8850f45e6fe7ac01a7966ff31b8) Handle retried node properly (#1669) - * [263cb7038](https://github.com/argoproj/argo-workflows/commit/263cb7038b927fabe0f67b4455e17534b51c2989) Update README.md Argo Ansible role: Provisioning Argo Workflows on Kubernetes/OpenShift (#1673) - * [867f5ff7e](https://github.com/argoproj/argo-workflows/commit/867f5ff7e72bc8b5d9b6be5a5f8849ccd2a1108c) Handle sidecar killing properly (#1675) - * [f0ab9df9e](https://github.com/argoproj/argo-workflows/commit/f0ab9df9ef8090fc388c32adbe9180dbaee683f5) Fix typo (#1679) - * [502db42db](https://github.com/argoproj/argo-workflows/commit/502db42db84f317af8660d862ddd48c28cbd3b8e) Don't provision VM for empty artifacts (#1660) - * [b5dcac811](https://github.com/argoproj/argo-workflows/commit/b5dcac8114d6f4b5fe32bae049d2c70b4dea4d15) Resolve WorkflowTemplate lazily (#1655) - * [d15994bbb](https://github.com/argoproj/argo-workflows/commit/d15994bbbb0a1ca8fc60b452ae532b10510c4762) [User] Update Argo users list (#1661) - * [4a654ca69](https://github.com/argoproj/argo-workflows/commit/4a654ca6914923656bd1dc21ca5b8c4aa75b9e25) Stop failing if artifact file exists, but empty (#1653) - * [c6cddafe1](https://github.com/argoproj/argo-workflows/commit/c6cddafe19854d91bff41f093f48ac444a781c0d) Bug fixes in getting started (#1656) - * [ec7883735](https://github.com/argoproj/argo-workflows/commit/ec7883735e20f87fe483b26c947bd891a695a2bd) Update workflow_level_host_aliases.yaml (#1657) - * [7e5af4748](https://github.com/argoproj/argo-workflows/commit/7e5af4748d406f244378da86fda339a0c9e74476) Fix child node template handling (#1654) - * [7f385a6bb](https://github.com/argoproj/argo-workflows/commit/7f385a6bbf67ab780ab86c941cbd426f9b003834) Use stored templates to raggregate step outputs 
(#1651) - * [cd6f36279](https://github.com/argoproj/argo-workflows/commit/cd6f3627992b6947dd47c98420d0a0fec4de9112) Fix dag output aggregation correctly (#1649) - * [706075a55](https://github.com/argoproj/argo-workflows/commit/706075a55f694f94cfe729efca8eacb31d14f7f0) Fix DAG output aggregation (#1648) - * [fa32dabdc](https://github.com/argoproj/argo-workflows/commit/fa32dabdc0a5a74469a0e86e04b9868508503a73) Fix missing merged changes in validate.go (#1647) - * [457160275](https://github.com/argoproj/argo-workflows/commit/457160275cc42be4c5fa6c1050c6e61a614b9544) fixed example wrong comment (#1643) - * [69fd8a58d](https://github.com/argoproj/argo-workflows/commit/69fd8a58d4877d616f3b576a2e8c8cbd224e029a) Delay killing sidecars until artifacts are saved (#1645) - * [ec5f98605](https://github.com/argoproj/argo-workflows/commit/ec5f98605429f8d757f3b92fe6b2a2e8a4cb235f) pin colinmarc/hdfs to the next commit, which no longer has vendored deps (#1622) - * [4b84f975f](https://github.com/argoproj/argo-workflows/commit/4b84f975f14714cedad2dc9697c9a181075b04ea) Fix global lint issue (#1641) - * [bb579138c](https://github.com/argoproj/argo-workflows/commit/bb579138c6104baab70f859e8ed05954718c5ee8) Fix regression where global outputs were unresolveable in DAGs (#1640) - * [cbf99682c](https://github.com/argoproj/argo-workflows/commit/cbf99682c7a84306066b059834a625892b86d28c) Fix regression where parallelism could cause workflow to fail (#1639) - -### Contributors - - * Adam Thornton - * Aditya Sundaramurthy - * Akshay Chitneni - * Alessandro Marrella - * Alex Collins - * Alexander Matyushentsev - * Alexey Volkov - * Anastasia Satonina - * Andrew Suderman - * Antoine Dao - * Avi Weit - * Daisuke Taniwaki - * David Seapy - * Deepen Mehta - * Derek Wang - * Elton - * Erik Parmann - * Huan-Cheng Chang - * Jesse Suen - * Jonathan Steele - * Jonathon Belotti - * Julian Fahrer - * Marek Čermák - * MengZeLee - * Michael Crenshaw - * Neutron Soutmun - * Niklas Hansson - * Pavel Kravchenko - * Per Buer - * Praneet Chandra - * Rick Avendaño - * Saravanan Balasubramanian - * Shubham Koli (FaultyCarry) - * Simon Behar - * Tobias Bradtke - * Vincent Boulineau - * Wei Yan - * William Reed - * Zhipeng Wang - * descrepes - * dherman - * gerdos82 - * mark9white - * nglinh - * sang - * vdinesh2461990 - * zhujl1991 - -## v2.4.3 (2019-12-05) - - * [cfe5f377b](https://github.com/argoproj/argo-workflows/commit/cfe5f377bc3552fba90afe6db7a76edd92c753cd) Update version to v2.4.3 - * [256e9a2ab](https://github.com/argoproj/argo-workflows/commit/256e9a2abb21f3fc3f3e5434852ff01ffb715a3b) Update version to v2.4.3 - * [b99e6a0ea](https://github.com/argoproj/argo-workflows/commit/b99e6a0ea326c0c4616a4ca6a26b8ce22243adb9) Error occurred on pod watch should result in an error on the wait container (#1776) - * [b00fea143](https://github.com/argoproj/argo-workflows/commit/b00fea143e269f28e0b3a2ba80aef4a1fa4b0ae7) SSL enabled database connection for workflow repository (#1712) (#1756) - * [400274f49](https://github.com/argoproj/argo-workflows/commit/400274f490ee8407a8cf49f9c5023c0290ecfc4c) Added hint when using certain tokens in when expressions (#1810) - * [15a0aa7a7](https://github.com/argoproj/argo-workflows/commit/15a0aa7a7080bddf00fc6b228d9bf87db194de3b) Handle operation level errors PVC in Retry (#1762) - * [81c7f5bd7](https://github.com/argoproj/argo-workflows/commit/81c7f5bd79e6c792601bcbe9d43acccd9399f5fc) Do not resolve remote templates in lint (#1787) - * 
[20cec1d9b](https://github.com/argoproj/argo-workflows/commit/20cec1d9bbbae8d9da9a2cd25f74922c940e6d96) Fix retry node name issue on error (#1732) - * [468cb8fe5](https://github.com/argoproj/argo-workflows/commit/468cb8fe52b2208a82e65106a1e5e8cab29d8cac) Refactoring Template Resolution Logic (#1744) - * [67369fb37](https://github.com/argoproj/argo-workflows/commit/67369fb370fc3adf76dfaee858e3abc5db1a3ceb) Support no-headers flag (#1760) - * [340ab0734](https://github.com/argoproj/argo-workflows/commit/340ab073417df98f2ae698b523e78e1ed0099fce) Filter workflows in list based on name prefix (#1721) - * [e9581273b](https://github.com/argoproj/argo-workflows/commit/e9581273b5e56066e936ce7f2eb9ccd2652d15cc) Added ability to auto-resume from suspended state (#1715) - * [a0a1b6fb1](https://github.com/argoproj/argo-workflows/commit/a0a1b6fb1b0afbbd19d9815b36a3a32c0126dd4c) Fixed incorrect `pod.name` in retry pods (#1699) - -### Contributors - - * Antoine Dao - * Daisuke Taniwaki - * Saravanan Balasubramanian - * Simon Behar - * gerdos82 - * sang - -## v2.4.2 (2019-10-21) - - * [675c66267](https://github.com/argoproj/argo-workflows/commit/675c66267f0c916de0f233d8101aa0646acb46d4) fixed broke metrics endpoint per #1634 (#1695) - * [1a9310c6f](https://github.com/argoproj/argo-workflows/commit/1a9310c6fd089b9f8f848b324cdf219d86684bd4) Apply Strategic merge patch against the pod spec (#1687) - * [0d0562aa1](https://github.com/argoproj/argo-workflows/commit/0d0562aa122b4ef48fd81c3fc2aa9a7bd92ae4ce) Fix retry node processing (#1694) - * [08f49d01c](https://github.com/argoproj/argo-workflows/commit/08f49d01cf6b634f5a2b4e16f4da04bfc51037ab) Print multiple workflows in one command (#1650) - * [defbc297d](https://github.com/argoproj/argo-workflows/commit/defbc297d7e1abb4c729278e362c438cc09d23c7) Added status of previous steps as variables (#1681) - * [6ac443302](https://github.com/argoproj/argo-workflows/commit/6ac4433020fe48cacfeda60f0f296861e92e742f) Fix issue that workflow.priority substitution didn't pass validation (#1690) - * [ab9d710a0](https://github.com/argoproj/argo-workflows/commit/ab9d710a007eb65f8dc5fddf344d65dca5348ddb) Update version to v2.4.2 - * [338af3e7a](https://github.com/argoproj/argo-workflows/commit/338af3e7a4f5b22ef6eead04dffd774baec56391) Store locally referenced template properly (#1670) - * [be0929dcd](https://github.com/argoproj/argo-workflows/commit/be0929dcd89188054a1a3f0ca424d990468d1381) Handle retried node properly (#1669) - * [88e210ded](https://github.com/argoproj/argo-workflows/commit/88e210ded6354f1867837165292901bfb72c2670) Update README.md Argo Ansible role: Provisioning Argo Workflows on Kubernetes/OpenShift (#1673) - * [946b0fa26](https://github.com/argoproj/argo-workflows/commit/946b0fa26a11090498b118e69f3f4a840d89afd2) Handle sidecar killing properly (#1675) - * [4ce972bd7](https://github.com/argoproj/argo-workflows/commit/4ce972bd7dba747a0208b5ac1457db4e19390e85) Fix typo (#1679) - -### Contributors - - * Daisuke Taniwaki - * Marek Čermák - * Rick Avendaño - * Saravanan Balasubramanian - * Simon Behar - * Tobias Bradtke - * mark9white - -## v2.4.1 (2019-10-08) - - * [d7f099992](https://github.com/argoproj/argo-workflows/commit/d7f099992d8cf93c280df2ed38a0b9a1b2614e56) Update version to v2.4.1 - * [6b876b205](https://github.com/argoproj/argo-workflows/commit/6b876b20599e171ff223aaee21e56b39ab978ed7) Don't provision VM for empty artifacts (#1660) - * [0d00a52ed](https://github.com/argoproj/argo-workflows/commit/0d00a52ed28653e3135b3956e62e02efffa62cac) Resolve 
WorkflowTemplate lazily (#1655) - * [effd7c33c](https://github.com/argoproj/argo-workflows/commit/effd7c33cd73c82ae762cc35b312b180d5ab282e) Stop failing if artifact file exists, but empty (#1653) - -### Contributors - - * Alexey Volkov - * Daisuke Taniwaki - * Saravanan Balasubramanian - * Simon Behar - -## v2.4.0 (2019-10-07) - - * [a65763142](https://github.com/argoproj/argo-workflows/commit/a65763142ecc2dbd3507f1da860f64220c535f5b) Fix child node template handling (#1654) - * [982c7c559](https://github.com/argoproj/argo-workflows/commit/982c7c55994c87bab15fd71ef2a17bd905d63edd) Use stored templates to raggregate step outputs (#1651) - * [a8305ed7e](https://github.com/argoproj/argo-workflows/commit/a8305ed7e6f3a4ac5876b1468245716e88e71e92) Fix dag output aggregation correctly (#1649) - * [f14dd56d9](https://github.com/argoproj/argo-workflows/commit/f14dd56d9720ae5116fa6b0e3d320a05fc8bc6f4) Fix DAG output aggregation (#1648) - * [30c3b9372](https://github.com/argoproj/argo-workflows/commit/30c3b937240c0d12eb2ad020d55fe246759a5bbe) Fix missing merged changes in validate.go (#1647) - * [85f50e30a](https://github.com/argoproj/argo-workflows/commit/85f50e30a452a78aab547f17c19fe8464a10685c) fixed example wrong comment (#1643) - * [09e22fb25](https://github.com/argoproj/argo-workflows/commit/09e22fb257554a33f86bac9dff2532ae23975093) Delay killing sidecars until artifacts are saved (#1645) - * [99e28f1ce](https://github.com/argoproj/argo-workflows/commit/99e28f1ce2baf35d686f04974b878f99e4ca4827) pin colinmarc/hdfs to the next commit, which no longer has vendored deps (#1622) - * [885aae405](https://github.com/argoproj/argo-workflows/commit/885aae40589dc4f004a0e1027cd651a816e493ee) Fix global lint issue (#1641) - * [972abdd62](https://github.com/argoproj/argo-workflows/commit/972abdd623265777b7ceb6271139812a02471a56) Fix regression where global outputs were unresolveable in DAGs (#1640) - * [7272bec46](https://github.com/argoproj/argo-workflows/commit/7272bec4655affc5bae7254f1630c5b68948fe15) Fix regression where parallelism could cause workflow to fail (#1639) - * [6b77abb2a](https://github.com/argoproj/argo-workflows/commit/6b77abb2aa40b6c321dd7a6671a2f9ce18e38955) Add back SetGlogLevel calls - * [e7544f3d8](https://github.com/argoproj/argo-workflows/commit/e7544f3d82909b267335b7ee19a4fc6a2f0e5c5b) Update version to v2.4.0 - * [76461f925](https://github.com/argoproj/argo-workflows/commit/76461f925e4e53cdf65b362115d09aa5325dea6b) Update CHANGELOG for v2.4.0 (#1636) - * [c75a08616](https://github.com/argoproj/argo-workflows/commit/c75a08616e8e6bd1aeb37fc9fc824197491aec9c) Regenerate installation manifests (#1638) - * [e20cb28cf](https://github.com/argoproj/argo-workflows/commit/e20cb28cf8a4f331316535dcfd793ea91c281feb) Grant get secret role to controller to support persistence (#1615) - * [644946e4e](https://github.com/argoproj/argo-workflows/commit/644946e4e07672051f9be0f71ca0d2ca7641648e) Save stored template ID in nodes (#1631) - * [5d530beca](https://github.com/argoproj/argo-workflows/commit/5d530becae49e1e235d72dd5ac29cc40282bc401) Fix retry workflow state (#1632) - * [2f0af5221](https://github.com/argoproj/argo-workflows/commit/2f0af5221030858e6a5306545ca3577aad17ac1a) Update operator.go (#1630) - * [6acea0c1c](https://github.com/argoproj/argo-workflows/commit/6acea0c1c21a17e14dc95632e80655f7fff09e2e) Store resolved templates (#1552) - * [df8260d6f](https://github.com/argoproj/argo-workflows/commit/df8260d6f64fcacc24c13cf5cc4a3fc3f0a6db18) Increase timeout of golangci-lint (#1623) - * 
[138f89f68](https://github.com/argoproj/argo-workflows/commit/138f89f684cec5a8b237584e46199815922f98c3) updated invite link (#1621) - * [d027188d0](https://github.com/argoproj/argo-workflows/commit/d027188d0fce8e44bb0cefb2d46c1e55b9f112a2) Updated the API Rule Violations list (#1618) - * [a317fbf14](https://github.com/argoproj/argo-workflows/commit/a317fbf1412c4636066def42cd6b7adc732319f3) Prevent controller from crashing due to glog writing to /tmp (#1613) - * [20e91ea58](https://github.com/argoproj/argo-workflows/commit/20e91ea580e532b9c62f3bd16c5f6f8ed0838fdd) Added WorkflowStatus and NodeStatus types to the Open API Spec. (#1614) - * [ffb281a55](https://github.com/argoproj/argo-workflows/commit/ffb281a5567666db68a5acab03ba7a0188954bf8) Small code cleanup and add tests (#1562) - * [1cb8345de](https://github.com/argoproj/argo-workflows/commit/1cb8345de0694cffc30882eac59a05cb8eb06bc4) Add merge keys to Workflow objects to allow for StrategicMergePatches (#1611) - * [c855a66a6](https://github.com/argoproj/argo-workflows/commit/c855a66a6a9e3239fe5d585f5b5f36a07d48c5ed) Increased Lint timeout (#1612) - * [4bf83fc3d](https://github.com/argoproj/argo-workflows/commit/4bf83fc3d0d6b1e1d2c85f7b9b10a051134f7b0a) Fix DAG enable failFast will hang in some case (#1595) - * [e9f3d9cbc](https://github.com/argoproj/argo-workflows/commit/e9f3d9cbc029a9d55cf35ea51c2486078110bb2d) Do not relocate the mounted docker.sock (#1607) - * [1bd50fa2d](https://github.com/argoproj/argo-workflows/commit/1bd50fa2dfd828a04ff012868c98ba33bac41136) Added retry around RuntimeExecutor.Wait call when waiting for main container completion (#1597) - * [0393427b5](https://github.com/argoproj/argo-workflows/commit/0393427b54f397237152f5b74f6d09d0c20c1618) Issue1571 Support ability to assume IAM roles in S3 Artifacts (#1587) - * [ffc0c84f5](https://github.com/argoproj/argo-workflows/commit/ffc0c84f509226f02d47cb2d5280faa7e2b92841) Update Gopkg.toml and Gopkg.lock (#1596) - * [aa3a8f1c9](https://github.com/argoproj/argo-workflows/commit/aa3a8f1c99fcb70bb199750644f74b17812cc586) Update from github.com/ghodss/yaml to sigs.k8s.io/yaml (#1572) - * [07a26f167](https://github.com/argoproj/argo-workflows/commit/07a26f16747e3c71e76ba83b43336fd7a49622fb) Regard resource templates as leaf nodes (#1593) - * [89e959e7a](https://github.com/argoproj/argo-workflows/commit/89e959e7aaf396bc09cc012014e425ece2b5d644) Fix workflow template in namespaced controller (#1580) - * [cd04ab8bb](https://github.com/argoproj/argo-workflows/commit/cd04ab8bb923012182f2dc2b35dbf14726f7b1a4) remove redundant codes (#1582) - * [5bba8449a](https://github.com/argoproj/argo-workflows/commit/5bba8449ac7f3c563282eec1cb1f0dfc28d0d7c8) Add entrypoint label to workflow default labels (#1550) - * [9685d7b67](https://github.com/argoproj/argo-workflows/commit/9685d7b67be91bf81059c1c96120a4fe6288399e) Fix inputs and arguments during template resolution (#1545) - * [19210ba63](https://github.com/argoproj/argo-workflows/commit/19210ba635a4288f51eb2dd827f03715aea72750) added DataStax as an organization that uses Argo (#1576) - * [b5f2fdef0](https://github.com/argoproj/argo-workflows/commit/b5f2fdef097fe0fd69c60c6ada893547fd944d22) Support AutomountServiceAccountToken and executor specific service account(#1480) - * [8808726cf](https://github.com/argoproj/argo-workflows/commit/8808726cf3d0bc3aa71e3f1653262685dbfa0acf) Fix issue saving outputs which overlap paths with inputs (#1567) - * 
[ba7a1ed65](https://github.com/argoproj/argo-workflows/commit/ba7a1ed650e7251dfadf5e9ae1fc2cdda7e9eaa2) Add coverage make target (#1557) - * [ced0ee96c](https://github.com/argoproj/argo-workflows/commit/ced0ee96ced59d9b070a1e81a9c148f78a69bfb9) Document workflow controller dockerSockPath config (#1555) - * [3e95f2da6](https://github.com/argoproj/argo-workflows/commit/3e95f2da6af78cc482009692b65cdc565a0ff412) Optimize argo binary install documentation (#1563) - * [e2ebb1666](https://github.com/argoproj/argo-workflows/commit/e2ebb166683d8a6c96502ce6e72f1a3ae48f0b4b) docs(readme): fix workflow types link (#1560) - * [6d150a15e](https://github.com/argoproj/argo-workflows/commit/6d150a15eb96183fb21faf6a49b0997e6150880b) Initialize the wfClientset before using it (#1548) - * [5331fc02e](https://github.com/argoproj/argo-workflows/commit/5331fc02e257266a4a5887dfe6277e5a0b42e7fc) Remove GLog config from argo executor (#1537) - * [ed4ac6d06](https://github.com/argoproj/argo-workflows/commit/ed4ac6d0697401da6dec3989ecd63dd7567f0750) Update main.go (#1536) - -### Contributors - - * Alexander Matyushentsev - * Anastasia Satonina - * Anes Benmerzoug - * Brian Mericle - * Daisuke Taniwaki - * David Seapy - * Ed Lee - * Erik Parmann - * Ian Howell - * Jesse Suen - * John Wass - * Jonathon Belotti - * Mostapha Sadeghipour Roudsari - * Pablo Osinaga - * Premkumar Masilamani - * Saravanan Balasubramanian - * Takayuki Kasai - * Xianlu Bird - * Xie.CS - * mark9white - -## v2.4.0-rc1 (2019-08-08) - - * [6131721f4](https://github.com/argoproj/argo-workflows/commit/6131721f43545196399d7ffe3a72c1b9dc04df87) Remove GLog config from argo executor (#1537) - * [8e94ca370](https://github.com/argoproj/argo-workflows/commit/8e94ca3709c55dd2004509790e2326d1863de272) Update main.go (#1536) - * [dfb06b6df](https://github.com/argoproj/argo-workflows/commit/dfb06b6dfa8868324103bb67fbaf712c69238206) Update version to v2.4.0-rc1 - * [9fca14412](https://github.com/argoproj/argo-workflows/commit/9fca144128c97499d11f07a0ee008a9921e1f5f8) Update argo dependencies to kubernetes v1.14 (#1530) - * [0246d184a](https://github.com/argoproj/argo-workflows/commit/0246d184add04e44f77ffbe00e796b3adaf535d2) Use cache to retrieve WorkflowTemplates (#1534) - * [4864c32ff](https://github.com/argoproj/argo-workflows/commit/4864c32ffa40861c5ca066f67615da6d52eaa8b5) Update README.md (#1533) - * [4df114fae](https://github.com/argoproj/argo-workflows/commit/4df114fae66e87727cfcb871731ec002af1515c7) Update CHANGELOG for v2.4 (#1531) - * [c7e5cba14](https://github.com/argoproj/argo-workflows/commit/c7e5cba14a835fbfd0aba88b99197675ce1f0c66) Introduce podGC strategy for deleting completed/successful pods (#1234) - * [bb0d14af9](https://github.com/argoproj/argo-workflows/commit/bb0d14af9d320a141cb307b6a883c1eaafa498c3) Update ISSUE_TEMPLATE.md (#1528) - * [b5702d8ae](https://github.com/argoproj/argo-workflows/commit/b5702d8ae725c5caa4058d39f77e6d1e7e549da4) Format sources and order imports with the help of goimports (#1504) - * [d3ff77bf4](https://github.com/argoproj/argo-workflows/commit/d3ff77bf475095c73f034fb3b23c279c62f4269e) Added Architecture doc (#1515) - * [fc1ec1a51](https://github.com/argoproj/argo-workflows/commit/fc1ec1a51462c9a114417db801e3a9715d3dc6b4) WorkflowTemplate CRD (#1312) - * [f99d3266d](https://github.com/argoproj/argo-workflows/commit/f99d3266d1879579338f124c56f1fc14867308a3) Expose all input parameters to template as JSON (#1488) - * 
[bea605261](https://github.com/argoproj/argo-workflows/commit/bea605261be82d8bb91bf703ad68875f1093ebb8) Fix argo logs empty content when workflow run in virtual kubelet env (#1201) - * [d82de8813](https://github.com/argoproj/argo-workflows/commit/d82de8813910afaf9b3fb77d029faa7953bfee3a) Implemented support for WorkflowSpec.ArtifactRepositoryRef (#1350) - * [0fa20c7ba](https://github.com/argoproj/argo-workflows/commit/0fa20c7ba317d8c9a837bcc37d92f3fe79808499) Fix validation (#1508) - * [87e2cb604](https://github.com/argoproj/argo-workflows/commit/87e2cb6043a305839ca37cc77c7611aaa7bdbd44) Add --dry-run option to `argo submit` (#1506) - * [e7e50af6e](https://github.com/argoproj/argo-workflows/commit/e7e50af6e56b1eeddccc82a2dbc8b421d1a63942) Support git shallow clones and additional ref fetches (#1521) - * [605489cd5](https://github.com/argoproj/argo-workflows/commit/605489cd5dd688527e60efee0aff239e3439c2dc) Allow overriding workflow labels in 'argo submit' (#1475) - * [47eba5191](https://github.com/argoproj/argo-workflows/commit/47eba519107c229edf61dbe024a6a5e0f1618a8d) Fix issue [Documentation] kubectl get service argo-artifacts -o wide (#1516) - * [02f38262c](https://github.com/argoproj/argo-workflows/commit/02f38262c40901346ddd622685bc6bfd344a2717) Fixed #1287 Executor kubectl version is obsolete (#1513) - * [f62105e65](https://github.com/argoproj/argo-workflows/commit/f62105e659a22ccc0875151698eab540090885f6) Allow Makefile variables to be set from the command line (#1501) - * [e62be65ba](https://github.com/argoproj/argo-workflows/commit/e62be65ba25ae68a1bed10bddf33b4dae4991249) Fix a compiler error in a unit test (#1514) - * [5c5c29af7](https://github.com/argoproj/argo-workflows/commit/5c5c29af729b39f5f9b8a7fe6b8c1dede53eae3a) Fix the lint target (#1505) - * [e03287bfb](https://github.com/argoproj/argo-workflows/commit/e03287bfb7f97f639c8d81617808f709ca547eaa) Allow output parameters with .value, not only .valueFrom (#1336) - * [781d3b8ae](https://github.com/argoproj/argo-workflows/commit/781d3b8ae243b2c32ea3c4abd5b4a99fe9fc9cad) Implemented Conditionally annotate outputs of script template only when consumed #1359 (#1462) - * [b028e61db](https://github.com/argoproj/argo-workflows/commit/b028e61db71e74b5730469a5f23a734937ddb8d9) change 'continue-on-fail' example to better reflect its description (#1494) - * [97e824c9a](https://github.com/argoproj/argo-workflows/commit/97e824c9a5b71baea658e8de6130bee089fb764d) Readme update to add argo and airflow comparison (#1502) - * [414d6ce7b](https://github.com/argoproj/argo-workflows/commit/414d6ce7b8aebcbd3b8822f407ec71ed465c103d) Fix a compiler error (#1500) - * [ca1d5e671](https://github.com/argoproj/argo-workflows/commit/ca1d5e671519aaa9f38f5f2564eb70c138fadda7) Fix: Support the List within List type in withParam #1471 (#1473) - * [75cb8b9cd](https://github.com/argoproj/argo-workflows/commit/75cb8b9cd92cd7fcce4b921b88232bb05f2672b2) Fix #1366 unpredictable global artifact behavior (#1461) - * [082e5c4f6](https://github.com/argoproj/argo-workflows/commit/082e5c4f617c4120584ad601a8d85e0a3ce36a26) Exposed workflow priority as a variable (#1476) - * [38c4def7f](https://github.com/argoproj/argo-workflows/commit/38c4def7fb100e954757649553db8c04ea64f318) Fix: Argo CLI should show warning if there is no workflow definition in file #1486 - * [af7e496db](https://github.com/argoproj/argo-workflows/commit/af7e496db6ee8c10c9a2b6b51a27265bc6b0ee6d) Add Commodus Tech as official user (#1484) - * 
* [8c559642f](https://github.com/argoproj/argo-workflows/commit/8c559642f2ec8abaea3204279fa3d6ff5ad40add) Update OWNERS (#1485)
* [007d1f881](https://github.com/argoproj/argo-workflows/commit/007d1f8816736a758fa3720f0081e01dbc4200e3) Fix: 1008 `argo wait` and `argo submit --wait` should exit 1 if workflow fails (#1467)
* [3ab7bc94c](https://github.com/argoproj/argo-workflows/commit/3ab7bc94c01d7a470bd05198b99c33e1a0221847) Document the insecureIgnoreHostKey git flag (#1483)
* [7d9bb51ae](https://github.com/argoproj/argo-workflows/commit/7d9bb51ae328f1a8cc7daf7d8ef108cf190df0ce) Fix failFast bug: When a node in the middle fails, the entire workflow will hang (#1468)
* [42adbf32e](https://github.com/argoproj/argo-workflows/commit/42adbf32e8d4c626c544795c2fc1adb70676e968) Add --no-color flag to logs (#1479)
* [67fc29c57](https://github.com/argoproj/argo-workflows/commit/67fc29c57db795a7020f355ab32cd883cfaf701e) fix typo: symboloic > symbolic (#1478)
* [7c3e1901f](https://github.com/argoproj/argo-workflows/commit/7c3e1901f49fe34cbe9d084274f6e64c48270635) Added Codec to the Argo community list (#1477)
* [0a9cf9d3b](https://github.com/argoproj/argo-workflows/commit/0a9cf9d3b06a3b304c0c690a298d8dc3d51c015b) Add doc about failFast feature (#1453)
* [6a5903000](https://github.com/argoproj/argo-workflows/commit/6a5903000fe8a7b3610c32435b2363cbf6334d1b) Support PodSecurityContext (#1463)
* [e392d854b](https://github.com/argoproj/argo-workflows/commit/e392d854bf78db89413782a23e62b0e38cf9c780) issue-1445: changing temp directory for output artifacts from root to tmp (#1458)
* [7a21adfeb](https://github.com/argoproj/argo-workflows/commit/7a21adfeb0af18c2452648a8bb3698a687f99b5e) New Feature: provide failFast flag, allow a DAG to run all branches of the DAG (either success or failure) (#1443)
* [b9b87b7fa](https://github.com/argoproj/argo-workflows/commit/b9b87b7fa0cd3177c2b89cacff189f4893c5af95) Centralized Longterm workflow persistence storage (#1344)
* [cb09609bd](https://github.com/argoproj/argo-workflows/commit/cb09609bd646a394c3a6f739dd447022a2bdb327) mention sidecar in failure message for sidecar containers (#1430)
* [373bbe6ec](https://github.com/argoproj/argo-workflows/commit/373bbe6ec9e819c39152292f79752792ce40b94d) Fix demo's doc issue of install minio chart (#1450)
* [835523341](https://github.com/argoproj/argo-workflows/commit/835523341bcc96b6e9358be71e7432d0ac4058c5) Add threekit to user list (#1444)
* [83f82ad17](https://github.com/argoproj/argo-workflows/commit/83f82ad172de0472643495d3ef3e0ce6d959900a) Improve bash completion (#1437)
* [ee0ec78ac](https://github.com/argoproj/argo-workflows/commit/ee0ec78ac98eaa112d343906a6e9b6490c39817f) Update documentation for workflow.outputs.artifacts (#1439)
* [9e30c06e3](https://github.com/argoproj/argo-workflows/commit/9e30c06e32b072b87e0d17095448d114175c713f) Revert "Update demo.md (#1396)" (#1433)
* [c08de6300](https://github.com/argoproj/argo-workflows/commit/c08de6300c3b394c34a5b3596455dcb50c29af48) Add paging function for list command (#1420)
* [bba2f9cbe](https://github.com/argoproj/argo-workflows/commit/bba2f9cbe9aa0eb053c19b03bc8fa7c002f579b0) Fixed: Implemented Template level service account (#1354)
* [d635c1def](https://github.com/argoproj/argo-workflows/commit/d635c1def74936869edbd8b9037ac81ea0af1772) Ability to configure hostPath mount for `/var/run/docker.sock` (#1419)
* [d2f7162ac](https://github.com/argoproj/argo-workflows/commit/d2f7162ac26550642ebe1792c65fb5e6ca9c0e7a) Terminate all containers within pod after main container completes (#1423)
* [1607d74a8](https://github.com/argoproj/argo-workflows/commit/1607d74a8de0704b82627364645a99b699d40cc0) PNS executor intermittently failed to capture entire log of script templates (#1406)
* [5e47256c7](https://github.com/argoproj/argo-workflows/commit/5e47256c7f86b56cfbf2ce53f73ed093eef2e3b6) Fix typo (#1431)
* [5635c33aa](https://github.com/argoproj/argo-workflows/commit/5635c33aa263080fe84e29a11a52f86fee583ca2) Update demo.md (#1396)
* [83425455b](https://github.com/argoproj/argo-workflows/commit/83425455bff34527e65ca1371347eed5203ae99a) Add OVH as official user (#1417)
* [82e5f63d3](https://github.com/argoproj/argo-workflows/commit/82e5f63d3680e7e4a22747803b0753b5ec31d2ad) Typo fix in ARTIFACT_REPO.md (#1425)
* [15fa6f52d](https://github.com/argoproj/argo-workflows/commit/15fa6f52d926ee5e839321900f613f6e546e6b6e) Update OWNERS (#1429)
* [96b9a40e9](https://github.com/argoproj/argo-workflows/commit/96b9a40e9aafe9c0132ce1b9f1eb01f05c3894ca) Orders uses alphabetically (#1411)
* [bc81fe288](https://github.com/argoproj/argo-workflows/commit/bc81fe288ebf9811774b36dd6eba9a851ac7717e) Fixed: persistentvolumeclaims already exists #1130 (#1363)
* [6a042d1f7](https://github.com/argoproj/argo-workflows/commit/6a042d1f7eb01f1f369c2325aecebf71a3bea3a4) Update README.md (#1404)
* [aa811fbdb](https://github.com/argoproj/argo-workflows/commit/aa811fbdb914fe386cfbf3fa84a51bfd5104b5d0) Update README.md (#1402)
* [abe3c99f1](https://github.com/argoproj/argo-workflows/commit/abe3c99f19a1ec28775a276b50ad588a2dd660ca) Add Mirantis as an official user (#1401)
* [18ab750ae](https://github.com/argoproj/argo-workflows/commit/18ab750aea4de8f7dc67433f4e73505c80e13222) Added Argo Rollouts to README (#1388)
* [67714f99b](https://github.com/argoproj/argo-workflows/commit/67714f99b4bf664eb5e853b25ebf4b12bb98f733) Make locating kubeconfig in example os independent (#1393)
* [672dc04f7](https://github.com/argoproj/argo-workflows/commit/672dc04f737a3be099fe64c343587c35074b0938) Fixed: withParam parsing of JSON/YAML lists #1389 (#1397)
* [b9aec5f98](https://github.com/argoproj/argo-workflows/commit/b9aec5f9833d5fa2055d06d1a71fdb75709eea21) Fixed: make verify-codegen is failing on the master branch (#1399) (#1400)
* [270aabf1d](https://github.com/argoproj/argo-workflows/commit/270aabf1d8cabd69b9851209ad5d9c874348e21d) Fixed: failed to save outputs: verify serviceaccount default:default has necessary privileges (#1362)
* [163f4a5d3](https://github.com/argoproj/argo-workflows/commit/163f4a5d322352bd98f9a88ebd6089cf5e5b49ad) Fixed: Support hostAliases in WorkflowSpec #1265 (#1365)
* [abb174788](https://github.com/argoproj/argo-workflows/commit/abb174788dce1bc6bed993a2967f7d8e112e44ca) Add Max Kelsen to USERS in README.md (#1374)
* [dc5491930](https://github.com/argoproj/argo-workflows/commit/dc5491930e09eebe700952e28359deeb8e0d2314) Update docs for the v2.3.0 release and to use the stable tag
* [4001c964d](https://github.com/argoproj/argo-workflows/commit/4001c964dbc70962e1cc1d80a4aff64cf8594ec3) Update README.md (#1372)
* [6c18039be](https://github.com/argoproj/argo-workflows/commit/6c18039be962996d971145be8349d2ed3e396c80) Fix issue where a DAG with exhausted retries would get stuck Running (#1364)
* [d7e74fe3a](https://github.com/argoproj/argo-workflows/commit/d7e74fe3a96277ba532e4a2f40303a92d2d0ce94) Validate action for resource templates (#1346)
* [810949d51](https://github.com/argoproj/argo-workflows/commit/810949d5106b4d1d533b647d1d61559c208b590a) Fixed: CLI Does Not Honor metadata.namespace #1288 (#1352)
* [e58859d79](https://github.com/argoproj/argo-workflows/commit/e58859d79516508838fead8222f0e26a6c2a2861) [Fix #1242] Failed DAG nodes are now kept and set to running on RetryWorkflow. (#1250)
* [d5fe5f981](https://github.com/argoproj/argo-workflows/commit/d5fe5f981fb112ba01ed77521ae688f8a15f67b9) Use golangci-lint instead of deprecated gometalinter (#1335)
* [26744d100](https://github.com/argoproj/argo-workflows/commit/26744d100e91eb757f48bfedd539e7e4a044faf3) Support an easy way to set owner reference (#1333)
* [8bf7578e1](https://github.com/argoproj/argo-workflows/commit/8bf7578e1884c61128603bbfaa677fd79cc17ea8) Add --status filter for get command (#1325)
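
A minimal sketch of the podGC strategy introduced in v2.4.0-rc1 above (#1234). The `podGC.strategy` field and the `OnPodSuccess` value follow the feature as documented; the generated name, image, and command below are illustrative only, not taken from the release:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: pod-gc-           # illustrative name
spec:
  entrypoint: main
  podGC:
    # Delete pods as soon as they succeed; failed pods stay around for debugging.
    strategy: OnPodSuccess
  templates:
  - name: main
    container:
      image: alpine:3.7           # illustrative image
      command: [echo, "this pod is deleted once it succeeds"]
```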

### Contributors

* Aisuko
* Alex Capras
* Alex Collins
* Alexander Matyushentsev
* Alexey Volkov
* Anes Benmerzoug
* Ben Wells
* Brandon Steinman
* Christian Muehlhaeuser
* Cristian Pop
* Daisuke Taniwaki
* Daniel Duvall
* Ed Lee
* Edwin Jacques
* Ian Howell
* Jacob O'Farrell
* Jaime
* Jean-Louis Queguiner
* Jesse Suen
* Jonathon Belotti
* Mostapha Sadeghipour Roudsari
* Mukulikak
* Orion Delwaterman
* Paul Brit
* Saravanan Balasubramanian
* Semjon Kopp
* Stephen Steiner
* Tim Schrodi
* Xianlu Bird
* Ziyang Wang
* commodus-sebastien
* hidekuro
* ianCambrio
* jacky
* mark9white
* tralexa

## v2.3.0 (2019-05-20)

* [88fcc70dc](https://github.com/argoproj/argo-workflows/commit/88fcc70dcf6e60697e6716edc7464a403c49b27e) Update VERSION to v2.3.0, changelog, and manifests
* [1731cd7c2](https://github.com/argoproj/argo-workflows/commit/1731cd7c2cd6a739d9efb369a7732bc15498460f) Fix issue where a DAG with exhausted retries would get stuck Running (#1364)
* [3f6ac9c9f](https://github.com/argoproj/argo-workflows/commit/3f6ac9c9f1ccd92d4dabf52e964a1dd52b1622f6) Update release instructions

### Contributors

* Jesse Suen

## v2.3.0-rc3 (2019-05-07)

* [2274130dc](https://github.com/argoproj/argo-workflows/commit/2274130dc55de0b019ac9fd5232c192364f275c9) Update version to v2.3.0-rc3
* [b024b3d83](https://github.com/argoproj/argo-workflows/commit/b024b3d83a4bfd46bd6b4a5075e9f8f968457309) Fix: #1328 argo submit --wait and argo wait quits while workflow is running (#1347)
* [24680b7fc](https://github.com/argoproj/argo-workflows/commit/24680b7fc8a1fd573b39d61ba7bdce5b143eb686) Fixed: Validate the secret credentials name and key (#1358)
* [f641d84eb](https://github.com/argoproj/argo-workflows/commit/f641d84eb5cd489c98b39b41b69dbea9a3312e01) Fix input artifacts with multiple ssh keys (#1338)
* [e680bd219](https://github.com/argoproj/argo-workflows/commit/e680bd219a2478835d5d8cefcbfb96bd11acc40b) add / test (#1240)
* [ee788a8a6](https://github.com/argoproj/argo-workflows/commit/ee788a8a6c70c5c64f535b6a901e837a9b4d5797) Fix #1340 parameter substitution bug (#1345)
* [60b65190a](https://github.com/argoproj/argo-workflows/commit/60b65190a22e176429e586afe58a86a14b390c66) Fix missing template local volumes, Handle volumes only used in init containers (#1342)
* [4e37a444b](https://github.com/argoproj/argo-workflows/commit/4e37a444bde2a034885d0db35f7b38684505063e) Add documentation on releasing

### Contributors

* Daisuke Taniwaki
* Hideto Inamura
* Ilias Katsakioris
* Jesse Suen
* Saravanan Balasubramanian
* almariah

## v2.3.0-rc2 (2019-04-21)
* [bb1bfdd91](https://github.com/argoproj/argo-workflows/commit/bb1bfdd9106d9b64aa2dccf8d3554bdd31513cf8) Update version to v2.3.0-rc2. Update changelog
* [49a6b6d7a](https://github.com/argoproj/argo-workflows/commit/49a6b6d7ac1bb5f6b390eff1b218205d995142cb) wait will conditionally become privileged if main/sidecar privileged (resolves #1323)
* [34af5a065](https://github.com/argoproj/argo-workflows/commit/34af5a065e42230148b48603fc81f57fb2b4c22c) Fix regression where argoexec wait would not return when podname was too long
* [bd8d5cb4b](https://github.com/argoproj/argo-workflows/commit/bd8d5cb4b7510afb7bd43bd75e5c5d26ccc85ca4) `argo list` was not displaying non-zero priorities correctly
* [64370a2d1](https://github.com/argoproj/argo-workflows/commit/64370a2d185db66a8d2188d986c52a3b73aaf92b) Support parameter substitution in the volumes attribute (#1238)
* [6607dca93](https://github.com/argoproj/argo-workflows/commit/6607dca93db6255a2abc30ae76b5f935fce5735d) Issue1316 Pod creation with secret volumemount (#1318)
* [a5a2bcf21](https://github.com/argoproj/argo-workflows/commit/a5a2bcf21900019d979328250009af4137f7ff2a) Update README.md (#1321)
* [950de1b94](https://github.com/argoproj/argo-workflows/commit/950de1b94efc18473a85e1f23c9ed5e6ff75ba93) Export the methods of `KubernetesClientInterface` (#1294)
* [1c729a72a](https://github.com/argoproj/argo-workflows/commit/1c729a72a2ae431623332b65646c97cb689eab01) Update v2.3.0 CHANGELOG.md

### Contributors

* Chris Chambers
* Ed Lee
* Ilias Katsakioris
* Jesse Suen
* Saravanan Balasubramanian

## v2.3.0-rc1 (2019-04-10)

* [40f9a8759](https://github.com/argoproj/argo-workflows/commit/40f9a87593d312a46f7fa24aaf32e125458cc701) Reorganize manifests to kustomize 2 and update version to v2.3.0-rc1
* [75b28a37b](https://github.com/argoproj/argo-workflows/commit/75b28a37b923e278fc89fd647f78a42e7a3bf029) Implement support for PNS (Process Namespace Sharing) executor (#1214)
* [b4edfd30b](https://github.com/argoproj/argo-workflows/commit/b4edfd30b0e3034d98e938b491cf5bd054b36525) Fix SIGSEGV in watch/CheckAndDecompress. Consolidate duplicate code (resolves #1315)
* [02550be31](https://github.com/argoproj/argo-workflows/commit/02550be31e53da79f1f4dbebda3ede7dc1052086) Archive location should conditionally be added to template only when needed
* [c60010da2](https://github.com/argoproj/argo-workflows/commit/c60010da29bd36c10c6e627802df6d6a06c1a59a) Fix nil pointer dereference with secret volumes (#1314)
* [db89c477d](https://github.com/argoproj/argo-workflows/commit/db89c477d65a29fc0a95ca55f68e1bd23d0170e0) Fix formatting issues in examples documentation (#1310)
* [0d400f2ce](https://github.com/argoproj/argo-workflows/commit/0d400f2ce6db9478b4eaa6fe24849a686c9d1d44) Refactor checkandEstimate to optimize podReconciliation (#1311)
* [bbdf2e2c8](https://github.com/argoproj/argo-workflows/commit/bbdf2e2c8f1b5a8dc83e88fedba9b1899f6bc78b) Add alibaba cloud to officially using argo list (#1313)
* [abb77062f](https://github.com/argoproj/argo-workflows/commit/abb77062fc06ae964ce7ccd1a534ec8bbdf3747c) CheckandEstimate implementation to optimize podReconciliation (#1308)
* [1a028d545](https://github.com/argoproj/argo-workflows/commit/1a028d5458ffef240f8af31caeecda91f057c3ba) Secrets should be passed to pods using volumes instead of API calls (#1302)
* [e34024a3c](https://github.com/argoproj/argo-workflows/commit/e34024a3ca285d1af3b5ba3b3235dc7adc0472b7) Add support for init containers (#1183)
* [4591e44fe](https://github.com/argoproj/argo-workflows/commit/4591e44fe0e4de543f4c4339de0808346e0807e3) Added support for artifact path references (#1300)
* [928e4df81](https://github.com/argoproj/argo-workflows/commit/928e4df81c4b33f0c0750f01b3aa3c4fc7ff256c) Add Karius to users in README.md (#1305)
* [de779f361](https://github.com/argoproj/argo-workflows/commit/de779f36122205790915622f5ee91c9a9d5b9086) Add community meeting notes link (#1304)
* [a8a555791](https://github.com/argoproj/argo-workflows/commit/a8a55579131605d4dc769cb599bc99c06350dfb7) Speed up podReconciliation using parallel goroutine (#1286)
* [934511192](https://github.com/argoproj/argo-workflows/commit/934511192e4045b87be1675ff7e9dfa79faa9fcb) Add dns config support (#1301)
* [850f3f15d](https://github.com/argoproj/argo-workflows/commit/850f3f15dd1965e99cd636711a5e3306bc4bd0c0) Admiralty: add link to blog post, add user (#1295)
* [d5f4b428c](https://github.com/argoproj/argo-workflows/commit/d5f4b428ce02de34a37d5cb2fdba4dfa9fd16e75) Fix for Resource creation where template has same parameter templating (#1283)
* [9b555cdb3](https://github.com/argoproj/argo-workflows/commit/9b555cdb30f6092d5f53891f318fb74b8371c039) Issue#896 Workflow steps with non-existent output artifact path will succeed (#1277)
* [adab9ed6b](https://github.com/argoproj/argo-workflows/commit/adab9ed6bc4f8f337105182c56abad39bccb9676) Argo CI is currently inactive (#1285)
* [59fcc5cc3](https://github.com/argoproj/argo-workflows/commit/59fcc5cc33ce67c057064dc37a463707501615e1) Add workflow labels and annotations global vars (#1280)
* [1e111caa1](https://github.com/argoproj/argo-workflows/commit/1e111caa1d2cc672b3b53c202b96a5f660a7e9b2) Fix bug with DockerExecutor's CopyFile (#1275)
* [73a37f2b2](https://github.com/argoproj/argo-workflows/commit/73a37f2b2a12d74ddf6a4b54e04b50fa1a7c68a1) Add the `mergeStrategy` option to resource patching (#1269)
* [e6105243c](https://github.com/argoproj/argo-workflows/commit/e6105243c785d9f53aef6fcfd344e855ad4f7d84) Reduce redundancy pod label action (#1271)
* [4bfbb20bc](https://github.com/argoproj/argo-workflows/commit/4bfbb20bc23f8bf4611a6314fb80f8138b17b9b9) Error running 1000s of tasks: "etcdserver: request is too large" #1186 (#1264)
* [b2743f30c](https://github.com/argoproj/argo-workflows/commit/b2743f30c411f5ad8f8c8b481a5d6b6ff83c33bd) Proxy Priority and PriorityClassName to pods (#1179)
* [70c130ae6](https://github.com/argoproj/argo-workflows/commit/70c130ae626f7c58d9e5ac0eed8977f51696fcbd) Update versions (#1218)
* [b03841297](https://github.com/argoproj/argo-workflows/commit/b03841297e4b0dab0380b441cf41f5ed34db44bf) Git cloning via SSH was not verifying host public key (#1261)
* [3f06385b1](https://github.com/argoproj/argo-workflows/commit/3f06385b129c02e23ea283f7c66d347cb8899564) Issue#1165 fake outputs don't notify and task completes successfully (#1247)
* [fa042aa28](https://github.com/argoproj/argo-workflows/commit/fa042aa285947c5fa365ef06a9565d0b4e20da0e) typo, executo -> executor (#1243)
* [1cb88baee](https://github.com/argoproj/argo-workflows/commit/1cb88baee9ded1ede27a9d3f1e31f06f4369443d) Fixed Issue#1223 Kubernetes Resource action: patch is not supported (#1245)
* [2b0b8f1c3](https://github.com/argoproj/argo-workflows/commit/2b0b8f1c3f46aa41e4b4ddaf14ad1fdebccfaf8a) Fix the Prometheus address references (#1237)
* [94cda3d53](https://github.com/argoproj/argo-workflows/commit/94cda3d53c6a72e3fc225ba08796bfd9420eccd6) Add feature to continue workflow on failed/error steps/tasks (#1205) (see the sketch after this list)
* [3f1fb9d5e](https://github.com/argoproj/argo-workflows/commit/3f1fb9d5e61d300c4922e48a748dc17285e07f07) Add Gardener to "Who uses Argo" (#1228)
* [cde5cd320](https://github.com/argoproj/argo-workflows/commit/cde5cd320fa987ac6dd539a3126f29c73cd7277a) Include stderr when retrieving docker logs (#1225)
* [2b1d56e7d](https://github.com/argoproj/argo-workflows/commit/2b1d56e7d4e583e2e06b37904714b350faf03d97) Update README.md (#1224)
* [eeac5a0e1](https://github.com/argoproj/argo-workflows/commit/eeac5a0e11b4a6f4bc28757a3b0684598b8c4974) Remove extra quotes around output parameter value (#1232)
* [8b67e1bfd](https://github.com/argoproj/argo-workflows/commit/8b67e1bfdc7ed5ea153cb17f9a740afe2bd4efa8) Update README.md (#1236)
* [baa3e6221](https://github.com/argoproj/argo-workflows/commit/baa3e622121e66c9fec7c612c88027b7cacbd1b2) Update README with typo fixes (#1220)
* [f6b0c8f28](https://github.com/argoproj/argo-workflows/commit/f6b0c8f285217fd0e6089b0cf03ca4926d1b4758) Executor can access the k8s apiserver with an out-of-cluster config file (#1134)
* [0bda53c77](https://github.com/argoproj/argo-workflows/commit/0bda53c77c54b037e7d91b18554053362b1e4d35) fix dag retries (#1221)
* [8aae29317](https://github.com/argoproj/argo-workflows/commit/8aae29317a8cfef2edc084a4c74a44c83d845936) Issue #1190 - Fix incorrect retry node handling (#1208)
* [f1797f780](https://github.com/argoproj/argo-workflows/commit/f1797f78044504dbf2e1f7285cc9c18ac79f5e81) Add schedulerName to workflow and template spec (#1184)
* [2ddae1610](https://github.com/argoproj/argo-workflows/commit/2ddae161037f603d2a3c12ba6b495dc422547b58) Set executor image pull policy for resource template (#1174)
* [edcb56296](https://github.com/argoproj/argo-workflows/commit/edcb56296255267a3c8fa639c3ad26a016caab80) Dockerfile: argoexec base image correction (fixes #1209) (#1213)
* [f92284d71](https://github.com/argoproj/argo-workflows/commit/f92284d7108ebf92907008d8f12a0696ee467a43) Minor spelling, formatting, and style updates. (#1193)
* [bd249a83e](https://github.com/argoproj/argo-workflows/commit/bd249a83e119d6161fa1c593b09fb381db448a0d) Issue #1128 - Use polling instead of fs notify to get annotation changes (#1194)
* [14a432e75](https://github.com/argoproj/argo-workflows/commit/14a432e75119e37d42715b7e83992789c6dac454) Update community/README (#1197)
* [eda7e0843](https://github.com/argoproj/argo-workflows/commit/eda7e08438d2314bb5eb178a1335a3c28555ab34) Updated OWNERS (#1198)
* [73504a24e](https://github.com/argoproj/argo-workflows/commit/73504a24e885c6df9d1cceb4aa123c79eca7b7cd) Fischerjulian adds ruby to rest docs (#1196)
* [311ad86f1](https://github.com/argoproj/argo-workflows/commit/311ad86f101c58a1de1cef313a1516b4c79e643f) Fix missing docker binary in argoexec image. Improve reuse of image layers
* [831e2198e](https://github.com/argoproj/argo-workflows/commit/831e2198e22503394acca1cce0dbcf8dcebb2931) Issue #988 - Submit should not print logs to stdout unless output is 'wide' (#1192)
* [17250f3a5](https://github.com/argoproj/argo-workflows/commit/17250f3a51d545c49114882d0da6ca29eda7c6f2) Add documentation how to use parameter-file's (#1191)
* [01ce5c3bc](https://github.com/argoproj/argo-workflows/commit/01ce5c3bcf0dde5536b596d48bd48a93b3f2eee0) Add Docker Hub build hooks
* [93289b42f](https://github.com/argoproj/argo-workflows/commit/93289b42f96cd49cdc048d84626cb28ef6932940) Refactor Makefile/Dockerfile to remove volume binding in favor of build stages (#1189)
* [8eb4c6663](https://github.com/argoproj/argo-workflows/commit/8eb4c66639c5fd1a607c73a4d765468a99c43da1) Issue #1123 - Fix 'kubectl get' failure if resource namespace is different from workflow namespace (#1171)
* [eaaad7d47](https://github.com/argoproj/argo-workflows/commit/eaaad7d47257302f203bab24bce1b7d479453351) Increased S3 artifact retry time and added log (#1138)
* [f07b5afea](https://github.com/argoproj/argo-workflows/commit/f07b5afeaf950f49f87cdffb5116e82c8b0d43a1) Issue #1113 - Wait for daemon pods completion to handle annotations (#1177)
* [2b2651b0a](https://github.com/argoproj/argo-workflows/commit/2b2651b0a7f5d6873c8470fad137d42f9b7d7240) Do not mount unnecessary docker socket (#1178)
* [1fc03144c](https://github.com/argoproj/argo-workflows/commit/1fc03144c55f987993c7777b190b1848fc3833cd) Argo users: Equinor (#1175)
* [e381653b6](https://github.com/argoproj/argo-workflows/commit/e381653b6d6d6a6babba2e8f05f6f103e81a191d) Update README. (#1173) (#1176)
* [5a917140c](https://github.com/argoproj/argo-workflows/commit/5a917140cb56a27e7b6f3b1d5068f4838863c273) Update README and preview notice in CLA.
* [521eb25ae](https://github.com/argoproj/argo-workflows/commit/521eb25aeb2b8351d72bad4a3d3aa2d1fa55eb23) Validate ArchiveLocation artifacts (#1167)
* [528e8f803](https://github.com/argoproj/argo-workflows/commit/528e8f803683ee462ccc05fc9b00dc57858c0e93) Add missing patch in namespace kustomization.yaml (#1170)
* [0b41ca0a2](https://github.com/argoproj/argo-workflows/commit/0b41ca0a2410b01205712a2186dd12851eecb707) Add Preferred Networks to users in README.md (#1172)
* [649d64d1b](https://github.com/argoproj/argo-workflows/commit/649d64d1bd375f779cd150446bddce94582067d2) Add GitHub to users in README.md (#1151)
* [864c7090a](https://github.com/argoproj/argo-workflows/commit/864c7090a0bfcaa12237ff6e894a9d26ab463a7a) Update codegen for network config (#1168)
* [c3cc51be2](https://github.com/argoproj/argo-workflows/commit/c3cc51be2e14e931d6e212aa30842a2c514082d1) Support HDFS Artifact (#1159)
* [8db000666](https://github.com/argoproj/argo-workflows/commit/8db0006667dec74c58cbab744b014c67fda55c65) add support for hostNetwork & dnsPolicy config (#1161)
* [149d176fd](https://github.com/argoproj/argo-workflows/commit/149d176fdf3560d74afa91fe91a0ee38bf7ec3bd) Replace exponential retry with poll (#1166)
* [31e5f63cb](https://github.com/argoproj/argo-workflows/commit/31e5f63cba89b06abc2cdce0d778c6b8d937a23e) Fix tests compilation error (#1157)
* [6726d9a96](https://github.com/argoproj/argo-workflows/commit/6726d9a961a2c3ed5467430d3631a36cfbf361de) Fix failing TestAddGlobalArtifactToScope unit test
* [4fd758c38](https://github.com/argoproj/argo-workflows/commit/4fd758c38fc232bf26bb5e1d4e7e23321ba91416) Add slack badge to README (#1164)
* [3561bff70](https://github.com/argoproj/argo-workflows/commit/3561bff70ad6bfeca8967be6aa4ac24fbbc8ac27) Issue #1136 - Fix metadata for DAG with loops (#1149)
* [c7fec9d41](https://github.com/argoproj/argo-workflows/commit/c7fec9d41c0e2d3369e111f8b1d0f1d0ca77edae) Reflect minio chart changes in documentation (#1147)
* [f6ce78334](https://github.com/argoproj/argo-workflows/commit/f6ce78334762cbc3c6de1604c11ea4f5f618c275) add support for other archs (#1137)
* [cb538489a](https://github.com/argoproj/argo-workflows/commit/cb538489a187134577e2146afcf9367f45088ff7) Fix issue where steps with exhausted retries would not complete (#1148)
* [e400b65c5](https://github.com/argoproj/argo-workflows/commit/e400b65c5eca2de2aa891f8489dcd835ef0e161c) Fix global artifact overwriting in nested workflow (#1086)
* [174eb20a6](https://github.com/argoproj/argo-workflows/commit/174eb20a6a110c9bf647b040460df83b6ab031c4) Issue #1040 - Kill daemoned step if workflow consist of single daemoned step (#1144)
* [e078032e4](https://github.com/argoproj/argo-workflows/commit/e078032e469effdfc492c8eea97eb2701ceda0c2) Issue #1132 - Fix panic in ttl controller (#1143)
* [e09d9ade2](https://github.com/argoproj/argo-workflows/commit/e09d9ade25535ae7e78ca23636e4d158a98bba84) Issue #1104 - Remove container wait timeout from 'argo logs --follow' (#1142)
* [0f84e5148](https://github.com/argoproj/argo-workflows/commit/0f84e5148dd34c225a35eab7a1f5953afb45e724) Allow owner reference to be set in submit util (#1120)
* [3484099c8](https://github.com/argoproj/argo-workflows/commit/3484099c856716f6da5e02ad75a48b568f547695) Update generated swagger to fix verify-codegen (#1131)
* [587ab1a02](https://github.com/argoproj/argo-workflows/commit/587ab1a02772cd9b7ae7cd94f91b815ac4774297) Fix output artifact and parameter conflict (#1125)
* [6bb3adbc5](https://github.com/argoproj/argo-workflows/commit/6bb3adbc596349100c4f19155cfe976f4ea0e6fb) Adding Quantibio in Who uses Argo (#1111)
* [1ae3696c2](https://github.com/argoproj/argo-workflows/commit/1ae3696c27f343c947d9225c5cc2294c8b7c45e5) Install mime-support in argoexec to set proper mime types for S3 artifacts (resolves #1119)
* [515a90050](https://github.com/argoproj/argo-workflows/commit/515a9005057dfd260a8b60c4ba1ab8c3aa614f48) add support for ppc64le and s390x (#1102)
* [781428378](https://github.com/argoproj/argo-workflows/commit/78142837836cb100f6858d246d84100b74794cc6) Remove docker_lib mount volume which is not needed anymore (#1115)
* [e59398adf](https://github.com/argoproj/argo-workflows/commit/e59398adf39b8ef1d0ce273263e80d49e370c510) Fix examples docs of parameters. (#1110)
* [ec20d94b6](https://github.com/argoproj/argo-workflows/commit/ec20d94b6f1d0d88d579c8a27b964f6e9915ff55) Issue #1114 - Set FORCE_NAMESPACE_ISOLATION env variable in namespace install manifests (#1116)
* [49c1fa4f4](https://github.com/argoproj/argo-workflows/commit/49c1fa4f42e1c19ce3b8f4ac2c339894e1ed90d7) Update docs with examples using the K8s REST API
* [bb8a6a58f](https://github.com/argoproj/argo-workflows/commit/bb8a6a58fee8170d6db65c73a50c5fe640f3cb7d) Update ROADMAP.md
* [46855dcde](https://github.com/argoproj/argo-workflows/commit/46855dcde1d9ba904a1c94a97e602d0510f5e0d4) adding logo to be used by the OS Site (#1099)
* [438330c38](https://github.com/argoproj/argo-workflows/commit/438330c38da69a68d6b0b0b24f6aae0053fc35ee) #1081 added retry logic to s3 load and save function (#1082)
* [cb8b036b8](https://github.com/argoproj/argo-workflows/commit/cb8b036b8db3ebeb6ef73d9f2070a1ddaf0d2150) Initialize child node before marking phase. Fixes panic on invalid `When` (#1075)
* [60b508dd9](https://github.com/argoproj/argo-workflows/commit/60b508dd9ec36ef45013d72ec6166dd9a30d77fe) Drop reference to removed `argo install` command. (#1074)
* [62b24368a](https://github.com/argoproj/argo-workflows/commit/62b24368a93d57eb505bf226e042a8eb0bf72da4) Fix typo in demo.md (#1089)
* [b5dfa0217](https://github.com/argoproj/argo-workflows/commit/b5dfa0217470c97d8e83716a22cf3bd274c4a2d5) Use relative links on README file (#1087)
* [95b72f38c](https://github.com/argoproj/argo-workflows/commit/95b72f38c94d12735e79bb8bec1a46b10514603c) Update docs to outline bare minimum set of privileges for a workflow
* [d4ef6e944](https://github.com/argoproj/argo-workflows/commit/d4ef6e944c302b5d2b75d4c49e1833c3a28c1f9a) Add new article and minor edits. (#1083)
* [afdac9bb3](https://github.com/argoproj/argo-workflows/commit/afdac9bb34fe8a01ad511323a00ccf6c07e41137) Issue #740 - System level workflow parallelism limits & priorities (#1065)
* [a53a76e94](https://github.com/argoproj/argo-workflows/commit/a53a76e9401fab701eaa150307b21a28825c97ce) fix #1078 Azure AKS authentication issues (#1079)
* [79b3e3074](https://github.com/argoproj/argo-workflows/commit/79b3e30746f779e3cec3a28beaecb9c0df7024e1) Fix string format arguments in workflow utilities. (#1070)
* [76b14f545](https://github.com/argoproj/argo-workflows/commit/76b14f54520a92b81ced78d4cae2632655f396fc) Auto-complete workflow names (#1061)
* [f2914d63e](https://github.com/argoproj/argo-workflows/commit/f2914d63e9c8b41a13b5932f7962f208b7e5a0da) Support nested steps workflow parallelism (#1046)
* [eb48c23a2](https://github.com/argoproj/argo-workflows/commit/eb48c23a2525a62bbc1b8b4c94e3d50fd91014bd) Raise not implemented error when artifact saving is unsupported (#1062)
* [036969c0f](https://github.com/argoproj/argo-workflows/commit/036969c0f4f6ce6a3c948b5d161c0367cf07176b) Add Cratejoy to list of users (#1063)
* [a07bbe431](https://github.com/argoproj/argo-workflows/commit/a07bbe431cecbb1d50356f94111d3bd2dbc48bb6) Adding SAP Hybris in Who uses Argo (#1064)
* [7ef1cea68](https://github.com/argoproj/argo-workflows/commit/7ef1cea68c94f7f0e1e2f8bd75bedc5a7df8af90) Update dependencies to K8s v1.12 and client-go 9.0
* [23d733bae](https://github.com/argoproj/argo-workflows/commit/23d733bae386db44ec80639daf91b29dbf86b335) Add namespace explicitly to pod metadata (#1059)
* [79ed7665d](https://github.com/argoproj/argo-workflows/commit/79ed7665d7419e7fbfe8b120c4cbcd486bebee57) Parameter and Argument names should support snake case (#1048)
* [6e6c59f13](https://github.com/argoproj/argo-workflows/commit/6e6c59f13ff84fd6b4f1e7f836c783941c434ce7) Submodules are dirty after checkout -- need to update (#1052)
* [f18716b74](https://github.com/argoproj/argo-workflows/commit/f18716b74c6f52d0c8bf4d64c05eae9db75bfb1f) Support for K8s API based Executor (#1010)
* [e297d1950](https://github.com/argoproj/argo-workflows/commit/e297d19501a8116b5a18c925a3c72d7c7e106ea0) Updated examples/README.md (#1051)
* [19d6cee81](https://github.com/argoproj/argo-workflows/commit/19d6cee8149917c994b737510d9c8dbfc6dbdd27) Updated ARTIFACT_REPO.md (#1049)
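
A minimal sketch of the continue-on-failure feature from v2.3.0-rc1 above (#1205), which is expressed with `continueOn` on a workflow step (a `dag` task takes the same field). The template names, image, and commands are illustrative only:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: continue-on-fail-  # illustrative name
spec:
  entrypoint: main
  templates:
  - name: main
    steps:
    - - name: flaky
        template: fail
        # The workflow keeps running even if this step fails.
        continueOn:
          failed: true
    - - name: after
        template: done
  - name: fail
    container:
      image: alpine:3.7            # illustrative image
      command: [sh, -c, "exit 1"]
  - name: done
    container:
      image: alpine:3.7
      command: [echo, "ran despite the earlier failure"]
```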

### Contributors

* Adrien Trouillaud
* Alexander Matyushentsev
* Alexey Volkov
* Andrei Miulescu
* Anna Winkler
* Bastian Echterhölter
* Chen Zhiwei
* Clemens Lange
* Daisuke Taniwaki
* Dan Norris
* Divya Vavili
* Ed Lee
* Edward Lee
* Erik Parmann
* Fred Dubois
* Greg Roodt
* Hamel Husain
* Howie Benefiel
* Ian Howell
* Ilias K
* Ilias Katsakioris
* Ismail Alidzhikov
* Jesse Suen
* Johannes 'fish' Ziemke
* Joshua Carp
* Julian Fischer
* Konstantin Zadorozhny
* Marcin Karkocha
* Matthew Coleman
* Miyamae Yuuya
* Naoto Migita
* Naresh Kumar Amrutham
* Nick Stott
* Pengfei Zhao
* Rocio Montes
* Saravanan Balasubramanian
* Tang Lee
* Tim Schrodi
* Val Sichkovskyi
* WeiYan
* Xianlu Bird
* gerardaus
* houz
* jacky
* jdfalko
* kshamajain99
* shahin
* xubofei1983

## v2.2.1 (2018-10-11)

* [0a928e93d](https://github.com/argoproj/argo-workflows/commit/0a928e93dac6d8522682931a0a68c52add310cdb) Update installation manifests to use v2.2.1
* [3b52b2619](https://github.com/argoproj/argo-workflows/commit/3b52b26190163d1f72f3aef1a39f9f291378dafb) Fix linter warnings and update swagger
* [7d0e77ba7](https://github.com/argoproj/argo-workflows/commit/7d0e77ba74587d913b1f4aceb1443228a04d35de) Update changelog and bump version to 2.2.1
* [b402e12fe](https://github.com/argoproj/argo-workflows/commit/b402e12feefe5cd1380e9479b2cc9bae838357bf) Issue #1033 - Workflow executor panic: workflows.argoproj.io/template workflows.argoproj.io/template not found in annotation file (#1034)
* [3f2e986e1](https://github.com/argoproj/argo-workflows/commit/3f2e986e130ca136514767fb1593d745ca418236) fix typo in examples/README.md (#1025)
* [9c5e056a8](https://github.com/argoproj/argo-workflows/commit/9c5e056a858a9b510cdacdbc5deb5857a97662f8) Replace tabs with spaces (#1027)
* [091f14071](https://github.com/argoproj/argo-workflows/commit/091f1407180990c745e981b24169c3bb4868dbe3) Update README.md (#1030)
* [159fe09c9](https://github.com/argoproj/argo-workflows/commit/159fe09c99c16738c0897f9d74087ec1b264954d) Fix format issues to resolve build errors (#1023)
* [363bd97b7](https://github.com/argoproj/argo-workflows/commit/363bd97b72ae5cb7fc52a560b6f7939248cdb72d) Fix error in env syntax (#1014)
* [ae7bf0a5f](https://github.com/argoproj/argo-workflows/commit/ae7bf0a5f7ddb1e5211e724bef15951198610942) Issue #1018 - Workflow controller should save information about archived logs in step outputs (#1019)
* [15d006c54](https://github.com/argoproj/argo-workflows/commit/15d006c54ee7149b0d86e6d60453ecc8c071c953) Add example of workflow using imagePullSecrets (resolves #1013)
* [2388294fa](https://github.com/argoproj/argo-workflows/commit/2388294fa412e153d8366910e4d47ba564f29856) Fix RBAC roles to include workflow delete for GC to work properly (resolves #1004)
* [6f611cb93](https://github.com/argoproj/argo-workflows/commit/6f611cb9383610471f941b5cab4227ce8bfea7c5) Fix issue where resubmission of a terminated workflow creates a terminated workflow (issue #1011)
* [4a7748f43](https://github.com/argoproj/argo-workflows/commit/4a7748f433f888fdc50b592db1002244ea466bdb) Disable Persistence in the demo example (#997)
* [55ae0cb24](https://github.com/argoproj/argo-workflows/commit/55ae0cb242a9cf6b390822ca6c0aa0868f5b06e3) Fix example pod name (#1002)
* [c275e7acb](https://github.com/argoproj/argo-workflows/commit/c275e7acb7b5e8f9820a09d8b0cb635f710b8674) Add imagePullPolicy config for executors (#995)
* [b1eed124e](https://github.com/argoproj/argo-workflows/commit/b1eed124e6d943c453d87a9b4291ba10198d0bc6) `tar -tf` will detect compressed tars correctly. (#998)
* [03a7137c9](https://github.com/argoproj/argo-workflows/commit/03a7137c9ca9459727b57fb0a0e95584c5305844) Add new organization using argo (#994)
* [838845287](https://github.com/argoproj/argo-workflows/commit/8388452870ed9a2d2e348a2844d3d7d1c4d61b05) Update argoproj/pkg to trim leading/trailing whitespace in S3 credentials (resolves #981)
* [978b49383](https://github.com/argoproj/argo-workflows/commit/978b49383d30cdbc7c9708eb281b7800ee5412df) Add syntax highlighting for all YAML snippets and most shell snippets (#980)
* [60d5dc11c](https://github.com/argoproj/argo-workflows/commit/60d5dc11c73e888898160b4cc329e87747cee4d2) Give control to decide whether or not to archive logs at a template level
* [8fab73b14](https://github.com/argoproj/argo-workflows/commit/8fab73b142b96f943592c66932ae0c5183e8c3db) Detect and indicate when container was OOMKilled
* [47a9e5560](https://github.com/argoproj/argo-workflows/commit/47a9e5560229c789b70a6624f23fb4433412fbc4) Update config map doc with instructions to enable log archiving
* [79dbbaa1e](https://github.com/argoproj/argo-workflows/commit/79dbbaa1ed30cae6279eabd9a84650107f4387b3) Add instructions to match git URL format to auth type in git example (issue #979)
* [429f03f5b](https://github.com/argoproj/argo-workflows/commit/429f03f5b26db42f1857a93b7599b545642c2f0a) Add feature list to README.md. Tweaks to getting started.
* [36fd19482](https://github.com/argoproj/argo-workflows/commit/36fd19482c6bebfb21076cba81b924deaff14f52) Update getting started guide with v2.2.0 instructions

### Contributors

* Alexander Matyushentsev
* Appréderisse Benjamin
* Daisuke Taniwaki
* David Bernard
* Feynman Liang
* Ilya Sotkov
* Jesse Suen
* Marco Sanvido
* Matt Hillsdon
* Sean Fern
* WeiYan

## v2.2.0 (2018-08-30)

* [af636ddd8](https://github.com/argoproj/argo-workflows/commit/af636ddd8455660f307d835814d3112b90815dfd) Update installation manifests to use v2.2.0
* [7864ad367](https://github.com/argoproj/argo-workflows/commit/7864ad36788dc78d035d59ddb27ecd979f7216f4) Introduce `withSequence` to iterate a range of numbers in a loop (resolves #527) (see the sketch after this list)
* [99e9977e4](https://github.com/argoproj/argo-workflows/commit/99e9977e4ccf61171ca1e347f6a182ba1d8dba83) Introduce `argo terminate` to terminate a workflow without deleting it (resolves #527)
* [f52c04508](https://github.com/argoproj/argo-workflows/commit/f52c045087abff478603db4817de1933bddce5e7) Reorganize codebase to make CLI functionality available as a library
* [311169f7e](https://github.com/argoproj/argo-workflows/commit/311169f7e71c58fe9bf879a94681ee274ddf623c) Fix issue where sidecars and daemons were not reliably killed (resolves #879)
* [67ffb6eb7](https://github.com/argoproj/argo-workflows/commit/67ffb6eb7519936e1149f36e11dc9fda0f70a242) Add a reason/message for Unschedulable Pending pods
* [69c390f28](https://github.com/argoproj/argo-workflows/commit/69c390f288ccaaeefba1d5a7961acebfe2e7771a) Support for workflow level timeouts (resolves #848)
* [f88732ec0](https://github.com/argoproj/argo-workflows/commit/f88732ec09413716bf14927bef10355b21b88516) Update docs to use keyFormat field
* [0df022e77](https://github.com/argoproj/argo-workflows/commit/0df022e777f35bf0ea39ebbacfe4e5f92f099a62) Rename keyPattern to keyFormat. Remove pending pod query during pod reconciliation
* [75a9983b1](https://github.com/argoproj/argo-workflows/commit/75a9983b17869b76a93621f108ee85c70b8d8533) Fix potential panic in `argo watch`
* [9cb464497](https://github.com/argoproj/argo-workflows/commit/9cb4644975d16dbebc3607ffb91364c93bc14e30) Add TTLSecondsAfterFinished field and controller to garbage collect completed workflows (resolves #911) (see the sketch after this section)
* [7540714a4](https://github.com/argoproj/argo-workflows/commit/7540714a47f04f664362c7083c886058c62408f8) Add ability to archive container logs to the artifact repository (resolves #454)
* [11e57f4de](https://github.com/argoproj/argo-workflows/commit/11e57f4dea93fde60b204a5e7675fec999c66f56) Introduce archive strategies with ability to disable tar.gz archiving (resolves #784)
* [e180b5471](https://github.com/argoproj/argo-workflows/commit/e180b547133aa461bd5cc282a59f8954485d5b8f) Update CHANGELOG.md
* [5670bf5a6](https://github.com/argoproj/argo-workflows/commit/5670bf5a65cbac898b298edd682e603666ed5cb6) Introduce `argo watch` command to watch live workflows from terminal (resolves #969)
* [573943619](https://github.com/argoproj/argo-workflows/commit/5739436199980ec765b070f8e78669bc37115ad6) Support additional container runtimes through kubelet executor (#952)
* [a9c84c97d](https://github.com/argoproj/argo-workflows/commit/a9c84c97de8f088cd4ee91cd72cf75012fc70438) Error workflows which hit k8s/etcd 1M resource size limit (resolves #913)
* [67792eb89](https://github.com/argoproj/argo-workflows/commit/67792eb89e5aa678ffc52540dbc232d8598ce43f) Add parameter-file support (#966)
* [841832a35](https://github.com/argoproj/argo-workflows/commit/841832a3507be3b92e3b2a05fef1052b1cd6e20d) Aggregate workflow RBAC roles to built-in admin/edit/view clusterroles (resolves #960)
* [35bb70936](https://github.com/argoproj/argo-workflows/commit/35bb70936cf1b76e53f7f6f0e6acaccb9c6d06bf) Allow scaling of workflow and pod workers via controller CLI flags (resolves #962)
* [b479fa106](https://github.com/argoproj/argo-workflows/commit/b479fa10647bd1c1b86410b7837668c375b327be) Improve workflow configmap documentation for keyPattern
* [f1802f91d](https://github.com/argoproj/argo-workflows/commit/f1802f91d8934b2e4b9d1f64230230bc2a0b5baf) Introduce `keyPattern` workflow config to enable flexibility in archive location path (issue #953)
* [a5648a964](https://github.com/argoproj/argo-workflows/commit/a5648a9644fcea5f498c24a573a038290b92016f) Fix kubectl proxy link for argo-ui Service (#963)
* [09f059120](https://github.com/argoproj/argo-workflows/commit/09f0591205ec81b4ec03f0f5c534a13648346f41) Introduce Pending node state to highlight failures when start workflow pods
* [a3ff464f6](https://github.com/argoproj/argo-workflows/commit/a3ff464f67a862d4110848d94a46be39876ce57e) Speed up CI job
* [88627e842](https://github.com/argoproj/argo-workflows/commit/88627e842c082ddc4d75d15a3e2dc6c7ab4f1db8) Update base images to debian:9.5-slim. Use stable metalinter
* [753c5945b](https://github.com/argoproj/argo-workflows/commit/753c5945b62be209f05025c2e415a0753f5e0b01) Update argo-ci-builder image with new dependencies
* [674b61bb4](https://github.com/argoproj/argo-workflows/commit/674b61bb473787a157e543c10dcf158fa35bb39a) Remove unnecessary dependency on argo-cd and obsolete RBAC constants
* [60658de0c](https://github.com/argoproj/argo-workflows/commit/60658de0cf7403c4be014e92b7a3bb4772f4ad5f) Refactor linting/validation into standalone package. Support linting of .json files
* [f55d579a9](https://github.com/argoproj/argo-workflows/commit/f55d579a9478ed33755874f24656faec04611777) Detect and fail upon unknown fields during argo submit & lint (resolves #892)
* [edf6a5741](https://github.com/argoproj/argo-workflows/commit/edf6a5741de8bdf3a20852a55581883f1ec80d9a) Migrate to using argoproj.io/pkg packages
* [5ee1e0c7d](https://github.com/argoproj/argo-workflows/commit/5ee1e0c7daed4e2c8dca5643a800292ece067fca) Update artifact config docs (#957)
* [faca49c00](https://github.com/argoproj/argo-workflows/commit/faca49c009bead218ee974bfad2ccc36f84de1fb) Updated README
* [936c6df7e](https://github.com/argoproj/argo-workflows/commit/936c6df7eaae08082c1cc7ad750f664ff8a4c54c) Add table of content to examples (#956)
* [d2c03f67c](https://github.com/argoproj/argo-workflows/commit/d2c03f67c2fd45ff54c04db706c9ebf252fca6f2) Correct image used in install manifests
* [ec3b7be06](https://github.com/argoproj/argo-workflows/commit/ec3b7be065aa65aae207bd34930001b593009b80) Remove CLI installer/uninstaller. Executor image configured via CLI argument (issue #928) Remove redundant/unused downward API metadata
* [3a85e2429](https://github.com/argoproj/argo-workflows/commit/3a85e2429154a4d97c8fc7c92f9e8f482de6639f) Rely on `git checkout` instead of go-git checkout for more reliable revision resolution
* [ecef0e3dd](https://github.com/argoproj/argo-workflows/commit/ecef0e3dd506eefc222c1ebed58ab81265ac9638) Rename Prometheus metrics (#948)
* [b9cffe9cd](https://github.com/argoproj/argo-workflows/commit/b9cffe9cd7b347905a42cf3e217cc3b039bdfb3f) Issue #896 - Prometheus metrics and telemetry (#935)
* [290dee52b](https://github.com/argoproj/argo-workflows/commit/290dee52bfb94679870cee94ca9560bbe8bd7813) Support parameter aggregation of map results in scripts
* [fc20f5d78](https://github.com/argoproj/argo-workflows/commit/fc20f5d787ed11f03a24439c042b9ef45349eb95) Fix errors when submodules are from different URL (#939)
* [b4f1a00ad](https://github.com/argoproj/argo-workflows/commit/b4f1a00ad2862e6545dd4ad16279a49cd4585676) Add documentation about workflow variables
* [4a242518c](https://github.com/argoproj/argo-workflows/commit/4a242518c6ea81cd0d1e5aaab2822231d9b36d46) Update readme.md (#943)
* [a5baca60d](https://github.com/argoproj/argo-workflows/commit/a5baca60d1dfb8fb8c82a936ab383d49e075cff3) Support referencing of global workflow artifacts (issue #900)
* [9b5c85631](https://github.com/argoproj/argo-workflows/commit/9b5c85631765285b4593b7707ede014178f77679) Support for sophisticated expressions in `when` conditionals (issue #860)
* [ecc0f0272](https://github.com/argoproj/argo-workflows/commit/ecc0f0272f2257600abab8f4779c478957644d7c) Resolve revision added ability to specify shorthand revision and other things like HEAD~2 etc (#936)
* [11024318c](https://github.com/argoproj/argo-workflows/commit/11024318c0e2a9106f8a8b4a719daba12adf9f36) Support conditions with DAG tasks. Support aggregated outputs from scripts (issue #921)
* [d07c1d2f3](https://github.com/argoproj/argo-workflows/commit/d07c1d2f3b7c916887185eea749db2278bf9d043) Support withItems/withParam and parameter aggregation with DAG templates (issue #801)
* [94c195cb0](https://github.com/argoproj/argo-workflows/commit/94c195cb014ba2e5c5943d96dc0a3cc3243edb6a) Bump VERSION to v2.2.0
* [9168c59dc](https://github.com/argoproj/argo-workflows/commit/9168c59dc486f840dc2b9713d92c14bdccebbaf8) Fix outbound node metadata with retry nodes causing disconnected nodes to be rendered in UI (issue #880)
* [c6ce48d08](https://github.com/argoproj/argo-workflows/commit/c6ce48d086638168b9bd8b998d65a2e3a4801540) Fix outbound node metadata issue with steps template causing incorrect edges to be rendered in UI
* [520b33d5f](https://github.com/argoproj/argo-workflows/commit/520b33d5fc6e7e670c33015fd74c5a2f3bd74a21) Add ability to aggregate and reference output parameters expanded by loops (issue #861)
* [ece1eef85](https://github.com/argoproj/argo-workflows/commit/ece1eef85ac1f92d2fad8a2fef8c657f04b4599a) Support submission of workflows as json, and from stdin (resolves #926)
* [4c31d61da](https://github.com/argoproj/argo-workflows/commit/4c31d61da2891e92a3ae0d09b6924655a07fc59f) Add `argo delete --older` to delete completed workflows older than specified duration (resolves #930)
* [c87cd33c1](https://github.com/argoproj/argo-workflows/commit/c87cd33c1bc46c06314129c882fec80269af8133) Update golang version to v1.10.3
* [618b7eb84](https://github.com/argoproj/argo-workflows/commit/618b7eb84678e177a38e5aa81fa59ed891459aa5) Proper fix for assessing overall DAG phase. Add some DAG unit tests (resolves #885)
* [f223e5ad6](https://github.com/argoproj/argo-workflows/commit/f223e5ad62115399cf1394db4e9e65f05ae6da8b) Fix issue where a DAG would fail even if retry was successful (resolves #885)
* [143477f3d](https://github.com/argoproj/argo-workflows/commit/143477f3d5e0ab0d65dd97774aabdcd736ae4fbe) Start use of argoproj/pkg shared libraries
* [1220d0801](https://github.com/argoproj/argo-workflows/commit/1220d0801b8aa78c5364a4586cd119553d96bca5) Update argo-cluster-role to work with OpenShift (resolves #922)
* [4744f45a9](https://github.com/argoproj/argo-workflows/commit/4744f45a9c110b11fa73070a52e4166406fa5da4) Added SSH clone and proper git clone using go-git (#919)
* [d657abf4a](https://github.com/argoproj/argo-workflows/commit/d657abf4a37c9f2987b5cc2ee337743c981c3e48) Regenerate code and address OpenAPI rule validation errors (resolves #923)
* [c5ec4cf61](https://github.com/argoproj/argo-workflows/commit/c5ec4cf6194ab5f741eb2e1d4e387dcf32ba3ce7) Upgrade k8s dependencies to v1.10 (resolves #908)
* [ba8061abd](https://github.com/argoproj/argo-workflows/commit/ba8061abd296895555ea3d1d6ca7418fcd07d633) Redundant verifyResolvedVariables check in controller precluded the ability to use {{ }} in other circumstances
* [05a614496](https://github.com/argoproj/argo-workflows/commit/05a614496bb921b5fa081605227de1a8832260cd) Added link to community meetings (#912)
* [f33624d67](https://github.com/argoproj/argo-workflows/commit/f33624d67d0cf348dcdece46832081346c26bf80) Add an example on how to submit and wait on a workflow
* [aeed7f9da](https://github.com/argoproj/argo-workflows/commit/aeed7f9da490d8dc4ad40c00ac2272a19da4ff17) Added new members
* [288e4fc85](https://github.com/argoproj/argo-workflows/commit/288e4fc8577890e7fa6cc546f92aef4c954ce18c) Added Argo Events link.
* [3322506e5](https://github.com/argoproj/argo-workflows/commit/3322506e5a1d07e198f69cadd210b0b6cc6cfbc9) Updated README
* [3ce640a24](https://github.com/argoproj/argo-workflows/commit/3ce640a24509454302a5126c972fd5424673c00e) Issue #889 - Support retryStrategy for scripts (#890)
* [91c6afb2c](https://github.com/argoproj/argo-workflows/commit/91c6afb2cc07c113e4999f114279638aa6809fd6) adding BlackRock as corporate contributor/user (#886)
* [c8667b5c8](https://github.com/argoproj/argo-workflows/commit/c8667b5c81068326638a5e35c20336223b3894db) Fix issue where `argo lint` required spec level arguments to be supplied
* [ed7dedde1](https://github.com/argoproj/argo-workflows/commit/ed7dedde1f8be2a5f7be828a31ac9bb4025919e1) Update influx-ci example to choose a stable InfluxDB branch
* [135813e10](https://github.com/argoproj/argo-workflows/commit/135813e10e932a2187d007284766a816d9aa4442) Add datadog to the argo users (#882)
* [f10389484](https://github.com/argoproj/argo-workflows/commit/f103894843e9ed8cbaf4212e765c10311bec5989) Fix `make verify-codegen` build target when run in CI
* [785f2cbd1](https://github.com/argoproj/argo-workflows/commit/785f2cbd114e6d0097e21240d5cacece0b6d071e) Update references to v2.1.1. Add better checks in release Makefile target
* [d65e1cd3e](https://github.com/argoproj/argo-workflows/commit/d65e1cd3e77efbe6fc877ac689fd4cd19bc35093) readme: add Interline Technologies to user list (#867)
* [c903168ee](https://github.com/argoproj/argo-workflows/commit/c903168ee12f296f71f4953cda2163b8fa8cd409) Add documentation on global parameters (#871)
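
A minimal sketch of the `withSequence` loop introduced in v2.2.0 above (resolves #527), using the `count` form of the field; the names, image, and command are illustrative only:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: loop-sequence-   # illustrative name
spec:
  entrypoint: main
  templates:
  - name: main
    steps:
    - - name: print
        template: echo
        arguments:
          parameters:
          - name: n
            value: "{{item}}"
        # Expands this step into one instance per number in the range 0..4.
        withSequence:
          count: "5"
  - name: echo
    inputs:
      parameters:
      - name: n
    container:
      image: alpine:3.7          # illustrative image
      command: [echo, "{{inputs.parameters.n}}"]
```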

### Contributors

* Andrei Miulescu
* David Van Loon
* Drew Dara-Abrams
* Ed Lee
* Edward Lee
* Jesse Suen
* Julien Balestra
* Konstantin Zadorozhny
* Matthew Magaldi
* Nándor István Krácser
* Val Sichkovskyi
* Vincent Smith
* dthomson25
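
A minimal sketch of the `TTLSecondsAfterFinished` garbage collection introduced in the v2.2.0 section above (resolves #911), assuming the spec-level field name from that release (later versions replaced it with a `ttlStrategy` block); the name, image, and command are illustrative only:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: gc-ttl-          # illustrative name
spec:
  # Delete the Workflow resource one hour after it finishes.
  ttlSecondsAfterFinished: 3600
  entrypoint: main
  templates:
  - name: main
    container:
      image: alpine:3.7          # illustrative image
      command: [echo, "this workflow is garbage collected after an hour"]
```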

## v2.1.2 (2018-10-11)

* [b82ce5b0b](https://github.com/argoproj/argo-workflows/commit/b82ce5b0b558ec5df70b760c0f67fc7e84cdfdf1) Update version to 2.1.2
* [01a1214e6](https://github.com/argoproj/argo-workflows/commit/01a1214e6ae6680663168d308399b11aa7224d7e) Issue #1033 - Workflow executor panic: workflows.argoproj.io/template workflows.argoproj.io/template not found in annotation file (#1034)

### Contributors

* Alexander Matyushentsev

## v2.1.1 (2018-05-29)

* [ac241c95c](https://github.com/argoproj/argo-workflows/commit/ac241c95c13f08e868cd6f5ee32c9ce273e239ff) Update CHANGELOG for v2.1.1
* [468e07600](https://github.com/argoproj/argo-workflows/commit/468e07600c5e124c8d2e0737f8c67a3265979952) Retrying failed steps templates could potentially result in disconnected children
* [8d96ea7b1](https://github.com/argoproj/argo-workflows/commit/8d96ea7b1b1ba843eb19a0632bc503d816ab9ef3) Switch to an UnstructuredInformer to guard against malformed workflow manifests (resolves #632)
* [5bef6cae2](https://github.com/argoproj/argo-workflows/commit/5bef6cae26dece96cadad855c9d54c5148f5e917) Suspend templates were not properly being connected to their children (resolves #869)
* [543e9392f](https://github.com/argoproj/argo-workflows/commit/543e9392f44873d1deb0a95fad3e00d67e8a7c70) Fix issue where a failed step in a template with parallelism would not complete (resolves #868)
* [289000cac](https://github.com/argoproj/argo-workflows/commit/289000cac81b199c2fc9e50d04831e3ccfcc0659) Update argocli Dockerfile and make cli-image part of release
* [d35a1e694](https://github.com/argoproj/argo-workflows/commit/d35a1e6949beca7cd032e5de5687e4e66869a916) Bump version to v2.1.1
* [bbcff0c94](https://github.com/argoproj/argo-workflows/commit/bbcff0c94edf2b3270d7afc03b2538f47cb28492) Fix issue where `argo list` age column maxed out at 1d (resolves #857)
* [d68cfb7e5](https://github.com/argoproj/argo-workflows/commit/d68cfb7e585121e38e36c9d9dbd3e9cf8a1d9aac) Fix issue where volumes were not supported in script templates (resolves #852)
* [fa72b6dbe](https://github.com/argoproj/argo-workflows/commit/fa72b6dbe4533ed9e2cc2c9f6bb574bcd85c6d16) Fix implementation of DAG task targets (resolves #865)
* [dc003f43b](https://github.com/argoproj/argo-workflows/commit/dc003f43baeba5509bfadfc825ced533715b93c6) Children of nested DAG templates were not correctly being connected to its parent
* [b80657977](https://github.com/argoproj/argo-workflows/commit/b8065797712a29b0adefa5769cc6ffd2c6c7edd7) Simplify some examples for readability and clarity
* [7b02c050e](https://github.com/argoproj/argo-workflows/commit/7b02c050e86138983b20a38ee9efab52180141d5) Add CoreFiling to "Who uses Argo?" section. (#864)
* [4f2fde505](https://github.com/argoproj/argo-workflows/commit/4f2fde505d221783bec889f3c9339361f5e8be73) Add windows support for argo-cli (#856)
* [703241e60](https://github.com/argoproj/argo-workflows/commit/703241e60c7203550ac9f7947284e5d6fde3dc74) Updated ROADMAP.md for v2.2
* [54f2138ef](https://github.com/argoproj/argo-workflows/commit/54f2138ef83f92d2038ebf7b925bd102bc5a7b8d) Spell check the examples README (#855)
* [f23feff5e](https://github.com/argoproj/argo-workflows/commit/f23feff5e9353b4796ad4f0afa33efcb1b9f0d95) Mkbranch (#851)
* [628b54089](https://github.com/argoproj/argo-workflows/commit/628b540891d1999c708accf064356d4dad22c7e0) DAG docs. (#850)
* [22f624396](https://github.com/argoproj/argo-workflows/commit/22f624396c3c8cacd288040935feb7da4e4a869d) Small edit to README
* [edc09afc3](https://github.com/argoproj/argo-workflows/commit/edc09afc332c6e2707688a050060548940eca852) Added OWNERS file
* [530e72444](https://github.com/argoproj/argo-workflows/commit/530e72444e2ced0c3c050e3238431dc32c1645c5) Update release notes and documentation for v2.1.0
* [937963818](https://github.com/argoproj/argo-workflows/commit/9379638189cc194f1b34ff7295f0832eac1c1651) Avoid `println` which outputs to stderr. (#844)
* [30e472e94](https://github.com/argoproj/argo-workflows/commit/30e472e9495f264676c00875e4ba5ddfcc23e15f) Add gladly as an official argo user (#843)
* [cb4c1a13b](https://github.com/argoproj/argo-workflows/commit/cb4c1a13b8c92d2bbfb73c2f1d7c8fcc5697ec6b) Add ability to override metadata.name and/or metadata.generateName during submission (resolves #836)
* [834468a5d](https://github.com/argoproj/argo-workflows/commit/834468a5d12598062b870c073f9a0230028c71b0) Command print the logs for a container in a workflow
* [1cf13f9b0](https://github.com/argoproj/argo-workflows/commit/1cf13f9b008ae41bbb23af6b55bf8e982723292f) Issue #825 - fix locating outbound nodes for skipped node (#842)
* [30034d42b](https://github.com/argoproj/argo-workflows/commit/30034d42b4f35729dd4575153c268565efef47be) Bump from debian:9.1 to debian:9.4. (#841)
* [f3c41717b](https://github.com/argoproj/argo-workflows/commit/f3c41717b21339157b6519b86e22a5e20feb2b97) Owner reference example (#839)
* [191f7aff4](https://github.com/argoproj/argo-workflows/commit/191f7aff4b619bc6796c18c39e58ed9636865cf5) Minor edit to README
* [c8a2e25fa](https://github.com/argoproj/argo-workflows/commit/c8a2e25fa6085587018f65a0fc4ec31f012c2653) Fixed typo (#835)
* [cf13bf0b3](https://github.com/argoproj/argo-workflows/commit/cf13bf0b35ebbcefce1138fa77f04b268ccde394) Added users section to README
* [e4d76329b](https://github.com/argoproj/argo-workflows/commit/e4d76329bf13e72f09433a9ab219f9c025d232a9) Updated News in README
* [b631d0af4](https://github.com/argoproj/argo-workflows/commit/b631d0af4dee5ecbe6e70e39ad31b9f708efb6b9) added community meeting (#834)
* [e34728c66](https://github.com/argoproj/argo-workflows/commit/e34728c66bf37b76cb92f03552a2f2a200f09644) Fix issue where daemoned steps were not terminated properly in DAG templates (resolves #832)
* [2e9e113fb](https://github.com/argoproj/argo-workflows/commit/2e9e113fb3f2b86f75df9669f4bf11fca181a348) Update docs to work with latest minio chart
* [ea95f1910](https://github.com/argoproj/argo-workflows/commit/ea95f191047dd17bbcab8573541d25fbd51829c0) Use octal syntax for mode values (#833)
* [5fc67d2b7](https://github.com/argoproj/argo-workflows/commit/5fc67d2b785ac582a03e7dcdc83fc212839863d1) Updated community docs
* [8fa4f0063](https://github.com/argoproj/argo-workflows/commit/8fa4f0063893d8c419e4a9466abbc608c5c97811) Added community docs
* [423c8d144](https://github.com/argoproj/argo-workflows/commit/423c8d144eab054acf682127c1ca04c216199db0) Issue #830 - retain Step node children references
* [73990c787](https://github.com/argoproj/argo-workflows/commit/73990c787b08f2ce72f65b8169e9f1653b5b6877) Moved cricket gifs to a different s3 bucket
* [ca1858caa](https://github.com/argoproj/argo-workflows/commit/ca1858caade6385f5424e16f53da5d38f2fcb3b2) edit Argo license info so that GitHub recognizes it (#823)
* [206451f06](https://github.com/argoproj/argo-workflows/commit/206451f066924abf3b4b6756606234150bf10fc9) Fix influxdb-ci.yml example
* [da582a519](https://github.com/argoproj/argo-workflows/commit/da582a5194056a08d5eef95c2441b562cde08740) Avoid nil pointer for 2.0 workflows. (#820)
(#820) - * [0f225cef9](https://github.com/argoproj/argo-workflows/commit/0f225cef91f4b276e24270a827c37dcd5292a4f0) ClusterRoleBinding was using incorrect service account namespace reference when overriding install namespace (resolves #814) - * [66ea711a1](https://github.com/argoproj/argo-workflows/commit/66ea711a1c7cc805282fd4065e029287f4617d57) Issue #816 - fix updating outboundNodes field of failed step group node (#817) - * [00ceef6aa](https://github.com/argoproj/argo-workflows/commit/00ceef6aa002199186475350b95ebc2d32debf14) install & uninstall commands use --namespace flag (#813) - -### Contributors - - * Adam Pearse - * Alexander Matyushentsev - * Andrea Kao - * Edward Lee - * Eric - * Javier Castellanos - * Jesse Suen - * Jonas Fonseca - * Lukasz Lempart - * Matt Hillsdon - * Mukulikak - * Sean Fitzgerald - * Sebastien Doido - -## v2.1.0-beta2 (2018-03-29) - - * [fe23c2f65](https://github.com/argoproj/argo-workflows/commit/fe23c2f651a61a2d7aa877a86edff9802d7b5b47) Issue #810 - `argo install`does not install argo ui (#811) - * [28673ed2f](https://github.com/argoproj/argo-workflows/commit/28673ed2f85ca39f5d9b136382ea9a87da0ca716) Update release date in change log - -### Contributors - - * Alexander Matyushentsev - -## v2.1.0-beta1 (2018-03-29) - - * [05e8a9838](https://github.com/argoproj/argo-workflows/commit/05e8a98386ccc73a02f39357f6faed69f7d11a17) Update change log for 2.1.0-beta1 release - * [bf38b6b50](https://github.com/argoproj/argo-workflows/commit/bf38b6b509ae3fb123e47da2570906d0262ccf67) Use socket type for hostPath to mount docker.sock (#804) (#809) - * [37680ef26](https://github.com/argoproj/argo-workflows/commit/37680ef26585f412930694cc809d9870d655bd13) Minimal shell completion support (#807) - * [c83ad24a6](https://github.com/argoproj/argo-workflows/commit/c83ad24a6fb5eb7054af16ae2c4f95de8df3965b) Omit empty status fields. (#806) - * [d7291a3ee](https://github.com/argoproj/argo-workflows/commit/d7291a3ee3b5375f8a079b60c568380e1bb91de9) Issue #660 - Support rendering logs from all steps using 'argo logs' command (#792) - * [7d3f1e83d](https://github.com/argoproj/argo-workflows/commit/7d3f1e83d3e08b13eb705ddd74244ea29e019c1a) Minor edits to README - * [7a4c9c1f9](https://github.com/argoproj/argo-workflows/commit/7a4c9c1f9c4fbd5282c57011c0bdcd48fe10137b) Added a review to README - * [383276f30](https://github.com/argoproj/argo-workflows/commit/383276f300e666bf133a0355f2da493997ddd6cc) Inlined LICENSE file. Renamed old license to COPYRIGHT - * [91d0f47fe](https://github.com/argoproj/argo-workflows/commit/91d0f47fec82c7cef156ac05287622adc0b0a53b) Build argo cli image (#800) - * [3b2c426ee](https://github.com/argoproj/argo-workflows/commit/3b2c426ee5ba6249fec0d0a59353bfe77cb0966c) Add ability to pass pod annotations and labels at the template level (#798) - * [d8be0287f](https://github.com/argoproj/argo-workflows/commit/d8be0287f04f1d0d3bdee60243e0742594009bc8) Add ability to use IAM role from EC2 instance for AWS S3 credentials - * [624f0f483](https://github.com/argoproj/argo-workflows/commit/624f0f48306183da33e2ef3aecf9566bb0ad8ad3) Update CHANGELOG.md for v2.1.0-beta1 release - * [e96a09a39](https://github.com/argoproj/argo-workflows/commit/e96a09a3911f039038ea3038bed3a8cd8d63e269) Allow spec.arguments to be not supplied during linting. 
Global parameters were not referencable from artifact arguments (resolves #791) - * [018e663a5](https://github.com/argoproj/argo-workflows/commit/018e663a53aeda35149ec9b8de28f26391eb688e) Fix for https://github.com/argoproj/argo/issues/739 Nested stepgroups render correctly (#790) - * [5c5b35ba2](https://github.com/argoproj/argo-workflows/commit/5c5b35ba271fb48c38bf65e386e3d8b574f49373) Fix install issue where service account was not being created - * [88e9e5ecb](https://github.com/argoproj/argo-workflows/commit/88e9e5ecb5fc9e5215033a11abf6f6ddf50db253) packr needs to run compiled in order to cross compile darwin binaries - * [dcdf9acf9](https://github.com/argoproj/argo-workflows/commit/dcdf9acf9c7c3f58b3adfbf1994a5d3e7574dd9c) Fix install tests and build failure - * [06c0d324b](https://github.com/argoproj/argo-workflows/commit/06c0d324bf93a037010186fe54e40590ea39d92c) Rewrite the installer such that manifests are maintainable - * [a45bf1b75](https://github.com/argoproj/argo-workflows/commit/a45bf1b7558b3eb60ec65d02c166c306e7797a79) Introduce support for exported global output parameters and artifacts - * [60c48a9aa](https://github.com/argoproj/argo-workflows/commit/60c48a9aa4b4dbf4c229e273faa945e0f5982539) Introduce `argo retry` to retry a failed workflow with the same name (resolves #762) onExit and related nodes should never be executed during resubmit/retry (resolves #780) - * [90c08bffc](https://github.com/argoproj/argo-workflows/commit/90c08bffc1b12b4c7941daccbf417772f17e3704) Refactor command structure - * [101509d6b](https://github.com/argoproj/argo-workflows/commit/101509d6b5ebeb957bb7ad6e819a961a26812a0e) Abstract the container runtime as an interface to support mocking and future runtimes Trim a trailing newline from path-based output parameters (resolves #758) - * [a3441d38b](https://github.com/argoproj/argo-workflows/commit/a3441d38b9be1f75506ab91dfbac7d6546d2b900) Add ability to reference global parameters in spec level fields (resolves #749) - * [cd73a9ce1](https://github.com/argoproj/argo-workflows/commit/cd73a9ce18aae35beee5012c68f553ab0c46030d) Fix template.parallelism limiting parallelism of entire workflow (resolves #772) Refactor operator to make template execution method signatures consistent - * [7d7b74fa8](https://github.com/argoproj/argo-workflows/commit/7d7b74fa8a62c43f8891a9af1dcae71f6efdc7e0) Make {{pod.name}} available as a parameter in pod templates (resolves #744) - * [3cf4bb136](https://github.com/argoproj/argo-workflows/commit/3cf4bb136a9857ea17921a2ec6cfd95b4b95a0d7) parse the artifactory URL before appending the artifact to the path (#774) - * [ea1257f71](https://github.com/argoproj/argo-workflows/commit/ea1257f717676997f0efcac9086ed348613a28c7) examples: use alpine python image - * [2114078c5](https://github.com/argoproj/argo-workflows/commit/2114078c533db0ab34b2f76fe481f03eba046cc1) fix typo - * [9f6055899](https://github.com/argoproj/argo-workflows/commit/9f6055899fff0b3161bb573159b13fd337e2e35f) Fix retry-container-to-completion example - * [07422f264](https://github.com/argoproj/argo-workflows/commit/07422f264ed62a428622505e1880d2d5787d50ae) Update CHANGELOG release date. 
Remove ui-image from release target - -### Contributors - - * Alexander Matyushentsev - * Dmitry Monakhov - * Edward Lee - * Jesse Suen - * Johannes 'fish' Ziemke - * Lukasz Lempart - * Matt Hillsdon - * Yang Pan - * dougsc - * gaganapplatix - -## v2.1.0-alpha1 (2018-02-21) - - -### Contributors - - -## v2.1.0 (2018-04-29) - - * [937963818](https://github.com/argoproj/argo-workflows/commit/9379638189cc194f1b34ff7295f0832eac1c1651) Avoid `println` which outputs to stderr. (#844) - * [30e472e94](https://github.com/argoproj/argo-workflows/commit/30e472e9495f264676c00875e4ba5ddfcc23e15f) Add gladly as an official argo user (#843) - * [cb4c1a13b](https://github.com/argoproj/argo-workflows/commit/cb4c1a13b8c92d2bbfb73c2f1d7c8fcc5697ec6b) Add ability to override metadata.name and/or metadata.generateName during submission (resolves #836) - * [834468a5d](https://github.com/argoproj/argo-workflows/commit/834468a5d12598062b870c073f9a0230028c71b0) Command print the logs for a container in a workflow - * [1cf13f9b0](https://github.com/argoproj/argo-workflows/commit/1cf13f9b008ae41bbb23af6b55bf8e982723292f) Issue #825 - fix locating outbound nodes for skipped node (#842) - * [30034d42b](https://github.com/argoproj/argo-workflows/commit/30034d42b4f35729dd4575153c268565efef47be) Bump from debian:9.1 to debian:9.4. (#841) - * [f3c41717b](https://github.com/argoproj/argo-workflows/commit/f3c41717b21339157b6519b86e22a5e20feb2b97) Owner reference example (#839) - * [191f7aff4](https://github.com/argoproj/argo-workflows/commit/191f7aff4b619bc6796c18c39e58ed9636865cf5) Minor edit to README - * [c8a2e25fa](https://github.com/argoproj/argo-workflows/commit/c8a2e25fa6085587018f65a0fc4ec31f012c2653) Fixed typo (#835) - * [cf13bf0b3](https://github.com/argoproj/argo-workflows/commit/cf13bf0b35ebbcefce1138fa77f04b268ccde394) Added users section to README - * [e4d76329b](https://github.com/argoproj/argo-workflows/commit/e4d76329bf13e72f09433a9ab219f9c025d232a9) Updated News in README - * [b631d0af4](https://github.com/argoproj/argo-workflows/commit/b631d0af4dee5ecbe6e70e39ad31b9f708efb6b9) added community meeting (#834) - * [e34728c66](https://github.com/argoproj/argo-workflows/commit/e34728c66bf37b76cb92f03552a2f2a200f09644) Fix issue where daemoned steps were not terminated properly in DAG templates (resolves #832) - * [2e9e113fb](https://github.com/argoproj/argo-workflows/commit/2e9e113fb3f2b86f75df9669f4bf11fca181a348) Update docs to work with latest minio chart - * [ea95f1910](https://github.com/argoproj/argo-workflows/commit/ea95f191047dd17bbcab8573541d25fbd51829c0) Use octal syntax for mode values (#833) - * [5fc67d2b7](https://github.com/argoproj/argo-workflows/commit/5fc67d2b785ac582a03e7dcdc83fc212839863d1) Updated community docs - * [8fa4f0063](https://github.com/argoproj/argo-workflows/commit/8fa4f0063893d8c419e4a9466abbc608c5c97811) Added community docs - * [423c8d144](https://github.com/argoproj/argo-workflows/commit/423c8d144eab054acf682127c1ca04c216199db0) Issue #830 - retain Step node children references - * [73990c787](https://github.com/argoproj/argo-workflows/commit/73990c787b08f2ce72f65b8169e9f1653b5b6877) Moved cricket gifs to a different s3 bucket - * [ca1858caa](https://github.com/argoproj/argo-workflows/commit/ca1858caade6385f5424e16f53da5d38f2fcb3b2) edit Argo license info so that GitHub recognizes it (#823) - * [206451f06](https://github.com/argoproj/argo-workflows/commit/206451f066924abf3b4b6756606234150bf10fc9) Fix influxdb-ci.yml example - * 
- * [da582a519](https://github.com/argoproj/argo-workflows/commit/da582a5194056a08d5eef95c2441b562cde08740) Avoid nil pointer for 2.0 workflows. (#820)
- * [0f225cef9](https://github.com/argoproj/argo-workflows/commit/0f225cef91f4b276e24270a827c37dcd5292a4f0) ClusterRoleBinding was using incorrect service account namespace reference when overriding install namespace (resolves #814)
- * [66ea711a1](https://github.com/argoproj/argo-workflows/commit/66ea711a1c7cc805282fd4065e029287f4617d57) Issue #816 - fix updating outboundNodes field of failed step group node (#817)
- * [00ceef6aa](https://github.com/argoproj/argo-workflows/commit/00ceef6aa002199186475350b95ebc2d32debf14) install & uninstall commands use --namespace flag (#813)
- * [fe23c2f65](https://github.com/argoproj/argo-workflows/commit/fe23c2f651a61a2d7aa877a86edff9802d7b5b47) Issue #810 - `argo install` does not install argo ui (#811)
- * [28673ed2f](https://github.com/argoproj/argo-workflows/commit/28673ed2f85ca39f5d9b136382ea9a87da0ca716) Update release date in change log
- * [05e8a9838](https://github.com/argoproj/argo-workflows/commit/05e8a98386ccc73a02f39357f6faed69f7d11a17) Update change log for 2.1.0-beta1 release
- * [bf38b6b50](https://github.com/argoproj/argo-workflows/commit/bf38b6b509ae3fb123e47da2570906d0262ccf67) Use socket type for hostPath to mount docker.sock (#804) (#809)
- * [37680ef26](https://github.com/argoproj/argo-workflows/commit/37680ef26585f412930694cc809d9870d655bd13) Minimal shell completion support (#807)
- * [c83ad24a6](https://github.com/argoproj/argo-workflows/commit/c83ad24a6fb5eb7054af16ae2c4f95de8df3965b) Omit empty status fields. (#806)
- * [d7291a3ee](https://github.com/argoproj/argo-workflows/commit/d7291a3ee3b5375f8a079b60c568380e1bb91de9) Issue #660 - Support rendering logs from all steps using 'argo logs' command (#792)
- * [7d3f1e83d](https://github.com/argoproj/argo-workflows/commit/7d3f1e83d3e08b13eb705ddd74244ea29e019c1a) Minor edits to README
- * [7a4c9c1f9](https://github.com/argoproj/argo-workflows/commit/7a4c9c1f9c4fbd5282c57011c0bdcd48fe10137b) Added a review to README
- * [383276f30](https://github.com/argoproj/argo-workflows/commit/383276f300e666bf133a0355f2da493997ddd6cc) Inlined LICENSE file. Renamed old license to COPYRIGHT
- * [91d0f47fe](https://github.com/argoproj/argo-workflows/commit/91d0f47fec82c7cef156ac05287622adc0b0a53b) Build argo cli image (#800)
- * [3b2c426ee](https://github.com/argoproj/argo-workflows/commit/3b2c426ee5ba6249fec0d0a59353bfe77cb0966c) Add ability to pass pod annotations and labels at the template level (#798)
- * [d8be0287f](https://github.com/argoproj/argo-workflows/commit/d8be0287f04f1d0d3bdee60243e0742594009bc8) Add ability to use IAM role from EC2 instance for AWS S3 credentials
- * [624f0f483](https://github.com/argoproj/argo-workflows/commit/624f0f48306183da33e2ef3aecf9566bb0ad8ad3) Update CHANGELOG.md for v2.1.0-beta1 release
- * [e96a09a39](https://github.com/argoproj/argo-workflows/commit/e96a09a3911f039038ea3038bed3a8cd8d63e269) Allow spec.arguments to be not supplied during linting. Global parameters were not referenceable from artifact arguments (resolves #791)
- * [018e663a5](https://github.com/argoproj/argo-workflows/commit/018e663a53aeda35149ec9b8de28f26391eb688e) Fix for https://github.com/argoproj/argo/issues/739 Nested stepgroups render correctly (#790)
- * [5c5b35ba2](https://github.com/argoproj/argo-workflows/commit/5c5b35ba271fb48c38bf65e386e3d8b574f49373) Fix install issue where service account was not being created
- * [88e9e5ecb](https://github.com/argoproj/argo-workflows/commit/88e9e5ecb5fc9e5215033a11abf6f6ddf50db253) packr needs to run compiled in order to cross-compile darwin binaries
- * [dcdf9acf9](https://github.com/argoproj/argo-workflows/commit/dcdf9acf9c7c3f58b3adfbf1994a5d3e7574dd9c) Fix install tests and build failure
- * [06c0d324b](https://github.com/argoproj/argo-workflows/commit/06c0d324bf93a037010186fe54e40590ea39d92c) Rewrite the installer such that manifests are maintainable
- * [a45bf1b75](https://github.com/argoproj/argo-workflows/commit/a45bf1b7558b3eb60ec65d02c166c306e7797a79) Introduce support for exported global output parameters and artifacts
- * [60c48a9aa](https://github.com/argoproj/argo-workflows/commit/60c48a9aa4b4dbf4c229e273faa945e0f5982539) Introduce `argo retry` to retry a failed workflow with the same name (resolves #762) onExit and related nodes should never be executed during resubmit/retry (resolves #780)
- * [90c08bffc](https://github.com/argoproj/argo-workflows/commit/90c08bffc1b12b4c7941daccbf417772f17e3704) Refactor command structure
- * [101509d6b](https://github.com/argoproj/argo-workflows/commit/101509d6b5ebeb957bb7ad6e819a961a26812a0e) Abstract the container runtime as an interface to support mocking and future runtimes Trim a trailing newline from path-based output parameters (resolves #758)
- * [a3441d38b](https://github.com/argoproj/argo-workflows/commit/a3441d38b9be1f75506ab91dfbac7d6546d2b900) Add ability to reference global parameters in spec level fields (resolves #749)
- * [cd73a9ce1](https://github.com/argoproj/argo-workflows/commit/cd73a9ce18aae35beee5012c68f553ab0c46030d) Fix template.parallelism limiting parallelism of entire workflow (resolves #772) Refactor operator to make template execution method signatures consistent
- * [7d7b74fa8](https://github.com/argoproj/argo-workflows/commit/7d7b74fa8a62c43f8891a9af1dcae71f6efdc7e0) Make {{pod.name}} available as a parameter in pod templates (resolves #744)
- * [3cf4bb136](https://github.com/argoproj/argo-workflows/commit/3cf4bb136a9857ea17921a2ec6cfd95b4b95a0d7) parse the artifactory URL before appending the artifact to the path (#774)
- * [ea1257f71](https://github.com/argoproj/argo-workflows/commit/ea1257f717676997f0efcac9086ed348613a28c7) examples: use alpine python image
- * [2114078c5](https://github.com/argoproj/argo-workflows/commit/2114078c533db0ab34b2f76fe481f03eba046cc1) fix typo
- * [9f6055899](https://github.com/argoproj/argo-workflows/commit/9f6055899fff0b3161bb573159b13fd337e2e35f) Fix retry-container-to-completion example
- * [07422f264](https://github.com/argoproj/argo-workflows/commit/07422f264ed62a428622505e1880d2d5787d50ae) Update CHANGELOG release date. Remove ui-image from release target
- * [5d60d073a](https://github.com/argoproj/argo-workflows/commit/5d60d073a1a6c2151ca3a07c15dd2580c92fc11d) Fix make release target
- * [a013fb381](https://github.com/argoproj/argo-workflows/commit/a013fb381b30ecb513def88a0ec3160bdc18a5d1) Fix inability to override LDFLAGS when env variables were supplied to make
- * [f63e552b1](https://github.com/argoproj/argo-workflows/commit/f63e552b1c8e191689cfb73751654782de94445c) Minor spell fix for parallelism
- * [88d2ff3a7](https://github.com/argoproj/argo-workflows/commit/88d2ff3a7175b0667351d0be611b97c2ebee908c) Add UI changes description for 2.1.0-alpha1 release (#761)
- * [ce4edb8df](https://github.com/argoproj/argo-workflows/commit/ce4edb8dfab89e9ff234b12d3ab4996183a095da) Add contributor credits
- * [cc8f35b63](https://github.com/argoproj/argo-workflows/commit/cc8f35b636558f98cd2bd885142aa1f8fd94cb75) Add note about region discovery.
- * [9c691a7c8](https://github.com/argoproj/argo-workflows/commit/9c691a7c88904a50427349b698039ff90b1cf83b) Trim spaces from aws keys
- * [17e24481d](https://github.com/argoproj/argo-workflows/commit/17e24481d8b3d8416f3590bb11bbee85123c1eb5) add keyPrefix option to ARTIFACT_REPO.md
- * [57a568bfd](https://github.com/argoproj/argo-workflows/commit/57a568bfddc42528cb75580501d0b65264318424) Issue #747 - Support --instanceId parameter when submitting a workflow (#748)
- * [81a6cd365](https://github.com/argoproj/argo-workflows/commit/81a6cd3653d1f0708bff4207e8df90c3dec4889a) Move UI code to separate repository (#742)
- * [10c7de574](https://github.com/argoproj/argo-workflows/commit/10c7de57478e13f6a11c77bcdf3ac3b0ae78fda7) Fix rbac resource versions in install
- * [2756e83d7](https://github.com/argoproj/argo-workflows/commit/2756e83d7a38bd7307d15ef0328ebc1cf7f40cae) Support workflow pod tolerations
- * [9bdab63f4](https://github.com/argoproj/argo-workflows/commit/9bdab63f451a2fff04cd58b55ecb9518f937e512) Add workflow.namespace to global parameters
- * [8bf7a1ad3](https://github.com/argoproj/argo-workflows/commit/8bf7a1ad3fde2e24f14a79294dd47cb5dae080b1) Statically link argo linux binary (resolves #735)
- * [813cf8ed2](https://github.com/argoproj/argo-workflows/commit/813cf8ed26e2f894b0457ee67cbb8d53e86c32c5) Add NodeStatus.DisplayName to remove CLI/UI guesswork from displaying node names (resolves #731)
- * [e783ccbd3](https://github.com/argoproj/argo-workflows/commit/e783ccbd30d1e11e3dcec1912b59c76e738a9d79) Rename some internal template type names for consistency
- * [19dd406cf](https://github.com/argoproj/argo-workflows/commit/19dd406cf040041ad15ce1867167902954f0f1d5) Introduce suspend templates for suspending a workflow at a predetermined step (resolves #702). Make suspend part of the workflow spec instead of inferring parallelism in status.
- * [d6489e12f](https://github.com/argoproj/argo-workflows/commit/d6489e12f5af8bbb372bfe077a01972235f219d3) Rename pause to suspend
- * [f1e2f63db](https://github.com/argoproj/argo-workflows/commit/f1e2f63dbdf30895a7829337dcec6bcf4b54b5da) Change definition of WorkflowStep.Item to a struct instead of interface{} (resolves #723) Add better withItems unit testing and validation
- * [cd18afae4](https://github.com/argoproj/argo-workflows/commit/cd18afae4932fd29b614a1b399edb84184d7a053) Missed handling of an error during workflow resubmission
- * [a7ca59be8](https://github.com/argoproj/argo-workflows/commit/a7ca59be870397271fabf5dba7cdfca7d79a934f) Support resubmission of failed workflows with ability to re-use successful steps (resolves #694)
- * [76b41877c](https://github.com/argoproj/argo-workflows/commit/76b41877c8a90b2e5529f9fe305f8ebdbcb72377) Include inputs as part of NodeStatus (resolves #730)
- * [ba683c1b9](https://github.com/argoproj/argo-workflows/commit/ba683c1b916fd47bf21028cd1338ef8a7b4b7601) Support for manual pausing and resuming of workflows via Argo CLI (resolves #729)
- * [5a806f93a](https://github.com/argoproj/argo-workflows/commit/5a806f93a398faefc276d958d476e77c12989a72) Add DAG gif for argo wiki (#728)
- * [62a3fba10](https://github.com/argoproj/argo-workflows/commit/62a3fba106be6a331ba234614c24562e620154c0) Implement support for DAG templates to have output parameters/artifacts
- * [989e8ed2c](https://github.com/argoproj/argo-workflows/commit/989e8ed2c9e87ae4cc33df832f8ae4fb87c69fa7) Support parameter and artifact passing between DAG tasks. Improved template validation
- * [03d409a3a](https://github.com/argoproj/argo-workflows/commit/03d409a3ac62a9e631c1f195b53fff70c8dfab7b) Switch back to Updating CRDs (from Patch) to enable better unit testing
- * [2da685d93](https://github.com/argoproj/argo-workflows/commit/2da685d93ff234f79689f40b3123667de81acce3) Fixed typos in examples/README.md
- * [6cf94b1bf](https://github.com/argoproj/argo-workflows/commit/6cf94b1bf4d95c1e76a15c7ef36553cc301cf27d) Added output parameter example to examples/README.md
- * [0517096c3](https://github.com/argoproj/argo-workflows/commit/0517096c32cd4f2443ae4208012c6110fbd07ab6) Add templateName as part of NodeStatus for UI consumption Simplify and centralize parallelism check into executeTemplate() Improved template validation
- * [deae4c659](https://github.com/argoproj/argo-workflows/commit/deae4c659b3c38f78fe5c8537319ea954fcfa54d) Add parallelism control at the steps template level
- * [c788484e1](https://github.com/argoproj/argo-workflows/commit/c788484e1cbbe158c2d7cdddd30b1a8242e2c30c) Remove hard-wired executor limits and make them configurable in the controller (resolves #724)
- * [f27c7ffd4](https://github.com/argoproj/argo-workflows/commit/f27c7ffd4e9bed1ddbbcb0e660854f6b2ce2daac) Fix linting issues (ineffassign, errcheck)
- * [98a44c99c](https://github.com/argoproj/argo-workflows/commit/98a44c99c2515f2295327ae9572732586ddc3d7b) Embed container type into the script template instead of cherry-picking fields (resolves #711)
- * [c0a8f949b](https://github.com/argoproj/argo-workflows/commit/c0a8f949b5ce9048fbc6f9fcc89876c8ad32c85c) Bump VERSION to 2.1.0
- * [207de8247](https://github.com/argoproj/argo-workflows/commit/207de82474a3c98411072345f542ebee4d8e7208) Add parallelism field to limit concurrent pod execution at a workflow level (issue #666)
- * [460c9555b](https://github.com/argoproj/argo-workflows/commit/460c9555b760aa9405e959a96b6c8cf339096573) Do not initialize DAG task nodes if they did not execute
- * [931d7723c](https://github.com/argoproj/argo-workflows/commit/931d7723cc42b3fc6d937b737735c9985cf91958) Update docs to refer to v2.0.0
- * [0978b9c61](https://github.com/argoproj/argo-workflows/commit/0978b9c61cb7435d31ef8d252b80e03708a70adc) Support setting UI base Url (#722)
- * [b75cd98f6](https://github.com/argoproj/argo-workflows/commit/b75cd98f6c038481ec3d2253e6404952bcaf4bd5) updated argo-user slack link
- * [b3598d845](https://github.com/argoproj/argo-workflows/commit/b3598d845c4cdb9ac7c4ae5eff5024ecd3fc5fd6) Add examples as functional and expected failure e2e tests
- * [83966e609](https://github.com/argoproj/argo-workflows/commit/83966e6095e2468368b0929613e7371074ee972b) Fix regression where executor did not annotate errors correctly
- * [751fd2702](https://github.com/argoproj/argo-workflows/commit/751fd27024d9f3bfc40051d2ca694b25a42307ea) Update UI references to v2.0.0. Update changelog
- * [75caa877b](https://github.com/argoproj/argo-workflows/commit/75caa877bc08184cad6dd34366b2b9f8b3dccc38) Initial work for dag based cli for everything. get now works (#714)
- * [8420deb30](https://github.com/argoproj/argo-workflows/commit/8420deb30a48839a097d3f5cd089e4b493b5e751) Skipped steps were being re-initialized, causing a controller panic
- * [491ed08ff](https://github.com/argoproj/argo-workflows/commit/491ed08ffe2f8430fcf35bf36e6dd16707eb5a0a) Check-in the OpenAPI spec. Automate generation as part of `make update-codegen`
- * [8b7e2e24e](https://github.com/argoproj/argo-workflows/commit/8b7e2e24e8cf7ae6b701f08b0702ac045e0336f8) Check-in the OpenAPI spec. Automate generation as part of `make update-codegen`
- * [563bda756](https://github.com/argoproj/argo-workflows/commit/563bda756732802caeaa516fd0c493c6e07f6cf9) Fix update-openapigen.sh script to presume bash. Tweak documentation
- * [5b9a602b4](https://github.com/argoproj/argo-workflows/commit/5b9a602b4a763ac633f7ede86f13253451855462) Add documentation to types. Add program to generate OpenAPI spec
- * [427269103](https://github.com/argoproj/argo-workflows/commit/4272691035e0588bbd301449c122ee2851e3c87f) Fix retry in dag branch (#709)
- * [d929e79f6](https://github.com/argoproj/argo-workflows/commit/d929e79f623017a923d1c4e120c363e08fe7a64a) Generate OpenAPI models for the workflow spec (issue #707)
- * [1d5afee6e](https://github.com/argoproj/argo-workflows/commit/1d5afee6ea48743bb854e69ffa333f361e52e289) Shortened url
- * [617d848da](https://github.com/argoproj/argo-workflows/commit/617d848da27d0035c20f21f3f6bddbe0e04550db) Added news to README
- * [ae36b22b6](https://github.com/argoproj/argo-workflows/commit/ae36b22b6d0d0ce8c230aedcce0814489162ae5b) Fix typo s/Customer/Custom/ (#704)
- * [5a589fcd9](https://github.com/argoproj/argo-workflows/commit/5a589fcd932116720411d53aeb6454e297456e06) Add ability to specify imagePullSecrets in the workflow.spec (resolves #699)
- * [2f77bc1ed](https://github.com/argoproj/argo-workflows/commit/2f77bc1ed00042388d0492cfd480d7c22599112c) Add ability to specify affinity rules at both the workflow and template level (resolves #701)
- * [c2dd9b635](https://github.com/argoproj/argo-workflows/commit/c2dd9b635657273c3974fc358fcdf797c821ac92) Fix unit test breakages
- * [d38324b46](https://github.com/argoproj/argo-workflows/commit/d38324b46100e6ba07ad1c8ffc957c257aac41d7) Add boundaryID field in NodeStatus to group nodes by template boundaries
- * [639ad1e15](https://github.com/argoproj/argo-workflows/commit/639ad1e15312da5efa88fd62a0f3aced2ac17c52) Introduce Type field in NodeStatus to assist with visualization
- * [fdafbe27e](https://github.com/argoproj/argo-workflows/commit/fdafbe27e5e2f4f2d58913328ae22db9a6c363b4) Sidecars unable to reference volume claim templates (resolves #697)
- * [0b0b52c3b](https://github.com/argoproj/argo-workflows/commit/0b0b52c3b45cbe5ac62da7b26b30d19fc1f9eb3e) Referencing output artifacts from a container with retries was not functioning (resolves #698)
- * [9597f82cd](https://github.com/argoproj/argo-workflows/commit/9597f82cd7a8b65cb03e4dfaa3023dcf20619b9d) Initial support for DAG based workflows (#693)
- * [bf2b376a1](https://github.com/argoproj/argo-workflows/commit/bf2b376a142ed4fdf70ba4f3702533e7b75fc6b2) Update doc references to point to v2.0.0-beta1. Fix secrets example
-
-### Contributors
-
- * Adam Pearse
- * Alexander Matyushentsev
- * Andrea Kao
- * Dan Bode
- * David Kale
- * Divya Vavili
- * Dmitry Monakhov
- * Edward Lee
- * Javier Castellanos
- * Jesse Dubay
- * Jesse Suen
- * Johannes 'fish' Ziemke
- * Lukasz Lempart
- * Matt Hillsdon
- * Mukulikak
- * Sean Fitzgerald
- * Sebastien Doido
- * Yang Pan
- * dougsc
- * gaganapplatix
-
-## v2.0.0-beta1 (2018-01-18)
-
- * [549870c1e](https://github.com/argoproj/argo-workflows/commit/549870c1ee08138b20b8a4b0c026569cf1e6c19a) Fix argo-ui download links to point to v2.0.0-beta1
- * [a202049d3](https://github.com/argoproj/argo-workflows/commit/a202049d327c64e282a37d7598bddc1faa1a3c1a) Update CHANGELOG for v2.0.0-beta1
- * [a3739035f](https://github.com/argoproj/argo-workflows/commit/a3739035f8e1f517721489fc53b58a8e27a575e1) Remove dind requirement from argo-ci test steps
- * [1bdd0c03d](https://github.com/argoproj/argo-workflows/commit/1bdd0c03dbb9d82ad841ca19be6e1ea93aeb82f7) Include completed pods when attempting to reconcile deleted pods Switch back to Patch (from Update) for persisting workflow changes
- * [a4a438921](https://github.com/argoproj/argo-workflows/commit/a4a4389219cbe84e3bc7b3731cdfccb9ee5f5730) Sleep 1s after persisting workflow to give informer cache a chance to sync (resolves #686)
- * [5bf49531f](https://github.com/argoproj/argo-workflows/commit/5bf49531f99ef9d8b8aefeac26a4a3fa0177e70d) Updated demo.md with link to ARTIFACT_REPO.md
- * [863d547a1](https://github.com/argoproj/argo-workflows/commit/863d547a1a2a146a898c06c835187e0595af5689) Rely on controller-generated timestamps for node.StartedAt instead of pod.CreationTimestamp
- * [672542d1f](https://github.com/argoproj/argo-workflows/commit/672542d1f08c206f89f8747e9b14b675cdd77446) Re-apply workflow changes and reattempt update on resource conflicts. Make completed pod labeling asynchronous
- * [81bd6d3d4](https://github.com/argoproj/argo-workflows/commit/81bd6d3d46d2fd7ea57aa095ae134116cfca90f2) Resource state retry (#690)
- * [44dba889c](https://github.com/argoproj/argo-workflows/commit/44dba889cb743552557fcd7453ee81a89875142d) Tune controller to 20 QPS, 30 Burst, 8 wf workers, 8 pod workers
- * [178b9d37c](https://github.com/argoproj/argo-workflows/commit/178b9d37cc452af214df7c9c41522124c117e7a3) Show running/completed pod counts in `argo list -o wide`
- * [0c565f5f5](https://github.com/argoproj/argo-workflows/commit/0c565f5f5e9f69244e9828ced7c3916ac605f460) Switch to Updating workflow resources instead of Patching (resolves #686)
- * [a571f592f](https://github.com/argoproj/argo-workflows/commit/a571f592fa131771b8d71126fc27809e24462cfe) Ensure sidecars get killed unequivocally. Final argoexec stats were not getting printed
- * [a0b2d78c8](https://github.com/argoproj/argo-workflows/commit/a0b2d78c869f277c20c4cd3ba18b8d2688674e54) Show duration by default in `argo get`. --since flag should always include Running
- * [101103136](https://github.com/argoproj/argo-workflows/commit/101103136287b8ee16a7afda94cc6ff59be07ef6) Executor hardening: add retries and memoization for executor k8s API calls. Recover from unexpected panics and annotate the error.
- * [f2b8f248a](https://github.com/argoproj/argo-workflows/commit/f2b8f248ab8d483e0ba41a287611393500c7b507) Regenerate deepcopy code after type changes for raw input artifacts
- * [322e0e3aa](https://github.com/argoproj/argo-workflows/commit/322e0e3aa3cb2e650f3ad4b7ff9157f71a92e8b4) renamed file as per review comment
- * [0a386ccaf](https://github.com/argoproj/argo-workflows/commit/0a386ccaf705a1abe1f9239adc966fceb7a808ae) changes from the review - renamed "contents" to "data" - lint issue
- * [d9ebbdc1b](https://github.com/argoproj/argo-workflows/commit/d9ebbdc1b31721c8095d3c5426c1c811054a94a7) support for raw input as artifact
- * [a1f821d58](https://github.com/argoproj/argo-workflows/commit/a1f821d589d47ca5b12b94ad09306a706a43d150) Introduce communication channel from workflow-controller to executor through pod annotations
- * [b324f9f52](https://github.com/argoproj/argo-workflows/commit/b324f9f52109b9aa29bc89d63810be6e421eb54f) Artifactory repository was not using correct casing for repoURL field
- * [3d45d25ac](https://github.com/argoproj/argo-workflows/commit/3d45d25ac497a09fa291d20f867a75f59b6abf92) Add `argo list --since` to filter workflows newer than a relative duration
- * [cc2efdec3](https://github.com/argoproj/argo-workflows/commit/cc2efdec368c2f133c076a9eda9065f64762a9fa) Add ability to set loglevel of controller via CLI flag
- * [60c124e5d](https://github.com/argoproj/argo-workflows/commit/60c124e5dddb6ebfee6300d36f6a3877838ec17c) Remove hack.go and use dep to install code-generators
- * [d14755a7c](https://github.com/argoproj/argo-workflows/commit/d14755a7c5f583c1f3c8c762ae8628e780f566cf) `argo list` was not handling the default case correctly
- * [472f5604e](https://github.com/argoproj/argo-workflows/commit/472f5604e27ca6310e016f846c97fda5d7bca9dd) Improvements to `argo list` * sort workflows by running vs. completed, then by finished time * add --running, --completed, --status XXX filters * add -o wide option to show parameters and -o name to show only the names
- * [b063f938f](https://github.com/argoproj/argo-workflows/commit/b063f938f34f650333df6ec5a2e6a325a5b45299) Use minimal ClusterRoles for workflow-controller and argo-ui deployments
- * [21bc2bd07](https://github.com/argoproj/argo-workflows/commit/21bc2bd07ebbfb478c87032e2ece9939ea436030) Added link to configuring artifact repo from main README
- * [b54bc067b](https://github.com/argoproj/argo-workflows/commit/b54bc067bda02f95937774fb3345dc2010d3efc6) Added link to configuring artifact repo from main README
- * [58ec51699](https://github.com/argoproj/argo-workflows/commit/58ec51699534e73d82c3f44027326b438cf5c063) Updated ARTIFACT_REPO.md
- * [1057d0878](https://github.com/argoproj/argo-workflows/commit/1057d087838bcbdbffc70367e0fc02778907c9af) Added detailed instructions on configuring AWS and GCP artifact repos
- * [b0a7f0da8](https://github.com/argoproj/argo-workflows/commit/b0a7f0da85fabad34814ab129eaba43862a1d2dd) Issue 680 - Argo UI is failing to render a workflow which has not been picked up by workflow controller (#681)
- * [e91c227ac](https://github.com/argoproj/argo-workflows/commit/e91c227acc1f86b7e341aaac534930f9b529cd89) Document and clarify artifact passing (#676)
- * [290f67997](https://github.com/argoproj/argo-workflows/commit/290f6799752ef602b27c193212495e27f40dd687) Allow containers to be retried. (#661)
- * [80f9b1b63](https://github.com/argoproj/argo-workflows/commit/80f9b1b636704ebad6ebb8df97c5e81dc4f815f9) Improve the error message when insufficient RBAC privileges are detected (resolves #659)
- * [3cf67df42](https://github.com/argoproj/argo-workflows/commit/3cf67df422f34257296d2de09d2ca3c8c87abf84) Regenerate autogenerated code after changes to types
- * [baf370529](https://github.com/argoproj/argo-workflows/commit/baf37052976458401a6c0e44d06f30dc8d819680) Add support for resource template outputs. Remove output.parameters.path in favor of valueFrom
- * [dc1256c20](https://github.com/argoproj/argo-workflows/commit/dc1256c2034f0add4bef3f82ce1a71b454d4eef5) Fix expected file name for issue template
- * [a492ad141](https://github.com/argoproj/argo-workflows/commit/a492ad14177eb43cdd6c2a017c9aec87183682ed) Add a GitHub issues template
- * [55be93a68](https://github.com/argoproj/argo-workflows/commit/55be93a68d8991f76a31adaf49f711436a35a9d0) Add a --dry-run option to `argo install`. Remove CRD creation from controller startup
- * [fddc052df](https://github.com/argoproj/argo-workflows/commit/fddc052df8a3478aede67057f2b06938c2a6a7a4) Fix README.md to contain influxdb-client in the example (#669)
- * [67236a594](https://github.com/argoproj/argo-workflows/commit/67236a5940231f7b9dc2ca2f4cb4cb70b7c18d45) Update getting started doc to use `brew install` and better instructions for RBAC clusters (resolves #654, #530)
- * [5ac197538](https://github.com/argoproj/argo-workflows/commit/5ac19753846566d0069b76e3e6c6dd03f0e6950c) Support rendering retry steps (#670)
- * [3cca0984c](https://github.com/argoproj/argo-workflows/commit/3cca0984c169ea59e8e2758a04550320b1981875) OpenID Connect auth support (#663)
- * [c222cb53a](https://github.com/argoproj/argo-workflows/commit/c222cb53a168f9bd40b7731d0b2f70db977990c2) Clarify where the Minio secret comes from.
- * [a78e2e8d5](https://github.com/argoproj/argo-workflows/commit/a78e2e8d551d6afad2e0fbce7a9f0a1bd023c11b) Remove parallel steps that use volumes.
- * [355173857](https://github.com/argoproj/argo-workflows/commit/355173857f98a9a9704ab23235b3186bde8092b9) Prevent a potential k8s scheduler panic from incomplete setting of pod ownership reference (resolves #656)
- * [1a8bc26d4](https://github.com/argoproj/argo-workflows/commit/1a8bc26d40597f2f0475aa9197a6b3912c5bbb56) Updated README
- * [9721fca0e](https://github.com/argoproj/argo-workflows/commit/9721fca0e1ae9d1d57aa8d1872450ce8ee7487e2) Updated README
- * [e31776061](https://github.com/argoproj/argo-workflows/commit/e3177606105a936da7eba29924fa49ad497703c9) Fix typos in READMEs
- * [555d50b0e](https://github.com/argoproj/argo-workflows/commit/555d50b0ebeef1c753394de974dad2e0d4a5b787) Simplify some getting started instructions. Correct some usages of container resources field
- * [4abc9c40e](https://github.com/argoproj/argo-workflows/commit/4abc9c40e7656a5783620e41b33e4ed3bb7249e2) Updated READMEs
- * [a0add24f9](https://github.com/argoproj/argo-workflows/commit/a0add24f9778789473b2b097fb31a56ae11bfce9) Switch to k8s-codegen generated workflow client and informer
- * [9b08b6e99](https://github.com/argoproj/argo-workflows/commit/9b08b6e997633d5f2e94392f000079cbe93ee023) Added link for argoproj slack channel
- * [682bbdc09](https://github.com/argoproj/argo-workflows/commit/682bbdc09b66698090d309e91b5caf4483931e34) Update references to point to latest argo release
-
-### Contributors
-
- * Alexander Matyushentsev
- * Ed Lee
- * Jesse Suen
- * Matt Hillsdon
- * Rhys Parry
- * Sandeep Bhojwani
- * Shri Javadekar
- * gaganapplatix
-
-## v2.0.0-alpha3 (2018-01-02)
-
- * [940dd56d9](https://github.com/argoproj/argo-workflows/commit/940dd56d98c75eb93da3b5de598882754cb74fc7) Fix artifactory unit test and linting issues
- * [e7ba2b441](https://github.com/argoproj/argo-workflows/commit/e7ba2b44114fca8a3cb2b8635dc2fdfeaa440d9e) Update help page links (#651)
- * [53dac4c74](https://github.com/argoproj/argo-workflows/commit/53dac4c74933c333124a0cb1d8cf6c9255f9199d) Add artifactory and UI fixes to 2.0.0-alpha3 CHANGELOG
- * [4b4eff43f](https://github.com/argoproj/argo-workflows/commit/4b4eff43f20ed678d34efe567a4d61d1364d7124) Allow disabling web console feature (#649)
- * [90b7f2e67](https://github.com/argoproj/argo-workflows/commit/90b7f2e67dddebba1678e215bde75d68867b4469) Added support for artifactory
- * [849e916e5](https://github.com/argoproj/argo-workflows/commit/849e916e5bf98f320f1a65b12ffe246d9ebbb6f6) Adjusted styles for logs stream (#614)
- * [a8a960303](https://github.com/argoproj/argo-workflows/commit/a8a960303423cde2e511d4af9c2c8ae834076b21) Update CHANGELOG for 2.0.0-alpha3
- * [e7c7678cc](https://github.com/argoproj/argo-workflows/commit/e7c7678cc605285e5b3224c757e5e4be57ab4d5c) Fix issue preventing ability to pass JSON as a command line param (resolves #646)
- * [7f5e2b96b](https://github.com/argoproj/argo-workflows/commit/7f5e2b96bd96e0bccf4778383aa9b94a1768e9c0) Add validation checks for volumeMount/artifact path collision and activeDeadlineSeconds (#620)
- * [dc4a94633](https://github.com/argoproj/argo-workflows/commit/dc4a94633c4d00d78a7ea53272e425962de405ba) Add the ability to specify the service account used by pods in the workflow (resolves #634) Also add argo CLI support for supplying/overriding spec.serviceAccountName from command line.
- * [16f7000aa](https://github.com/argoproj/argo-workflows/commit/16f7000aa77b2759fa0a65d6e42456bcb660f824) Workflow operator will recover from unexpected panics and mark the workflow with error (resolves #633)
- * [18dca7fe2](https://github.com/argoproj/argo-workflows/commit/18dca7fe21d57e6a5415c53bfdb87a889ac32456) Issue #629 - Add namespace to workflow list and workflow details page (#639)
- * [e656bace7](https://github.com/argoproj/argo-workflows/commit/e656bace75aaa859f04121f2c1d95631b462fe62) Issue #637 - Implement Workflow list and workflow details page live update (#638)
- * [1503ce3ae](https://github.com/argoproj/argo-workflows/commit/1503ce3aee40eba741819a1403847df4bbcb7b23) Issue #636 - Upgrade to ui-lib 2.0.3 to fix xterm incompatibility (#642)
- * [f9170e8ab](https://github.com/argoproj/argo-workflows/commit/f9170e8abb7121b0d0cbc3e4c07b9bdc2224fb76) Remove manifest-passing.yaml example now that we have resource templates
- * [25be5fd63](https://github.com/argoproj/argo-workflows/commit/25be5fd6368bac3fde8e4392b3cb9d4159983a1a) Implementation for resource templates and resource success/failure conditions
- * [402ad565f](https://github.com/argoproj/argo-workflows/commit/402ad565f4a3b95c449ddd4c6dc468947aeb7192) Updated examples/README
- * [8536c7fc8](https://github.com/argoproj/argo-workflows/commit/8536c7fc89a0ceb39208efe2076919d0390e3d2e) added secret example to examples/README
- * [e5002b828](https://github.com/argoproj/argo-workflows/commit/e5002b8286af2c1f7ec64953114e1d97c889ca37) Add '--wait' to argo submit.
- * [9646e55f8](https://github.com/argoproj/argo-workflows/commit/9646e55f8bb8fbac80d456853aa891c2ae069adb) Installer was not updating the configmap correctly with new executor image during upgrade
- * [69d72913a](https://github.com/argoproj/argo-workflows/commit/69d72913a3a72bbf7b075be847303305b4bef1a5) Support private git repos using secret selector fields in the git artifact (resolves #626)
- * [64e17244e](https://github.com/argoproj/argo-workflows/commit/64e17244ef04b9d2aa6abf6f18d4e7ef2d20ff37) Add argo ci workflow (#619)
- * [e89984355](https://github.com/argoproj/argo-workflows/commit/e8998435598e8239d7b77a60cfda43e8f2869b4d) Resolve controller panic when a script template with an input artifact was submitted (resolves #617). Utilize the kubernetes.Interface and fake.Clientset to support unit test mocking. Added a unit test to reproduce the panic. Add an e2e test to verify functionality works.
- * [52075b456](https://github.com/argoproj/argo-workflows/commit/52075b45611783d909609433bb44702888b5db37) Introduce controller instance IDs to support multiple workflow controllers in a cluster (resolves #508)
- * [133a23ce2](https://github.com/argoproj/argo-workflows/commit/133a23ce20b4570ded81fac76a430f0399c1eea1) Add ability to timeout a container/script using activeDeadlineSeconds
- * [b5b16e552](https://github.com/argoproj/argo-workflows/commit/b5b16e55260df018cc4de14bf298ce59714b4396) Support for workflow exit handlers
- * [906b3e7c7](https://github.com/argoproj/argo-workflows/commit/906b3e7c7cac191f920016362b076a28f18d97c1) Update ROADMAP.md
- * [5047422ae](https://github.com/argoproj/argo-workflows/commit/5047422ae71869672c84364d099e1488b29fbbe8) Update CHANGELOG.md
- * [2b6583dfb](https://github.com/argoproj/argo-workflows/commit/2b6583dfb02911965183ef4b25ed68c867448e10) Add `argo wait` for waiting on workflows to complete. (#618)
- * [cfc9801c4](https://github.com/argoproj/argo-workflows/commit/cfc9801c40528b6605823e1f4b4359600b6887df) Add option to print output of submit in json.
- * [c20c0f995](https://github.com/argoproj/argo-workflows/commit/c20c0f9958ceeefd3597120fcb4013d857276076) Comply with semantic versioning. Include build metadata in `argo version` (resolves #594)
- * [bb5ac7db5](https://github.com/argoproj/argo-workflows/commit/bb5ac7db52bff613c32b153b82953ec9c73c3b8a) minor change
- * [91845d499](https://github.com/argoproj/argo-workflows/commit/91845d4990ff8fd97bd9404e4b37024be1ee0ba6) Added more documentation
- * [4e8d69f63](https://github.com/argoproj/argo-workflows/commit/4e8d69f630bc0fd107b360ee9ad953ccb0b78f11) fixed install instructions
- * [0557147dd](https://github.com/argoproj/argo-workflows/commit/0557147dd4bfeb2688b969293ae858a8391d78ad) Removed empty toolbar (#600)
- * [bb2b29ff5](https://github.com/argoproj/argo-workflows/commit/bb2b29ff5e4178e2c8a9dfe666b699d75aa9ab3b) Added limit for number of steps in workflows list (#602)
- * [3f57cc1d2](https://github.com/argoproj/argo-workflows/commit/3f57cc1d2ff9c0e7ec40da325c3478a8037a6ac0) fixed typo in examples/README
- * [ebba60311](https://github.com/argoproj/argo-workflows/commit/ebba6031192b0a763bd94b1625a2ff6e242f112e) Updated examples/README.md with how to override entrypoint and parameters
- * [81834db3c](https://github.com/argoproj/argo-workflows/commit/81834db3c0bd12758a95e8a5862d6dda6d0dceeb) Example of using an emptyDir volume.
- * [4cd949d32](https://github.com/argoproj/argo-workflows/commit/4cd949d327ddb9d4f4592811c51e07bb53b30ef9) Remove apiserver
- * [6a916ca44](https://github.com/argoproj/argo-workflows/commit/6a916ca447147e4aff364ce032c9db4530d49d11) `argo lint` did not split yaml files. `argo submit` was not ignoring non-workflow manifests
- * [bf7d99797](https://github.com/argoproj/argo-workflows/commit/bf7d997970e967b2b238ce209ce823ea47de01d2) Include `make lint` and `make test` as part of CI
- * [d1639ecfa](https://github.com/argoproj/argo-workflows/commit/d1639ecfabf73f73ebe040b832668bd6a7b60d20) Create example workflow using kubernetes secrets (resolves #592)
- * [31c54af4b](https://github.com/argoproj/argo-workflows/commit/31c54af4ba4cb2a0db918fadf62cb0b854592ba5) Toolbar and filters on workflows list (#565)
- * [bb4520a6f](https://github.com/argoproj/argo-workflows/commit/bb4520a6f65d4e8e765ce4d426befa583721c194) Add and improve the inlined comments in example YAMLs
- * [a04707282](https://github.com/argoproj/argo-workflows/commit/a04707282cdeadf463b22b633fc00dba432f60bf) Fixed typo.
- * [13366e324](https://github.com/argoproj/argo-workflows/commit/13366e32467a34a061435091589c90d04a84facb) Fix some wrong GOPATH assumptions in Makefile. Add `make test` target. Fix unit tests
- * [9f4f1ee75](https://github.com/argoproj/argo-workflows/commit/9f4f1ee75705150a22dc68a3dd16fa90069219ed) Add 'misspell' to linters. Fix misspellings caught by linter
- * [1b918aff2](https://github.com/argoproj/argo-workflows/commit/1b918aff29ff8e592247d14c52be06a0537f0734) Address all issues in code caught by golang linting tools (resolves #584)
- * [903326d91](https://github.com/argoproj/argo-workflows/commit/903326d9103fa7dcab37835a9478f58aff51a5d1) Add manifest passing to do kubectl create with dynamic manifests (#588)
- * [b1ec3a3fc](https://github.com/argoproj/argo-workflows/commit/b1ec3a3fc90a211f9afdb9090d4396c98ab3f71f) Create the argo-ui service with type ClusterIP as part of installation (resolves #582)
- * [5b6271bc5](https://github.com/argoproj/argo-workflows/commit/5b6271bc56b46a82b0ee2bc0784315ffcddeb27f) Validate names for various workflow-specific fields and add tests for them (#586)
- * [b6e671318](https://github.com/argoproj/argo-workflows/commit/b6e671318a446f129740ce790f53425d65e436f3) Implementation for allowing access to global parameters in workflow (#571)
- * [c5ac5bfb8](https://github.com/argoproj/argo-workflows/commit/c5ac5bfb89274fb5ee85f9cef346b7059b5d7641) Fix error message when key does not exist in secret (resolves #574). Improve s3 example and documentation.
- * [4825c43b3](https://github.com/argoproj/argo-workflows/commit/4825c43b3e0c3c54b2313aa54e69520ed1b8a38d) Increase UI build memory limit (#580)
- * [87a20c6bc](https://github.com/argoproj/argo-workflows/commit/87a20c6bce9a6bfe2a88edc581746ff5f7f006fc) Update input-artifacts-s3.yaml example to explain concepts and usage better
- * [c16a9c871](https://github.com/argoproj/argo-workflows/commit/c16a9c87102fd5b66406737720204e5f17af0fd1) Rahuldhide patch 2 (#579)
- * [f5d0e340b](https://github.com/argoproj/argo-workflows/commit/f5d0e340b3626658b435dd2ddd937e97af7676b2) Issue #549 - Prepare argo v1 build config (#575)
- * [3b3a4c87b](https://github.com/argoproj/argo-workflows/commit/3b3a4c87bd3138961c948f869e2c5b7c932c8847) Argo logo
- * [d1967443a](https://github.com/argoproj/argo-workflows/commit/d1967443a4943f685f6cb1649480765050bdcdaa) Skip e2e tests if Kubeconfig is not provided.
- * [1ec231b69](https://github.com/argoproj/argo-workflows/commit/1ec231b69a1a7d985d1d587980c34588019b04aa) Create separate namespaces for tests.
- * [5ea20d7eb](https://github.com/argoproj/argo-workflows/commit/5ea20d7eb5b9193c19f7c875c8fb2f4af8f68ef3) Add a deadline for workflow operation to prevent workqueue starvation and to enable state resync (#531) Tested with 6 x 1000 pod workflows.
- * [346bafe63](https://github.com/argoproj/argo-workflows/commit/346bafe636281bca94695b285767f41ae71e6a69) Multiple scalability improvements to controller (resolves #531)
- * [bbc56b59e](https://github.com/argoproj/argo-workflows/commit/bbc56b59e2ff9635244bcb091e92e257a508d147) Improve argo ui build performance and reduce image size (#572)
- * [cdb1ce82b](https://github.com/argoproj/argo-workflows/commit/cdb1ce82bce9b103e433981d94bd911b0769350d) Upgrade ui-lib (#556)
- * [0605ad7b3](https://github.com/argoproj/argo-workflows/commit/0605ad7b33fc4f9c0bbff79adf1d509d3b072703) Adjusted tabs content size to see horizontal and vertical scrolls. (#569)
- * [a33162369](https://github.com/argoproj/argo-workflows/commit/a331623697e76a5e3497257e28fabe1995852339) Fix rendering 'Error' node status (#564)
- * [8c3a7a939](https://github.com/argoproj/argo-workflows/commit/8c3a7a9393d619951a676324810d482d28dfe015) Issue #548 - UI terminal window (#563)
- * [5ec6cc85a](https://github.com/argoproj/argo-workflows/commit/5ec6cc85aab63ea2277ce621d5de5b59a510d462) Implement API to ssh into pod (#561)
- * [beeb65ddc](https://github.com/argoproj/argo-workflows/commit/beeb65ddcb7d2b5f8286f7881af1f5c00535161e) Don't mess with the controller's arguments.
- * [01f5db5a0](https://github.com/argoproj/argo-workflows/commit/01f5db5a0c3dc48541577b9d8b1d815399728070) Parameterize Install() and related methods.
- * [85a2e2711](https://github.com/argoproj/argo-workflows/commit/85a2e2711beba8f2c891af396a3cc886c7b37542) Fix tests.
- * [56f666e1b](https://github.com/argoproj/argo-workflows/commit/56f666e1bf69a7f5d8191637e8c7f384b91d98d0) Basic E2e tests.
- * [9eafb9dd5](https://github.com/argoproj/argo-workflows/commit/9eafb9dd59166e76804b71c8df19fdca453cdd28) Issue #547 - Support filtering by status in API GET /workflows (#550)
- * [37f41eb7b](https://github.com/argoproj/argo-workflows/commit/37f41eb7bf366cfe007d3ecce7b21f003d381e34) Update demo.md
- * [ea8d5c113](https://github.com/argoproj/argo-workflows/commit/ea8d5c113d9245f47fe7b3d3f45e7891aa5f50e8) Update README.md
- * [373f07106](https://github.com/argoproj/argo-workflows/commit/373f07106ab14e3772c94af5cc11f7f1c7099204) Add support for making a no_ui build. Base all builds on the no_ui build (#553)
- * [ae65c57e5](https://github.com/argoproj/argo-workflows/commit/ae65c57e55f92fd8ff1edd099f659e9e97ce59f1) Update demo.md
- * [f6f8334b2](https://github.com/argoproj/argo-workflows/commit/f6f8334b2b3ed1f498c19e4de25421f41807f893) V2 style adjustments and small fixes (#544)
- * [12d5b7ca4](https://github.com/argoproj/argo-workflows/commit/12d5b7ca48c913e53b74708a35727d523dfa5355) Document argo ui service creation (#545)
- * [3202d4fac](https://github.com/argoproj/argo-workflows/commit/3202d4fac2d5d2d2a3ad1d679c1b753b04aca796) Support all namespaces (#543)
- * [b553c1bd9](https://github.com/argoproj/argo-workflows/commit/b553c1bd9a00499915dbe5926194d67c7392b944) Update demo.md to qualify the minio endpoint with the default namespace
- * [4df7617c2](https://github.com/argoproj/argo-workflows/commit/4df7617c2e97f2336195d6764259537be648b89b) Fix artifacts downloading (#541)
- * [12732200f](https://github.com/argoproj/argo-workflows/commit/12732200fb1ed95608cdc0b14bd0802c524c7fa2) Update demo.md with references to latest release
-
-### Contributors
-
- * Alexander Matyushentsev
- * Anshuman Bhartiya
- * Ed Lee
- * Javier Castellanos
- * Jesse Suen
- * Rafal
- * Rahul Dhide
- * Sandeep Bhojwani
- * Shri Javadekar
- * Wojciech Kalemba
- * gaganapplatix
- * mukulikak
-
-## v2.0.0-alpha2 (2017-12-04)
-
- * [0e67b8616](https://github.com/argoproj/argo-workflows/commit/0e67b8616444cf637d5b68e58eb6e068b721d34c) Add 'release' make target. Improve CLI help and set version from git tag. Uninstaller for UI
- * [8ab1d2e93](https://github.com/argoproj/argo-workflows/commit/8ab1d2e93ff969a1a01a06dcc3ac4aa04d3514aa) Install argo ui along with argo workflow controller (#540)
- * [f4af881e5](https://github.com/argoproj/argo-workflows/commit/f4af881e55cff12888867bca9dff940c1bb16c26) Add make command to build argo ui (#539)
- * [5bb858145](https://github.com/argoproj/argo-workflows/commit/5bb858145e1c603494d8202927197d38b121311a) Add example description in YAML.
- * [fc23fcdae](https://github.com/argoproj/argo-workflows/commit/fc23fcdaebc9049748d57ab178517d18eed4af7d) edit example README
- * [8dd294aa0](https://github.com/argoproj/argo-workflows/commit/8dd294aa003ee1ffaa70cd7735b7d62c069eeb0f) Add example of GIF processing using ImageMagick
- * [ef8e9d5c2](https://github.com/argoproj/argo-workflows/commit/ef8e9d5c234b1f889c4a2accbc9f24d58ce553b9) Implement loader (#537)
- * [2ac37361e](https://github.com/argoproj/argo-workflows/commit/2ac37361e6620b37af09cd3e50ecc0fb3fb62a12) Allow specifying CRD version (#536)
- * [15b5542d7](https://github.com/argoproj/argo-workflows/commit/15b5542d7cff2b0812830b16bcc5ae490ecc7302) Installer was not using the argo serviceaccount with the workflow-controller deployment. Make progress messages consistent
- * [f1471347d](https://github.com/argoproj/argo-workflows/commit/f1471347d96838e0e13e47d0bc7fc04b3018d6f7) Add Yaml viewer (#535)
- * [685a576bd](https://github.com/argoproj/argo-workflows/commit/685a576bd28bb269d727a10bf617bd1b08ea4ff0) Fix Gopkg.lock file following rewrite of git history at github.com/minio/go-homedir
- * [01ab3076f](https://github.com/argoproj/argo-workflows/commit/01ab3076fe68ef62a9e3cc89b0e367cbdb64ff37) Delete clusterRoleBinding and serviceAccount.
- * [7bb99ae71](https://github.com/argoproj/argo-workflows/commit/7bb99ae713da51c9b9818027066f7ddd8efb92bb) Rename references from v1 to v1alpha1 in YAML
- * [323439135](https://github.com/argoproj/argo-workflows/commit/3234391356ae0eaf88d348b564828c2df754a49e) Implement step artifacts tab (#534)
- * [b2a58dad9](https://github.com/argoproj/argo-workflows/commit/b2a58dad98942ad06b0431968be00ebe588818ff) Workflow list (#533)
- * [5dd1754b4](https://github.com/argoproj/argo-workflows/commit/5dd1754b4a41c7951829dbbd8e70a244cf627331) Guard controller from informer sending non-workflow/pod objects (#505)
- * [59e31c60f](https://github.com/argoproj/argo-workflows/commit/59e31c60f8675c2c678c50e9694ee993691b6e6a) Enable resync period in workflow/pod informers (resolves #532)
- * [d5b06dcd4](https://github.com/argoproj/argo-workflows/commit/d5b06dcd4e52270a24f4f3b19497b9a9afaed4e9) Significantly increase efficiency of workflow control loop (resolves #505)
- * [4b2098ee2](https://github.com/argoproj/argo-workflows/commit/4b2098ee271301eca52403e769f82f6d717400af) finished walkthrough sections
- * [eb7292b02](https://github.com/argoproj/argo-workflows/commit/eb7292b02414ef6faca4f424f6b04ea444abb0e0) walkthrough
- * [82b1c7d97](https://github.com/argoproj/argo-workflows/commit/82b1c7d97536baac7514d7cfe72d1be9309bef43) Add -o wide option to `argo get` to display artifacts and durations (resolves #526)
- * [3427955d3](https://github.com/argoproj/argo-workflows/commit/3427955d35bf6babc0bfee958a2eb417553ed203) Use PATCH api from k8s go SDK for annotating/labeling pods
- * [4842bbbc7](https://github.com/argoproj/argo-workflows/commit/4842bbbc7e40340de12c788cc770eaa811431818) Add support for nodeSelector at both the workflow and step level (resolves #458)
[424fba5d4](https://github.com/argoproj/argo-workflows/commit/424fba5d4c26c448c8c8131b89113c4c5fbae08d) Rename apiVersion of workflows from v1 to v1alpha1 (resolves #517) - * [5286728a9](https://github.com/argoproj/argo-workflows/commit/5286728a98236c5a8883850389d286d67549966e) Propogate executor errors back to controller. Add error column in `argo get` (#522) - * [32b5e99bb](https://github.com/argoproj/argo-workflows/commit/32b5e99bb194e27a8a35d1d7e1378dd749cc546f) Simplify executor commands to just 'init' and 'wait'. Improve volumes examples - * [e2bfbc127](https://github.com/argoproj/argo-workflows/commit/e2bfbc127d03f5ef20763fe8a917c82e3f06638d) Update controller config automatically on configmap updates resolves #461 - * [c09b13f21](https://github.com/argoproj/argo-workflows/commit/c09b13f21eaec4bb78c040134a728d8e021b4d1e) Workflow validation detects when arguments were not supplied (#515) - * [705193d05](https://github.com/argoproj/argo-workflows/commit/705193d053cb8c0c799a0f636fc899e8b7f55bcc) Proper message for non-zero exits from main container. Indicate an Error phase/message when failing to load/save artifacts - * [e69b75101](https://github.com/argoproj/argo-workflows/commit/e69b7510196daba3a87dca0c8a9677abd8d74675) Update page title and favicon (#519) - * [4330232f5](https://github.com/argoproj/argo-workflows/commit/4330232f51d404a7546cf24b4b0eb608bf3113f5) Render workflow steps on workflow list page (#518) - * [87c447eaf](https://github.com/argoproj/argo-workflows/commit/87c447eaf2ca2230e9b24d6af38f3a0fd3c520c3) Implement kube api proxy. Add workflow node logs tab (#511) - * [0ab268837](https://github.com/argoproj/argo-workflows/commit/0ab268837cff2a1fd464673a45c3736178917be5) Rework/rename Makefile targets. Bake in image namespace/tag set during build, as part of argo install - * [3f13f5cab](https://github.com/argoproj/argo-workflows/commit/3f13f5cabe9dc54c7fbaddf7b0cfbcf91c3f26a7) Support for overriding/supplying entrypoint and parameters via argo CLI. Update examples - * [6f9f2adcd](https://github.com/argoproj/argo-workflows/commit/6f9f2adcd017954a72b2b867e6bc2bcba18972af) Support ListOptions in the WorkflowClient. Add flag to delete completed workflows - * [30d7fba12](https://github.com/argoproj/argo-workflows/commit/30d7fba1205e7f0b4318d6b03064ee647d16ce59) Check Kubernetes version. - * [a3909273c](https://github.com/argoproj/argo-workflows/commit/a3909273c435b23de865089b82b712e4d670a4ff) Give proper error for unamed steps - * [eed54f573](https://github.com/argoproj/argo-workflows/commit/eed54f5732a61922f6daff9e35073b33c1dc068e) Harden the IsURL check - * [bfa62afd8](https://github.com/argoproj/argo-workflows/commit/bfa62afd857704c53aef32f5ade7df86cf2c0769) Add phase,completed fields to workflow labels. Add startedAt,finishedAt,phase,message to workflow.status - * [9347619c7](https://github.com/argoproj/argo-workflows/commit/9347619c7c125950a9f17acfbd92a1286bca1a57) Create serviceAccount & roleBinding if necessary. 
 * [205e5cbce](https://github.com/argoproj/argo-workflows/commit/205e5cbce20a6e5e73c977f1e775671a19bf4434) Introduce 'completed' pod label and label selector so controller can ignore completed pods
 * [199dbcbf1](https://github.com/argoproj/argo-workflows/commit/199dbcbf1c3fa2fd452e5c36035d0f0ae8cdde42) 476 jobs list page (#501)
 * [058792945](https://github.com/argoproj/argo-workflows/commit/0587929453ac10d7318a91f2243aece08fe84129) Implement workflow tree tab draft (#494)
 * [a2f034a06](https://github.com/argoproj/argo-workflows/commit/a2f034a063b30b0bb5d9e0f670a8bb38560880b4) Proper error reporting. Add message, startedAt, finishedAt to NodeStatus. Rename status to phase
 * [645fedcaf](https://github.com/argoproj/argo-workflows/commit/645fedcaf532e052ef0bfc64cb56bfb3307479dd) Support loop step expansion from input parameters and previous step results
 * [75c1c4822](https://github.com/argoproj/argo-workflows/commit/75c1c4822b4037176aa6d3702a5cf4eee590c7b7) Help page v2 (#492)
 * [a4af6702d](https://github.com/argoproj/argo-workflows/commit/a4af6702d526e775c0aa31ee3612328e5d058c2b) Basic state of navigation, top-bar, tooltip for UI v2 (#491)
 * [726e9fa09](https://github.com/argoproj/argo-workflows/commit/726e9fa0953fe91eb0401727743a04c8a02668ef) moved the service acct note
 * [3a4cd9c4b](https://github.com/argoproj/argo-workflows/commit/3a4cd9c4ba46f586a3d26fbe017d4d3002e6b671) 477 job details page (#488)
 * [8ba7b55cb](https://github.com/argoproj/argo-workflows/commit/8ba7b55cb59173ff7470be3451cd38333539b182) Edited the instructions
 * [1e9dbdbab](https://github.com/argoproj/argo-workflows/commit/1e9dbdbabbe354f9798162854dd7d6ae4aa8539a) Added influxdb-ci example
 * [bd5c0baad](https://github.com/argoproj/argo-workflows/commit/bd5c0baad83328f13f25ba59e15a5f607d2fb9eb) Added comment for entrypoint field
 * [2fbecdf04](https://github.com/argoproj/argo-workflows/commit/2fbecdf0484a9e3c0d9242bdd7286f83b6e771eb) Argo V2 UI initial commit (#474)
 * [9ce201230](https://github.com/argoproj/argo-workflows/commit/9ce2012303aa30623336f0dde72ad9b80a5409e3) added artifacts
 * [caaa32a6b](https://github.com/argoproj/argo-workflows/commit/caaa32a6b3c28c4f5a43514799b26528b55197ee) Minor edit
 * [ae72b5838](https://github.com/argoproj/argo-workflows/commit/ae72b583852e43f616d4c021a4e5646235d4c0b4) added more argo/kubectl examples
 * [8df393ed7](https://github.com/argoproj/argo-workflows/commit/8df393ed78d1e4353ee30ba02cec0b12daea7eb0) added 2.0
 * [9e3a51b14](https://github.com/argoproj/argo-workflows/commit/9e3a51b14d78c3622543429a500a7d0367b10787) Update demo.md to have better instructions to restart controller after configuration changes
 * [ba9f9277a](https://github.com/argoproj/argo-workflows/commit/ba9f9277a4a9a153a6f5b19862a73364f618e5cd) Add demo markdown file. Delete old demo.txt
 * [d8de40bb1](https://github.com/argoproj/argo-workflows/commit/d8de40bb14167f30b17de81d6162d633a62e7a0d) added 2.0
 * [6c617599b](https://github.com/argoproj/argo-workflows/commit/6c617599bf4c91ccd3355068967824c1e8d7c107) added 2.0
 * [32af692ee](https://github.com/argoproj/argo-workflows/commit/32af692eeec765b13ee3d2b4ede9f5ff45527b4c) added 2.0
 * [802940be0](https://github.com/argoproj/argo-workflows/commit/802940be0d4ffd5048dd5307b97af442d82e9a83) added 2.0
 * [1d4434155](https://github.com/argoproj/argo-workflows/commit/1d44341553d95ac8192d4a80e178a9d72558829a) added new png

### Contributors

 * Alexander Matyushentsev
 * Ed Lee
 * Jesse Suen
 * Rafal
 * Sandeep Bhojwani
 * Shri Javadekar
 * Wojciech Kalemba
 * cyee88
 * mukulikak

## v2.0.0-alpha1 (2017-11-16)


### Contributors


## v2.0.0 (2018-02-06)

 * [0978b9c61](https://github.com/argoproj/argo-workflows/commit/0978b9c61cb7435d31ef8d252b80e03708a70adc) Support setting UI base Url (#722)
 * [b75cd98f6](https://github.com/argoproj/argo-workflows/commit/b75cd98f6c038481ec3d2253e6404952bcaf4bd5) updated argo-user slack link
 * [b3598d845](https://github.com/argoproj/argo-workflows/commit/b3598d845c4cdb9ac7c4ae5eff5024ecd3fc5fd6) Add examples as functional and expected failure e2e tests
 * [83966e609](https://github.com/argoproj/argo-workflows/commit/83966e6095e2468368b0929613e7371074ee972b) Fix regression where executor did not annotate errors correctly
 * [751fd2702](https://github.com/argoproj/argo-workflows/commit/751fd27024d9f3bfc40051d2ca694b25a42307ea) Update UI references to v2.0.0. Update changelog
 * [8b7e2e24e](https://github.com/argoproj/argo-workflows/commit/8b7e2e24e8cf7ae6b701f08b0702ac045e0336f8) Check-in the OpenAPI spec. Automate generation as part of `make update-codegen`
 * [563bda756](https://github.com/argoproj/argo-workflows/commit/563bda756732802caeaa516fd0c493c6e07f6cf9) Fix update-openapigen.sh script to presume bash. Tweak documentation
 * [5b9a602b4](https://github.com/argoproj/argo-workflows/commit/5b9a602b4a763ac633f7ede86f13253451855462) Add documentation to types. Add program to generate OpenAPI spec
 * [d929e79f6](https://github.com/argoproj/argo-workflows/commit/d929e79f623017a923d1c4e120c363e08fe7a64a) Generate OpenAPI models for the workflow spec (issue #707)
 * [1d5afee6e](https://github.com/argoproj/argo-workflows/commit/1d5afee6ea48743bb854e69ffa333f361e52e289) Shortened url
 * [617d848da](https://github.com/argoproj/argo-workflows/commit/617d848da27d0035c20f21f3f6bddbe0e04550db) Added news to README
 * [ae36b22b6](https://github.com/argoproj/argo-workflows/commit/ae36b22b6d0d0ce8c230aedcce0814489162ae5b) Fix typo s/Customer/Custom/ (#704)
 * [5a589fcd9](https://github.com/argoproj/argo-workflows/commit/5a589fcd932116720411d53aeb6454e297456e06) Add ability to specify imagePullSecrets in the workflow.spec (resolves #699)
 * [2f77bc1ed](https://github.com/argoproj/argo-workflows/commit/2f77bc1ed00042388d0492cfd480d7c22599112c) Add ability to specify affinity rules at both the workflow and template level (resolves #701)
 * [fdafbe27e](https://github.com/argoproj/argo-workflows/commit/fdafbe27e5e2f4f2d58913328ae22db9a6c363b4) Sidecars unable to reference volume claim templates (resolves #697)
 * [0b0b52c3b](https://github.com/argoproj/argo-workflows/commit/0b0b52c3b45cbe5ac62da7b26b30d19fc1f9eb3e) Referencing output artifacts from a container with retries was not functioning (resolves #698)
 * [bf2b376a1](https://github.com/argoproj/argo-workflows/commit/bf2b376a142ed4fdf70ba4f3702533e7b75fc6b2) Update doc references to point to v2.0.0-beta1. Fix secrets example
 * [549870c1e](https://github.com/argoproj/argo-workflows/commit/549870c1ee08138b20b8a4b0c026569cf1e6c19a) Fix argo-ui download links to point to v2.0.0-beta1
 * [a202049d3](https://github.com/argoproj/argo-workflows/commit/a202049d327c64e282a37d7598bddc1faa1a3c1a) Update CHANGELOG for v2.0.0-beta1
 * [a3739035f](https://github.com/argoproj/argo-workflows/commit/a3739035f8e1f517721489fc53b58a8e27a575e1) Remove dind requirement from argo-ci test steps
 * [1bdd0c03d](https://github.com/argoproj/argo-workflows/commit/1bdd0c03dbb9d82ad841ca19be6e1ea93aeb82f7) Include completed pods when attempting to reconcile deleted pods. Switch back to Patch (from Update) for persisting workflow changes
 * [a4a438921](https://github.com/argoproj/argo-workflows/commit/a4a4389219cbe84e3bc7b3731cdfccb9ee5f5730) Sleep 1s after persisting workflow to give informer cache a chance to sync (resolves #686)
 * [5bf49531f](https://github.com/argoproj/argo-workflows/commit/5bf49531f99ef9d8b8aefeac26a4a3fa0177e70d) Updated demo.md with link to ARTIFACT_REPO.md
 * [863d547a1](https://github.com/argoproj/argo-workflows/commit/863d547a1a2a146a898c06c835187e0595af5689) Rely on controller generated timestamps for node.StartedAt instead of pod.CreationTimestamp
 * [672542d1f](https://github.com/argoproj/argo-workflows/commit/672542d1f08c206f89f8747e9b14b675cdd77446) Re-apply workflow changes and reattempt update on resource conflicts. Make completed pod labeling asynchronous
 * [81bd6d3d4](https://github.com/argoproj/argo-workflows/commit/81bd6d3d46d2fd7ea57aa095ae134116cfca90f2) Resource state retry (#690)
 * [44dba889c](https://github.com/argoproj/argo-workflows/commit/44dba889cb743552557fcd7453ee81a89875142d) Tune controller to 20 QPS, 30 Burst, 8 wf workers, 8 pod workers
 * [178b9d37c](https://github.com/argoproj/argo-workflows/commit/178b9d37cc452af214df7c9c41522124c117e7a3) Show running/completed pod counts in `argo list -o wide`
 * [0c565f5f5](https://github.com/argoproj/argo-workflows/commit/0c565f5f5e9f69244e9828ced7c3916ac605f460) Switch to Updating workflow resources instead of Patching (resolves #686)
 * [a571f592f](https://github.com/argoproj/argo-workflows/commit/a571f592fa131771b8d71126fc27809e24462cfe) Ensure sidecars get killed unequivocally. Final argoexec stats were not getting printed
 * [a0b2d78c8](https://github.com/argoproj/argo-workflows/commit/a0b2d78c869f277c20c4cd3ba18b8d2688674e54) Show duration by default in `argo get`. --since flag should always include Running
 * [101103136](https://github.com/argoproj/argo-workflows/commit/101103136287b8ee16a7afda94cc6ff59be07ef6) Executor hardening: add retries and memoization for executor k8s API calls. Recover from unexpected panics and annotate the error.
 * [f2b8f248a](https://github.com/argoproj/argo-workflows/commit/f2b8f248ab8d483e0ba41a287611393500c7b507) Regenerate deepcopy code after type changes for raw input artifacts
 * [322e0e3aa](https://github.com/argoproj/argo-workflows/commit/322e0e3aa3cb2e650f3ad4b7ff9157f71a92e8b4) renamed file as per review comment
 * [0a386ccaf](https://github.com/argoproj/argo-workflows/commit/0a386ccaf705a1abe1f9239adc966fceb7a808ae) changes from the review - renamed "contents" to "data" - lint issue
 * [d9ebbdc1b](https://github.com/argoproj/argo-workflows/commit/d9ebbdc1b31721c8095d3c5426c1c811054a94a7) support for raw input as artifact
 * [a1f821d58](https://github.com/argoproj/argo-workflows/commit/a1f821d589d47ca5b12b94ad09306a706a43d150) Introduce communication channel from workflow-controller to executor through pod annotations
 * [b324f9f52](https://github.com/argoproj/argo-workflows/commit/b324f9f52109b9aa29bc89d63810be6e421eb54f) Artifactory repository was not using correct casing for repoURL field
 * [3d45d25ac](https://github.com/argoproj/argo-workflows/commit/3d45d25ac497a09fa291d20f867a75f59b6abf92) Add `argo list --since` to filter workflows newer than a relative duration
 * [cc2efdec3](https://github.com/argoproj/argo-workflows/commit/cc2efdec368c2f133c076a9eda9065f64762a9fa) Add ability to set loglevel of controller via CLI flag
 * [60c124e5d](https://github.com/argoproj/argo-workflows/commit/60c124e5dddb6ebfee6300d36f6a3877838ec17c) Remove hack.go and use dep to install code-generators
 * [d14755a7c](https://github.com/argoproj/argo-workflows/commit/d14755a7c5f583c1f3c8c762ae8628e780f566cf) `argo list` was not handling the default case correctly
 * [472f5604e](https://github.com/argoproj/argo-workflows/commit/472f5604e27ca6310e016f846c97fda5d7bca9dd) Improvements to `argo list`: sort workflows by running vs. completed, then by finished time; add --running, --completed, --status XXX filters; add -o wide option to show parameters and -o name to show only the names
 * [b063f938f](https://github.com/argoproj/argo-workflows/commit/b063f938f34f650333df6ec5a2e6a325a5b45299) Use minimal ClusterRoles for workflow-controller and argo-ui deployments
 * [21bc2bd07](https://github.com/argoproj/argo-workflows/commit/21bc2bd07ebbfb478c87032e2ece9939ea436030) Added link to configuring artifact repo from main README
 * [b54bc067b](https://github.com/argoproj/argo-workflows/commit/b54bc067bda02f95937774fb3345dc2010d3efc6) Added link to configuring artifact repo from main README
 * [58ec51699](https://github.com/argoproj/argo-workflows/commit/58ec51699534e73d82c3f44027326b438cf5c063) Updated ARTIFACT_REPO.md
 * [1057d0878](https://github.com/argoproj/argo-workflows/commit/1057d087838bcbdbffc70367e0fc02778907c9af) Added detailed instructions on configuring AWS and GCP artifact repos
 * [b0a7f0da8](https://github.com/argoproj/argo-workflows/commit/b0a7f0da85fabad34814ab129eaba43862a1d2dd) Issue 680 - Argo UI is failing to render workflow which has not been picked up by workflow controller (#681)
 * [e91c227ac](https://github.com/argoproj/argo-workflows/commit/e91c227acc1f86b7e341aaac534930f9b529cd89) Document and clarify artifact passing (#676)
 * [290f67997](https://github.com/argoproj/argo-workflows/commit/290f6799752ef602b27c193212495e27f40dd687) Allow containers to be retried. (#661)
 * [80f9b1b63](https://github.com/argoproj/argo-workflows/commit/80f9b1b636704ebad6ebb8df97c5e81dc4f815f9) Improve the error message when insufficient RBAC privileges is detected (resolves #659)
 * [3cf67df42](https://github.com/argoproj/argo-workflows/commit/3cf67df422f34257296d2de09d2ca3c8c87abf84) Regenerate autogenerated code after changes to types
 * [baf370529](https://github.com/argoproj/argo-workflows/commit/baf37052976458401a6c0e44d06f30dc8d819680) Add support for resource template outputs. Remove output.parameters.path in favor of valueFrom
 * [dc1256c20](https://github.com/argoproj/argo-workflows/commit/dc1256c2034f0add4bef3f82ce1a71b454d4eef5) Fix expected file name for issue template
 * [a492ad141](https://github.com/argoproj/argo-workflows/commit/a492ad14177eb43cdd6c2a017c9aec87183682ed) Add a GitHub issues template
 * [55be93a68](https://github.com/argoproj/argo-workflows/commit/55be93a68d8991f76a31adaf49f711436a35a9d0) Add a --dry-run option to `argo install`. Remove CRD creation from controller startup
 * [fddc052df](https://github.com/argoproj/argo-workflows/commit/fddc052df8a3478aede67057f2b06938c2a6a7a4) Fix README.md to contain influxdb-client in the example (#669)
 * [67236a594](https://github.com/argoproj/argo-workflows/commit/67236a5940231f7b9dc2ca2f4cb4cb70b7c18d45) Update getting started doc to use `brew install` and better instructions for RBAC clusters (resolves #654, #530)
 * [5ac197538](https://github.com/argoproj/argo-workflows/commit/5ac19753846566d0069b76e3e6c6dd03f0e6950c) Support rendering retry steps (#670)
 * [3cca0984c](https://github.com/argoproj/argo-workflows/commit/3cca0984c169ea59e8e2758a04550320b1981875) OpenID Connect auth support (#663)
 * [c222cb53a](https://github.com/argoproj/argo-workflows/commit/c222cb53a168f9bd40b7731d0b2f70db977990c2) Clarify where the Minio secret comes from.
 * [a78e2e8d5](https://github.com/argoproj/argo-workflows/commit/a78e2e8d551d6afad2e0fbce7a9f0a1bd023c11b) Remove parallel steps that use volumes.
 * [355173857](https://github.com/argoproj/argo-workflows/commit/355173857f98a9a9704ab23235b3186bde8092b9) Prevent a potential k8s scheduler panic from incomplete setting of pod ownership reference (resolves #656)
 * [1a8bc26d4](https://github.com/argoproj/argo-workflows/commit/1a8bc26d40597f2f0475aa9197a6b3912c5bbb56) Updated README
 * [9721fca0e](https://github.com/argoproj/argo-workflows/commit/9721fca0e1ae9d1d57aa8d1872450ce8ee7487e2) Updated README
 * [e31776061](https://github.com/argoproj/argo-workflows/commit/e3177606105a936da7eba29924fa49ad497703c9) Fix typos in READMEs
 * [555d50b0e](https://github.com/argoproj/argo-workflows/commit/555d50b0ebeef1c753394de974dad2e0d4a5b787) Simplify some getting started instructions. Correct some usages of container resources field
 * [4abc9c40e](https://github.com/argoproj/argo-workflows/commit/4abc9c40e7656a5783620e41b33e4ed3bb7249e2) Updated READMEs
 * [a0add24f9](https://github.com/argoproj/argo-workflows/commit/a0add24f9778789473b2b097fb31a56ae11bfce9) Switch to k8s-codegen generated workflow client and informer
 * [9b08b6e99](https://github.com/argoproj/argo-workflows/commit/9b08b6e997633d5f2e94392f000079cbe93ee023) Added link for argoproj slack channel
 * [682bbdc09](https://github.com/argoproj/argo-workflows/commit/682bbdc09b66698090d309e91b5caf4483931e34) Update references to point to latest argo release
 * [940dd56d9](https://github.com/argoproj/argo-workflows/commit/940dd56d98c75eb93da3b5de598882754cb74fc7) Fix artifactory unit test and linting issues
 * [e7ba2b441](https://github.com/argoproj/argo-workflows/commit/e7ba2b44114fca8a3cb2b8635dc2fdfeaa440d9e) Update help page links (#651)
 * [53dac4c74](https://github.com/argoproj/argo-workflows/commit/53dac4c74933c333124a0cb1d8cf6c9255f9199d) Add artifactory and UI fixes to 2.0.0-alpha3 CHANGELOG
 * [4b4eff43f](https://github.com/argoproj/argo-workflows/commit/4b4eff43f20ed678d34efe567a4d61d1364d7124) Allow disabling web console feature (#649)
 * [90b7f2e67](https://github.com/argoproj/argo-workflows/commit/90b7f2e67dddebba1678e215bde75d68867b4469) Added support for artifactory
 * [849e916e5](https://github.com/argoproj/argo-workflows/commit/849e916e5bf98f320f1a65b12ffe246d9ebbb6f6) Adjusted styles for logs stream (#614)
 * [a8a960303](https://github.com/argoproj/argo-workflows/commit/a8a960303423cde2e511d4af9c2c8ae834076b21) Update CHANGELOG for 2.0.0-alpha3
 * [e7c7678cc](https://github.com/argoproj/argo-workflows/commit/e7c7678cc605285e5b3224c757e5e4be57ab4d5c) Fix issue preventing ability to pass JSON as a command line param (resolves #646)
 * [7f5e2b96b](https://github.com/argoproj/argo-workflows/commit/7f5e2b96bd96e0bccf4778383aa9b94a1768e9c0) Add validation checks for volumeMount/artifact path collision and activeDeadlineSeconds (#620)
 * [dc4a94633](https://github.com/argoproj/argo-workflows/commit/dc4a94633c4d00d78a7ea53272e425962de405ba) Add the ability to specify the service account used by pods in the workflow (resolves #634) Also add argo CLI support for supplying/overriding spec.serviceAccountName from command line.
 * [16f7000aa](https://github.com/argoproj/argo-workflows/commit/16f7000aa77b2759fa0a65d6e42456bcb660f824) Workflow operator will recover from unexpected panics and mark the workflow with error (resolves #633)
 * [18dca7fe2](https://github.com/argoproj/argo-workflows/commit/18dca7fe21d57e6a5415c53bfdb87a889ac32456) Issue #629 - Add namespace to workflow list and workflow details page (#639)
 * [e656bace7](https://github.com/argoproj/argo-workflows/commit/e656bace75aaa859f04121f2c1d95631b462fe62) Issue #637 - Implement Workflow list and workflow details page live update (#638)
 * [1503ce3ae](https://github.com/argoproj/argo-workflows/commit/1503ce3aee40eba741819a1403847df4bbcb7b23) Issue #636 - Upgrade to ui-lib 2.0.3 to fix xterm incompatibility (#642)
 * [f9170e8ab](https://github.com/argoproj/argo-workflows/commit/f9170e8abb7121b0d0cbc3e4c07b9bdc2224fb76) Remove manifest-passing.yaml example now that we have resource templates
 * [25be5fd63](https://github.com/argoproj/argo-workflows/commit/25be5fd6368bac3fde8e4392b3cb9d4159983a1a) Implementation for resource templates and resource success/failure conditions
 * [402ad565f](https://github.com/argoproj/argo-workflows/commit/402ad565f4a3b95c449ddd4c6dc468947aeb7192) Updated examples/README
 * [8536c7fc8](https://github.com/argoproj/argo-workflows/commit/8536c7fc89a0ceb39208efe2076919d0390e3d2e) added secret example to examples/README
 * [e5002b828](https://github.com/argoproj/argo-workflows/commit/e5002b8286af2c1f7ec64953114e1d97c889ca37) Add '--wait' to argo submit.
 * [9646e55f8](https://github.com/argoproj/argo-workflows/commit/9646e55f8bb8fbac80d456853aa891c2ae069adb) Installer was not updating configmap correctly with new executor image during upgrade
 * [69d72913a](https://github.com/argoproj/argo-workflows/commit/69d72913a3a72bbf7b075be847303305b4bef1a5) Support private git repos using secret selector fields in the git artifact (resolves #626)
 * [64e17244e](https://github.com/argoproj/argo-workflows/commit/64e17244ef04b9d2aa6abf6f18d4e7ef2d20ff37) Add argo ci workflow (#619)
 * [e89984355](https://github.com/argoproj/argo-workflows/commit/e8998435598e8239d7b77a60cfda43e8f2869b4d) Resolve controller panic when a script template with an input artifact was submitted (resolves #617). Utilize the kubernetes.Interface and fake.Clientset to support unit test mocking. Added a unit test to reproduce the panic. Add an e2e test to verify functionality works.
 * [52075b456](https://github.com/argoproj/argo-workflows/commit/52075b45611783d909609433bb44702888b5db37) Introduce controller instance IDs to support multiple workflow controllers in a cluster (resolves #508)
 * [133a23ce2](https://github.com/argoproj/argo-workflows/commit/133a23ce20b4570ded81fac76a430f0399c1eea1) Add ability to timeout a container/script using activeDeadlineSeconds
 * [b5b16e552](https://github.com/argoproj/argo-workflows/commit/b5b16e55260df018cc4de14bf298ce59714b4396) Support for workflow exit handlers
 * [906b3e7c7](https://github.com/argoproj/argo-workflows/commit/906b3e7c7cac191f920016362b076a28f18d97c1) Update ROADMAP.md
 * [5047422ae](https://github.com/argoproj/argo-workflows/commit/5047422ae71869672c84364d099e1488b29fbbe8) Update CHANGELOG.md
 * [2b6583dfb](https://github.com/argoproj/argo-workflows/commit/2b6583dfb02911965183ef4b25ed68c867448e10) Add `argo wait` for waiting on workflows to complete. (#618)
 * [cfc9801c4](https://github.com/argoproj/argo-workflows/commit/cfc9801c40528b6605823e1f4b4359600b6887df) Add option to print output of submit in json.
 * [c20c0f995](https://github.com/argoproj/argo-workflows/commit/c20c0f9958ceeefd3597120fcb4013d857276076) Comply with semantic versioning. Include build metadata in `argo version` (resolves #594)
 * [bb5ac7db5](https://github.com/argoproj/argo-workflows/commit/bb5ac7db52bff613c32b153b82953ec9c73c3b8a) minor change
 * [91845d499](https://github.com/argoproj/argo-workflows/commit/91845d4990ff8fd97bd9404e4b37024be1ee0ba6) Added more documentation
 * [4e8d69f63](https://github.com/argoproj/argo-workflows/commit/4e8d69f630bc0fd107b360ee9ad953ccb0b78f11) fixed install instructions
 * [0557147dd](https://github.com/argoproj/argo-workflows/commit/0557147dd4bfeb2688b969293ae858a8391d78ad) Removed empty toolbar (#600)
 * [bb2b29ff5](https://github.com/argoproj/argo-workflows/commit/bb2b29ff5e4178e2c8a9dfe666b699d75aa9ab3b) Added limit for number of steps in workflows list (#602)
 * [3f57cc1d2](https://github.com/argoproj/argo-workflows/commit/3f57cc1d2ff9c0e7ec40da325c3478a8037a6ac0) fixed typo in examples/README
 * [ebba60311](https://github.com/argoproj/argo-workflows/commit/ebba6031192b0a763bd94b1625a2ff6e242f112e) Updated examples/README.md with how to override entrypoint and parameters
 * [81834db3c](https://github.com/argoproj/argo-workflows/commit/81834db3c0bd12758a95e8a5862d6dda6d0dceeb) Example with using an emptyDir volume.
 * [4cd949d32](https://github.com/argoproj/argo-workflows/commit/4cd949d327ddb9d4f4592811c51e07bb53b30ef9) Remove apiserver
 * [6a916ca44](https://github.com/argoproj/argo-workflows/commit/6a916ca447147e4aff364ce032c9db4530d49d11) `argo lint` did not split yaml files. `argo submit` was not ignoring non-workflow manifests
 * [bf7d99797](https://github.com/argoproj/argo-workflows/commit/bf7d997970e967b2b238ce209ce823ea47de01d2) Include `make lint` and `make test` as part of CI
 * [d1639ecfa](https://github.com/argoproj/argo-workflows/commit/d1639ecfabf73f73ebe040b832668bd6a7b60d20) Create example workflow using kubernetes secrets (resolves #592)
 * [31c54af4b](https://github.com/argoproj/argo-workflows/commit/31c54af4ba4cb2a0db918fadf62cb0b854592ba5) Toolbar and filters on workflows list (#565)
 * [bb4520a6f](https://github.com/argoproj/argo-workflows/commit/bb4520a6f65d4e8e765ce4d426befa583721c194) Add and improve the inlined comments in example YAMLs
 * [a04707282](https://github.com/argoproj/argo-workflows/commit/a04707282cdeadf463b22b633fc00dba432f60bf) Fixed typo.
 * [13366e324](https://github.com/argoproj/argo-workflows/commit/13366e32467a34a061435091589c90d04a84facb) Fix some wrong GOPATH assumptions in Makefile. Add `make test` target. Fix unit tests
 * [9f4f1ee75](https://github.com/argoproj/argo-workflows/commit/9f4f1ee75705150a22dc68a3dd16fa90069219ed) Add 'misspell' to linters. Fix misspellings caught by linter
 * [1b918aff2](https://github.com/argoproj/argo-workflows/commit/1b918aff29ff8e592247d14c52be06a0537f0734) Address all issues in code caught by golang linting tools (resolves #584)
 * [903326d91](https://github.com/argoproj/argo-workflows/commit/903326d9103fa7dcab37835a9478f58aff51a5d1) Add manifest passing to do kubectl create with dynamic manifests (#588)
 * [b1ec3a3fc](https://github.com/argoproj/argo-workflows/commit/b1ec3a3fc90a211f9afdb9090d4396c98ab3f71f) Create the argo-ui service with type ClusterIP as part of installation (resolves #582)
 * [5b6271bc5](https://github.com/argoproj/argo-workflows/commit/5b6271bc56b46a82b0ee2bc0784315ffcddeb27f) Add validate names for various workflow specific fields and tests for them (#586)
 * [b6e671318](https://github.com/argoproj/argo-workflows/commit/b6e671318a446f129740ce790f53425d65e436f3) Implementation for allowing access to global parameters in workflow (#571)
 * [c5ac5bfb8](https://github.com/argoproj/argo-workflows/commit/c5ac5bfb89274fb5ee85f9cef346b7059b5d7641) Fix error message when key does not exist in secret (resolves #574). Improve s3 example and documentation.
 * [4825c43b3](https://github.com/argoproj/argo-workflows/commit/4825c43b3e0c3c54b2313aa54e69520ed1b8a38d) Increase UI build memory limit (#580)
 * [87a20c6bc](https://github.com/argoproj/argo-workflows/commit/87a20c6bce9a6bfe2a88edc581746ff5f7f006fc) Update input-artifacts-s3.yaml example to explain concepts and usage better
 * [c16a9c871](https://github.com/argoproj/argo-workflows/commit/c16a9c87102fd5b66406737720204e5f17af0fd1) Rahuldhide patch 2 (#579)
 * [f5d0e340b](https://github.com/argoproj/argo-workflows/commit/f5d0e340b3626658b435dd2ddd937e97af7676b2) Issue #549 - Prepare argo v1 build config (#575)
 * [3b3a4c87b](https://github.com/argoproj/argo-workflows/commit/3b3a4c87bd3138961c948f869e2c5b7c932c8847) Argo logo
 * [d1967443a](https://github.com/argoproj/argo-workflows/commit/d1967443a4943f685f6cb1649480765050bdcdaa) Skip e2e tests if Kubeconfig is not provided.
 * [1ec231b69](https://github.com/argoproj/argo-workflows/commit/1ec231b69a1a7d985d1d587980c34588019b04aa) Create separate namespaces for tests.
 * [5ea20d7eb](https://github.com/argoproj/argo-workflows/commit/5ea20d7eb5b9193c19f7c875c8fb2f4af8f68ef3) Add a deadline for workflow operation to prevent workqueue starvation and to enable state resync (#531). Tested with 6 x 1000 pod workflows.
 * [346bafe63](https://github.com/argoproj/argo-workflows/commit/346bafe636281bca94695b285767f41ae71e6a69) Multiple scalability improvements to controller (resolves #531)
 * [bbc56b59e](https://github.com/argoproj/argo-workflows/commit/bbc56b59e2ff9635244bcb091e92e257a508d147) Improve argo ui build performance and reduce image size (#572)
 * [cdb1ce82b](https://github.com/argoproj/argo-workflows/commit/cdb1ce82bce9b103e433981d94bd911b0769350d) Upgrade ui-lib (#556)
 * [0605ad7b3](https://github.com/argoproj/argo-workflows/commit/0605ad7b33fc4f9c0bbff79adf1d509d3b072703) Adjusted tabs content size to see horizontal and vertical scrolls. (#569)
 * [a33162369](https://github.com/argoproj/argo-workflows/commit/a331623697e76a5e3497257e28fabe1995852339) Fix rendering 'Error' node status (#564)
 * [8c3a7a939](https://github.com/argoproj/argo-workflows/commit/8c3a7a9393d619951a676324810d482d28dfe015) Issue #548 - UI terminal window (#563)
 * [5ec6cc85a](https://github.com/argoproj/argo-workflows/commit/5ec6cc85aab63ea2277ce621d5de5b59a510d462) Implement API to ssh into pod (#561)
 * [beeb65ddc](https://github.com/argoproj/argo-workflows/commit/beeb65ddcb7d2b5f8286f7881af1f5c00535161e) Don't mess the controller's arguments.
 * [01f5db5a0](https://github.com/argoproj/argo-workflows/commit/01f5db5a0c3dc48541577b9d8b1d815399728070) Parameterize Install() and related methods.
 * [85a2e2711](https://github.com/argoproj/argo-workflows/commit/85a2e2711beba8f2c891af396a3cc886c7b37542) Fix tests.
 * [56f666e1b](https://github.com/argoproj/argo-workflows/commit/56f666e1bf69a7f5d8191637e8c7f384b91d98d0) Basic E2e tests.
 * [9eafb9dd5](https://github.com/argoproj/argo-workflows/commit/9eafb9dd59166e76804b71c8df19fdca453cdd28) Issue #547 - Support filtering by status in API GET /workflows (#550)
 * [37f41eb7b](https://github.com/argoproj/argo-workflows/commit/37f41eb7bf366cfe007d3ecce7b21f003d381e34) Update demo.md
 * [ea8d5c113](https://github.com/argoproj/argo-workflows/commit/ea8d5c113d9245f47fe7b3d3f45e7891aa5f50e8) Update README.md
 * [373f07106](https://github.com/argoproj/argo-workflows/commit/373f07106ab14e3772c94af5cc11f7f1c7099204) Add support for making a no_ui build. Base all build on no_ui build (#553)
 * [ae65c57e5](https://github.com/argoproj/argo-workflows/commit/ae65c57e55f92fd8ff1edd099f659e9e97ce59f1) Update demo.md
 * [f6f8334b2](https://github.com/argoproj/argo-workflows/commit/f6f8334b2b3ed1f498c19e4de25421f41807f893) V2 style adjustments and small fixes (#544)
 * [12d5b7ca4](https://github.com/argoproj/argo-workflows/commit/12d5b7ca48c913e53b74708a35727d523dfa5355) Document argo ui service creation (#545)
 * [3202d4fac](https://github.com/argoproj/argo-workflows/commit/3202d4fac2d5d2d2a3ad1d679c1b753b04aca796) Support all namespaces (#543)
 * [b553c1bd9](https://github.com/argoproj/argo-workflows/commit/b553c1bd9a00499915dbe5926194d67c7392b944) Update demo.md to qualify the minio endpoint with the default namespace
 * [4df7617c2](https://github.com/argoproj/argo-workflows/commit/4df7617c2e97f2336195d6764259537be648b89b) Fix artifacts downloading (#541)
 * [12732200f](https://github.com/argoproj/argo-workflows/commit/12732200fb1ed95608cdc0b14bd0802c524c7fa2) Update demo.md with references to latest release
 * [0e67b8616](https://github.com/argoproj/argo-workflows/commit/0e67b8616444cf637d5b68e58eb6e068b721d34c) Add 'release' make target. Improve CLI help and set version from git tag. Uninstaller for UI
 * [8ab1d2e93](https://github.com/argoproj/argo-workflows/commit/8ab1d2e93ff969a1a01a06dcc3ac4aa04d3514aa) Install argo ui along with argo workflow controller (#540)
 * [f4af881e5](https://github.com/argoproj/argo-workflows/commit/f4af881e55cff12888867bca9dff940c1bb16c26) Add make command to build argo ui (#539)
 * [5bb858145](https://github.com/argoproj/argo-workflows/commit/5bb858145e1c603494d8202927197d38b121311a) Add example description in YAML.
 * [fc23fcdae](https://github.com/argoproj/argo-workflows/commit/fc23fcdaebc9049748d57ab178517d18eed4af7d) edit example README
 * [8dd294aa0](https://github.com/argoproj/argo-workflows/commit/8dd294aa003ee1ffaa70cd7735b7d62c069eeb0f) Add example of GIF processing using ImageMagick
 * [ef8e9d5c2](https://github.com/argoproj/argo-workflows/commit/ef8e9d5c234b1f889c4a2accbc9f24d58ce553b9) Implement loader (#537)
 * [2ac37361e](https://github.com/argoproj/argo-workflows/commit/2ac37361e6620b37af09cd3e50ecc0fb3fb62a12) Allow specifying CRD version (#536)
 * [15b5542d7](https://github.com/argoproj/argo-workflows/commit/15b5542d7cff2b0812830b16bcc5ae490ecc7302) Installer was not using the argo serviceaccount with the workflow-controller deployment. Make progress messages consistent
 * [f1471347d](https://github.com/argoproj/argo-workflows/commit/f1471347d96838e0e13e47d0bc7fc04b3018d6f7) Add Yaml viewer (#535)
 * [685a576bd](https://github.com/argoproj/argo-workflows/commit/685a576bd28bb269d727a10bf617bd1b08ea4ff0) Fix Gopkg.lock file following rewrite of git history at github.com/minio/go-homedir
 * [01ab3076f](https://github.com/argoproj/argo-workflows/commit/01ab3076fe68ef62a9e3cc89b0e367cbdb64ff37) Delete clusterRoleBinding and serviceAccount.
 * [7bb99ae71](https://github.com/argoproj/argo-workflows/commit/7bb99ae713da51c9b9818027066f7ddd8efb92bb) Rename references from v1 to v1alpha1 in YAML
 * [323439135](https://github.com/argoproj/argo-workflows/commit/3234391356ae0eaf88d348b564828c2df754a49e) Implement step artifacts tab (#534)
 * [b2a58dad9](https://github.com/argoproj/argo-workflows/commit/b2a58dad98942ad06b0431968be00ebe588818ff) Workflow list (#533)
 * [5dd1754b4](https://github.com/argoproj/argo-workflows/commit/5dd1754b4a41c7951829dbbd8e70a244cf627331) Guard controller from informer sending non workflow/pod objects (#505)
 * [59e31c60f](https://github.com/argoproj/argo-workflows/commit/59e31c60f8675c2c678c50e9694ee993691b6e6a) Enable resync period in workflow/pod informers (resolves #532)
 * [d5b06dcd4](https://github.com/argoproj/argo-workflows/commit/d5b06dcd4e52270a24f4f3b19497b9a9afaed4e9) Significantly increase efficiency of workflow control loop (resolves #505)
 * [4b2098ee2](https://github.com/argoproj/argo-workflows/commit/4b2098ee271301eca52403e769f82f6d717400af) finished walkthrough sections
 * [eb7292b02](https://github.com/argoproj/argo-workflows/commit/eb7292b02414ef6faca4f424f6b04ea444abb0e0) walkthrough
 * [82b1c7d97](https://github.com/argoproj/argo-workflows/commit/82b1c7d97536baac7514d7cfe72d1be9309bef43) Add -o wide option to `argo get` to display artifacts and durations (resolves #526)
 * [3427955d3](https://github.com/argoproj/argo-workflows/commit/3427955d35bf6babc0bfee958a2eb417553ed203) Use PATCH api from k8s go SDK for annotating/labeling pods
 * [4842bbbc7](https://github.com/argoproj/argo-workflows/commit/4842bbbc7e40340de12c788cc770eaa811431818) Add support for nodeSelector at both the workflow and step level (resolves #458)
 * [424fba5d4](https://github.com/argoproj/argo-workflows/commit/424fba5d4c26c448c8c8131b89113c4c5fbae08d) Rename apiVersion of workflows from v1 to v1alpha1 (resolves #517)
 * [5286728a9](https://github.com/argoproj/argo-workflows/commit/5286728a98236c5a8883850389d286d67549966e) Propagate executor errors back to controller. Add error column in `argo get` (#522)
 * [32b5e99bb](https://github.com/argoproj/argo-workflows/commit/32b5e99bb194e27a8a35d1d7e1378dd749cc546f) Simplify executor commands to just 'init' and 'wait'. Improve volumes examples
 * [e2bfbc127](https://github.com/argoproj/argo-workflows/commit/e2bfbc127d03f5ef20763fe8a917c82e3f06638d) Update controller config automatically on configmap updates (resolves #461)
 * [c09b13f21](https://github.com/argoproj/argo-workflows/commit/c09b13f21eaec4bb78c040134a728d8e021b4d1e) Workflow validation detects when arguments were not supplied (#515)
 * [705193d05](https://github.com/argoproj/argo-workflows/commit/705193d053cb8c0c799a0f636fc899e8b7f55bcc) Proper message for non-zero exits from main container. Indicate an Error phase/message when failing to load/save artifacts
 * [e69b75101](https://github.com/argoproj/argo-workflows/commit/e69b7510196daba3a87dca0c8a9677abd8d74675) Update page title and favicon (#519)
 * [4330232f5](https://github.com/argoproj/argo-workflows/commit/4330232f51d404a7546cf24b4b0eb608bf3113f5) Render workflow steps on workflow list page (#518)
 * [87c447eaf](https://github.com/argoproj/argo-workflows/commit/87c447eaf2ca2230e9b24d6af38f3a0fd3c520c3) Implement kube api proxy. Add workflow node logs tab (#511)
 * [0ab268837](https://github.com/argoproj/argo-workflows/commit/0ab268837cff2a1fd464673a45c3736178917be5) Rework/rename Makefile targets. Bake in image namespace/tag set during build, as part of argo install
 * [3f13f5cab](https://github.com/argoproj/argo-workflows/commit/3f13f5cabe9dc54c7fbaddf7b0cfbcf91c3f26a7) Support for overriding/supplying entrypoint and parameters via argo CLI. Update examples
 * [6f9f2adcd](https://github.com/argoproj/argo-workflows/commit/6f9f2adcd017954a72b2b867e6bc2bcba18972af) Support ListOptions in the WorkflowClient. Add flag to delete completed workflows
 * [30d7fba12](https://github.com/argoproj/argo-workflows/commit/30d7fba1205e7f0b4318d6b03064ee647d16ce59) Check Kubernetes version.
 * [a3909273c](https://github.com/argoproj/argo-workflows/commit/a3909273c435b23de865089b82b712e4d670a4ff) Give proper error for unnamed steps
 * [eed54f573](https://github.com/argoproj/argo-workflows/commit/eed54f5732a61922f6daff9e35073b33c1dc068e) Harden the IsURL check
 * [bfa62afd8](https://github.com/argoproj/argo-workflows/commit/bfa62afd857704c53aef32f5ade7df86cf2c0769) Add phase,completed fields to workflow labels. Add startedAt,finishedAt,phase,message to workflow.status
 * [9347619c7](https://github.com/argoproj/argo-workflows/commit/9347619c7c125950a9f17acfbd92a1286bca1a57) Create serviceAccount & roleBinding if necessary.
 * [205e5cbce](https://github.com/argoproj/argo-workflows/commit/205e5cbce20a6e5e73c977f1e775671a19bf4434) Introduce 'completed' pod label and label selector so controller can ignore completed pods
 * [199dbcbf1](https://github.com/argoproj/argo-workflows/commit/199dbcbf1c3fa2fd452e5c36035d0f0ae8cdde42) 476 jobs list page (#501)
 * [058792945](https://github.com/argoproj/argo-workflows/commit/0587929453ac10d7318a91f2243aece08fe84129) Implement workflow tree tab draft (#494)
 * [a2f034a06](https://github.com/argoproj/argo-workflows/commit/a2f034a063b30b0bb5d9e0f670a8bb38560880b4) Proper error reporting. Add message, startedAt, finishedAt to NodeStatus. Rename status to phase
 * [645fedcaf](https://github.com/argoproj/argo-workflows/commit/645fedcaf532e052ef0bfc64cb56bfb3307479dd) Support loop step expansion from input parameters and previous step results
 * [75c1c4822](https://github.com/argoproj/argo-workflows/commit/75c1c4822b4037176aa6d3702a5cf4eee590c7b7) Help page v2 (#492)
 * [a4af6702d](https://github.com/argoproj/argo-workflows/commit/a4af6702d526e775c0aa31ee3612328e5d058c2b) Basic state of navigation, top-bar, tooltip for UI v2 (#491)
 * [726e9fa09](https://github.com/argoproj/argo-workflows/commit/726e9fa0953fe91eb0401727743a04c8a02668ef) moved the service acct note
 * [3a4cd9c4b](https://github.com/argoproj/argo-workflows/commit/3a4cd9c4ba46f586a3d26fbe017d4d3002e6b671) 477 job details page (#488)
 * [8ba7b55cb](https://github.com/argoproj/argo-workflows/commit/8ba7b55cb59173ff7470be3451cd38333539b182) Edited the instructions
 * [1e9dbdbab](https://github.com/argoproj/argo-workflows/commit/1e9dbdbabbe354f9798162854dd7d6ae4aa8539a) Added influxdb-ci example
 * [bd5c0baad](https://github.com/argoproj/argo-workflows/commit/bd5c0baad83328f13f25ba59e15a5f607d2fb9eb) Added comment for entrypoint field
 * [2fbecdf04](https://github.com/argoproj/argo-workflows/commit/2fbecdf0484a9e3c0d9242bdd7286f83b6e771eb) Argo V2 UI initial commit (#474)
 * [9ce201230](https://github.com/argoproj/argo-workflows/commit/9ce2012303aa30623336f0dde72ad9b80a5409e3) added artifacts
 * [caaa32a6b](https://github.com/argoproj/argo-workflows/commit/caaa32a6b3c28c4f5a43514799b26528b55197ee) Minor edit
 * [ae72b5838](https://github.com/argoproj/argo-workflows/commit/ae72b583852e43f616d4c021a4e5646235d4c0b4) added more argo/kubectl examples
 * [8df393ed7](https://github.com/argoproj/argo-workflows/commit/8df393ed78d1e4353ee30ba02cec0b12daea7eb0) added 2.0
 * [9e3a51b14](https://github.com/argoproj/argo-workflows/commit/9e3a51b14d78c3622543429a500a7d0367b10787) Update demo.md to have better instructions to restart controller after configuration changes
 * [ba9f9277a](https://github.com/argoproj/argo-workflows/commit/ba9f9277a4a9a153a6f5b19862a73364f618e5cd) Add demo markdown file. Delete old demo.txt
 * [d8de40bb1](https://github.com/argoproj/argo-workflows/commit/d8de40bb14167f30b17de81d6162d633a62e7a0d) added 2.0
 * [6c617599b](https://github.com/argoproj/argo-workflows/commit/6c617599bf4c91ccd3355068967824c1e8d7c107) added 2.0
 * [32af692ee](https://github.com/argoproj/argo-workflows/commit/32af692eeec765b13ee3d2b4ede9f5ff45527b4c) added 2.0
 * [802940be0](https://github.com/argoproj/argo-workflows/commit/802940be0d4ffd5048dd5307b97af442d82e9a83) added 2.0
 * [1d4434155](https://github.com/argoproj/argo-workflows/commit/1d44341553d95ac8192d4a80e178a9d72558829a) added new png
 * [1069af4f3](https://github.com/argoproj/argo-workflows/commit/1069af4f3f12bae0e7c33e557ef479203d4adb7c) Support submission of manifests via URL
 * [cc1f0caf7](https://github.com/argoproj/argo-workflows/commit/cc1f0caf72bb5e10b7ea087294bf48d0c1215c47) Add recursive coinflip example
 * [90f37ad63](https://github.com/argoproj/argo-workflows/commit/90f37ad63f37500a7b661960ccb8367866054c51) Support configuration of the controller to match specified labels
 * [f9c9673ac](https://github.com/argoproj/argo-workflows/commit/f9c9673ac8f7dd84eb249e02358ad13ab0a9849f) Filter non-workflow related pods in the controller's pod watch
 * [9555a472b](https://github.com/argoproj/argo-workflows/commit/9555a472ba76d63ed4862c1ef2bb78dbc0d1cac3) Add install notes to support cluster with legacy authentication disabled. Add option to specify service account
 * [837e0a2b5](https://github.com/argoproj/argo-workflows/commit/837e0a2b5e254218774579a1a9acfdba8af4aad2) Propagate deletion of controller replicaset/pod during uninstall
 * [5a7fcec08](https://github.com/argoproj/argo-workflows/commit/5a7fcec08b86c8c618c5006a2299aa2d75441fab) Add parameter passing example yaml
 * [2a34709da](https://github.com/argoproj/argo-workflows/commit/2a34709da544c77587438b22f41abd14b3fe004a) Passthrough --namespace flag to `kubectl logs`
 * [3fc6af004](https://github.com/argoproj/argo-workflows/commit/3fc6af0046291e9020db496d072d4d702c02550a) Adding passing parameter example yaml
 * [e275bd5ac](https://github.com/argoproj/argo-workflows/commit/e275bd5ac52872f5a940085759683c073fcfa021) Add support for output as parameter
 * [5ee1819c7](https://github.com/argoproj/argo-workflows/commit/5ee1819c78e65a2686dbc9fc4d66622cfcbdad9c) Restore and formalize sidecar kill functionality as part of executor
 * [dec978911](https://github.com/argoproj/argo-workflows/commit/dec9789115c0b659c3a838ba1d75ea6ee4dfa350) Proper workflow manifest validation during `argo lint` and `argo submit`
 * [6ab0b6101](https://github.com/argoproj/argo-workflows/commit/6ab0b610170ae370bde53c62c38a7e6f707c09eb) Uninstall support via `argo uninstall`
 * [3ba84082a](https://github.com/argoproj/argo-workflows/commit/3ba84082a80a55abff9bfcd9a29e5444c89eab61) Adding sidecar container
 * [dba29bd9d](https://github.com/argoproj/argo-workflows/commit/dba29bd9dec34aa779d53b68206f4cf414c916bc) Support GCP
 * [f30491056](https://github.com/argoproj/argo-workflows/commit/f3049105664999ec29e955c9ac73c8bd1dfd6730) Proper controller support for running workflows in arbitrary namespaces. Install controller into kube-system namespace by default
 * [ffb3d1280](https://github.com/argoproj/argo-workflows/commit/ffb3d128070f2c6961d20ba2ea3c0d64f760b1bb) Add support for controller installation via `argo install` and demo instructions
 * [dcfb27521](https://github.com/argoproj/argo-workflows/commit/dcfb2752172ad8c79da97a5a35895eb62f0d52eb) Add `argo delete` command to delete workflows
 * [8e583afb0](https://github.com/argoproj/argo-workflows/commit/8e583afb0a2161d3565651abb1cf7d76d50af861) Add `argo logs` command as a convenience wrapper around `kubectl logs`
 * [368193d50](https://github.com/argoproj/argo-workflows/commit/368193d5002cb2d50b02e397e2b98e09b427227c) Add argo `submit`, `list`, `get`, `lint` commands
 * [8ef7a131c](https://github.com/argoproj/argo-workflows/commit/8ef7a131c966c080c8651de7bb08424e501f1c3d) Executor to load script source code as an artifact to main. Remove controller hack
 * [736c5ec64](https://github.com/argoproj/argo-workflows/commit/736c5ec64930df2e25ee7698db9c04044c53ba6c) Annotate pod with outputs. Properly handle tar/gz of artifacts
 * [cd415c9d5](https://github.com/argoproj/argo-workflows/commit/cd415c9d56fdd211405c7e5a20789e5f37b049db) Introduce Template.ArchiveLocation to store all related step level artifacts to a job, understood by executor
 * [4241cacea](https://github.com/argoproj/argo-workflows/commit/4241cacea3f272146192c90322c9f780f55ef717) Support for saving s3 output artifacts
 * [cd3a3f1e5](https://github.com/argoproj/argo-workflows/commit/cd3a3f1e57194fe61634a845ddee0be84b446cde) Bind mount docker.sock to wait container to use `docker wait` and `docker cp`
 * [77d64a66a](https://github.com/argoproj/argo-workflows/commit/77d64a66a91e3cd39230714b355374a3d72d5233) Support the case where an input artifact path overlaps with a container volume mount
 * [6a54b31f3](https://github.com/argoproj/argo-workflows/commit/6a54b31f3619e26fb5fcb98f897eed5392e546bd) Support for automatic termination for daemoned workflow steps
 * [2435e6f75](https://github.com/argoproj/argo-workflows/commit/2435e6f75a94565217423d244a75170c47115cb8) Track children node IDs in workflow status nodes
 * [227c19616](https://github.com/argoproj/argo-workflows/commit/227c19616fc1ebd1567cf483107d9323e04a6cc7) Initial support for daemon workflow steps (no termination yet)
 * [738b02d20](https://github.com/argoproj/argo-workflows/commit/738b02d20495c06ee63b63261fae2b9e815fe578) Support for git/http input artifacts. hack together wait container logic as a shell script
 * [de71cb5ba](https://github.com/argoproj/argo-workflows/commit/de71cb5baccff313d8aa372876f79ab1f8044921) Change according to jesse's comments
 * [621d7ca98](https://github.com/argoproj/argo-workflows/commit/621d7ca98649feaacfdfd3a531f9ed45cd07a86c) Argo Executor init container
 * [efe439270](https://github.com/argoproj/argo-workflows/commit/efe439270af68cd1cef44d7b6874f0ef0f195d9d) Switch representation of parallel steps as a list instead of a map. update examples
 * [56ca947bb](https://github.com/argoproj/argo-workflows/commit/56ca947bb57fee22b19f3046873ab771a8637859) Introduce ability to add sidecars to run adjacent to workflow steps
 * [b4d777017](https://github.com/argoproj/argo-workflows/commit/b4d777017c5bdd87db1b004aa8623c213acd3840) Controller support for overlapping artifact path to user specified volume mountPaths
 * [3782badea](https://github.com/argoproj/argo-workflows/commit/3782badead84caff944dbe2bfc3a4f53b3113dd4) Get coinflip example to function
 * [065a8f77f](https://github.com/argoproj/argo-workflows/commit/065a8f77f5f84bc4e9f5ddacc3fb630a5ea86d0b) Get python script example to function
 * [8973204a5](https://github.com/argoproj/argo-workflows/commit/8973204a5a7f88b91b99f711c7e175be20f6dfc6) Move to list style of inputs and arguments (vs. maps). Reuse artifact datastructure
 * [d98387496](https://github.com/argoproj/argo-workflows/commit/d983874969d40058fa7ca648d5bf17f11ea8c0fb) Improve example yamls
 * [f83b26202](https://github.com/argoproj/argo-workflows/commit/f83b26202d4b896e9ac13e8d93109df3a3bc0c82) Support for volumeClaimTemplates (ephemeral volumes) in workflows
 * [be3ad92e0](https://github.com/argoproj/argo-workflows/commit/be3ad92e0c420f22abb306eff33f85b2bbf6bffb) Support for using volumes within multiple steps in a workflow
 * [4b4dc4a31](https://github.com/argoproj/argo-workflows/commit/4b4dc4a315a4b36f077a6bcc9647f04be5a083cb) Copy outputs from pod metadata to workflow node outputs
 * [07f2c9654](https://github.com/argoproj/argo-workflows/commit/07f2c9654481d52869df41466aead42220765582) Initial support for conditionals as 'when' field in workflow step
 * [fe639edd6](https://github.com/argoproj/argo-workflows/commit/fe639edd6dbbdb4a0405d8449cc2b9aa7bbc9dc0) Controller support for "script" templates (workflow step as code)
 * [a896f03e9](https://github.com/argoproj/argo-workflows/commit/a896f03e9daf0bdd466ebe21e42ac5af37dc580c) Add example yamls for proposal for scripting steps
 * [c782e2e1b](https://github.com/argoproj/argo-workflows/commit/c782e2e1b8ef9dcd1b2fc30d4d1f834ca2a22c70) Support looping with item maps
 * [7dc58fce0](https://github.com/argoproj/argo-workflows/commit/7dc58fce04b45c49df953b90971e3138311c3106) Initial withItems loop support
 * [f3010c1da](https://github.com/argoproj/argo-workflows/commit/f3010c1da94be33712941c7cba0a6820d4ffd762) Support for argument passing and substitution in templates
 * [5e8ba8701](https://github.com/argoproj/argo-workflows/commit/5e8ba8701993bb3a1c86317d641ab5c98d69c0bf) Split individual workflow operation logic from controller
 * [63a2c20c2](https://github.com/argoproj/argo-workflows/commit/63a2c20c20b1adfc6b3082a341faa72127ab84fd) Introduce sirupsen/logrus logging package
 * [2058342f7](https://github.com/argoproj/argo-workflows/commit/2058342f7f8a48337f7dce8e45c22a1fed71babe) Annotate the template used by executor to include destination artifact information
 * [52f8db217](https://github.com/argoproj/argo-workflows/commit/52f8db217581fde487c21dee09821d2c27878d0f) Sync workflow controller configuration from a configmap. Add config validation
 * [d0a1748af](https://github.com/argoproj/argo-workflows/commit/d0a1748afa3c69886a55408d72024fdcecf25c97) Upgrade to golang 1.9.1. Get `make lint` target to function
 * [ac58d8325](https://github.com/argoproj/argo-workflows/commit/ac58d8325fc253af0cd00e0d397a5ab60ade5188) Speed up rebuilds from within build container by bind mounting $GOPATH/pkg:/root/go/pkg
 * [714456753](https://github.com/argoproj/argo-workflows/commit/714456753ae81e62f4cf3a563eed20d1b0d1be1a) Add installation manifests. Initial stubs for controller configuration
 * [103720917](https://github.com/argoproj/argo-workflows/commit/103720917b689713ba9b963d00e4578fd6d21fb2) Introduce s3, git, http artifact sources in inputs.artifacts
 * [a68001d31](https://github.com/argoproj/argo-workflows/commit/a68001d31fc4c2d55686a29abe7ace8f0bdf4644) Add debug tools to argoexec image. Remove privileged mode from sidekick. Disable linting
 * [dc530232d](https://github.com/argoproj/argo-workflows/commit/dc530232d4595feb0ad01ef45a25bfec23db43a8) Create shared 'input-artifacts' volume and mount between init/main container
 * [6ba84eb52](https://github.com/argoproj/argo-workflows/commit/6ba84eb5285efcacd1f460e11892bce175246799) Expose various pod metadata to argoexec via K8s downward API
 * [1fc079de2](https://github.com/argoproj/argo-workflows/commit/1fc079de2fddf992e8d42abf3fe0e556ae7973c2) Add `argo yaml validate` command and `argoexec artifacts load` stub
 * [9125058db](https://github.com/argoproj/argo-workflows/commit/9125058db7c3c45b907c767d040867b3e9c37063) Include scheduling of argoexec (init and sidekick) containers to the user's main
 * [67f8353a0](https://github.com/argoproj/argo-workflows/commit/67f8353a045c6fcb713f8b6f534e1caf6fee2be2) Initial workflow operator logic
 * [8137021ad](https://github.com/argoproj/argo-workflows/commit/8137021adc20adbb39debbbcdb41332eed7a5451) Reorganize all CLIs into a separate dir. Add stubs for executor and apiserver
 * [74baac717](https://github.com/argoproj/argo-workflows/commit/74baac71754937c4f934be5321a8c24d172a5142) Introduce Argo errors package
 * [37b7de800](https://github.com/argoproj/argo-workflows/commit/37b7de8008ab299e6db3d4616bac2d8af0bcb0fc) Add apiserver skeleton
 * [3ed1dfeb0](https://github.com/argoproj/argo-workflows/commit/3ed1dfeb073829d3c4f92b95c9a74118caaec1b4) Initial project structure. CLI and Workflow CRD skeleton

### Contributors

 * Alexander Matyushentsev
 * Anshuman Bhartiya
 * David Kale
 * Ed Lee
 * Edward Lee
 * Javier Castellanos
 * Jesse Suen
 * Matt Hillsdon
 * Rafal
 * Rahul Dhide
 * Rhys Parry
 * Sandeep Bhojwani
 * Shri Javadekar
 * Tianhe Zhang
 * Wojciech Kalemba
 * cyee88
 * gaganapplatix
 * mukulikak

## v0.4.7 (2018-06-07)

 * [e4d0bd392](https://github.com/argoproj/argo-workflows/commit/e4d0bd3926d02fe3e89d6d9b8a02ecbb2db91eff) Take into account number of unavailable replicas to decide if deployment is healthy or not (#270)
 * [18dc82d14](https://github.com/argoproj/argo-workflows/commit/18dc82d14d240485a266350c182560e2d2700ada) Remove hard requirement of initializing OIDC app during server startup (resolves #272)
 * [e720abb58](https://github.com/argoproj/argo-workflows/commit/e720abb58b43f134518ce30239c2a4533effdbc7) Bump version to v0.4.7
 * [a2e9a9ee4](https://github.com/argoproj/argo-workflows/commit/a2e9a9ee49052dce05dc9718240dfb8202e5b2c2) Repo names containing underscores were not being accepted (resolves #258)

### Contributors

 * Alexander Matyushentsev
 * Jesse Suen

## v0.4.6 (2018-06-06)

 * [cf3776903](https://github.com/argoproj/argo-workflows/commit/cf3776903d8d52af9c656c740601e53947d79609) Retry `argocd app wait` connection errors from EOF watch. Show detailed state changes

### Contributors

 * Jesse Suen

## v0.4.5 (2018-05-31)

 * [3acca5095](https://github.com/argoproj/argo-workflows/commit/3acca5095e1bdd028dfd0424abdeb3e5b3036b2d) Add `argocd app unset` command to unset parameter overrides. Bump version to v0.4.5
 * [5a6228612](https://github.com/argoproj/argo-workflows/commit/5a622861273da8ccf27bcfd12471b8a377e558e6) Cookie token was not parsed properly when mixed with other site cookies

### Contributors

 * Jesse Suen

## v0.4.4 (2018-05-30)

 * [5452aff0b](https://github.com/argoproj/argo-workflows/commit/5452aff0bebdbba3990f1cc2e300f6f37f634b8b) Add ability to show parameters and overrides in CLI (resolves #240) (#247)
 * [0f4f1262a](https://github.com/argoproj/argo-workflows/commit/0f4f1262af8837748da06fdcc9accf4ced273dfd) Add Events API endpoint (#237)
 * [4e7f68ccb](https://github.com/argoproj/argo-workflows/commit/4e7f68ccbae9b362178bcdaafc1c0c29fcc1ef19) Update version to 0.4.4
 * [96c05babe](https://github.com/argoproj/argo-workflows/commit/96c05babe026b998fb80033c76594585b869c8a2) Issue #238 - add upsert flag to 'argocd app create' command (#245)
 * [6b78cddb1](https://github.com/argoproj/argo-workflows/commit/6b78cddb1921dad6c3f0fe53c85c51711ba8b2de) Add repo browsing endpoint (#229)
 * [12596ff93](https://github.com/argoproj/argo-workflows/commit/12596ff9360366afbadfcd366586318b74e410ca) Issue #233 - Controller does not persist rollback operation result (#234)
 * [a240f1b2b](https://github.com/argoproj/argo-workflows/commit/a240f1b2b9e7d870d556fb4420016852a733b9c5) Bump version to 0.5.0
 * [f6da19672](https://github.com/argoproj/argo-workflows/commit/f6da19672e6388ae481dc72b32703973c0ebe921) Support subscribing to settings updates and auto-restart of dex and API server (resolves #174) (#227)
 * [e81d30be9](https://github.com/argoproj/argo-workflows/commit/e81d30be9b378312d626a3b5034f2f4d2d1f70d5) Update getting_started.md to point to v0.4.3
 * [13b090e3b](https://github.com/argoproj/argo-workflows/commit/13b090e3bd96dc984bc266c49c536511dff793d1) Issue #147 - App sync frequently fails due to concurrent app modification (#226)
 * [d0479e6dd](https://github.com/argoproj/argo-workflows/commit/d0479e6ddcba5fe66ed2137935bcec51dedb4f27) Issue #223 - Remove app finalizers during e2e fixture teardown (#225)
 * [143282700](https://github.com/argoproj/argo-workflows/commit/1432827006855aa526966de93c88551ce049b5ce) Add error fields to cluster/repo, shell output (#200)

### Contributors

 * Alexander Matyushentsev
 * Andrew Merenbach
 * Jesse Suen

## v0.4.3 (2018-05-21)

 * [89bf4eac7](https://github.com/argoproj/argo-workflows/commit/89bf4eac7105ced9279203b7085f07ac76a13ee5) Bump version to 0.4.3
 * [07aac0bda](https://github.com/argoproj/argo-workflows/commit/07aac0bdae285201e36e73b88bd16f2318a04be8) Move local branch deletion as part of git Reset() (resolves #185) (#222)
 * [61220b8d0](https://github.com/argoproj/argo-workflows/commit/61220b8d0d5b217866e5c2fa6f6d739eea234225) Fix exit code for app wait (#219)

### Contributors

 * Andrew Merenbach
 * Jesse Suen

## v0.4.2 (2018-05-21)

 * [4e470aaf0](https://github.com/argoproj/argo-workflows/commit/4e470aaf096b7acadf646063781af811168276ea) Remove context name prompt during login. (#218)
 * [76922b620](https://github.com/argoproj/argo-workflows/commit/76922b620b295897f8f86416cea1b41d558a0d24) Update version to 0.4.2

### Contributors

 * Jesse Suen

## v0.4.1 (2018-05-18)

 * [ac0f623ed](https://github.com/argoproj/argo-workflows/commit/ac0f623eda0cd7d6adb5f8be8655a22e910a120d) Add `argocd app wait` command (#216)
 * [afd545088](https://github.com/argoproj/argo-workflows/commit/afd5450882960f4f723197e56ea7c67dc65b8d10) Bump version to v0.4.1
 * [c17266fc2](https://github.com/argoproj/argo-workflows/commit/c17266fc2173246775cecfb6625d6d60eac2d2b8) Add documentation on how to configure SSO and Webhooks
 * [f62c82549](https://github.com/argoproj/argo-workflows/commit/f62c825495211a738d11f9e95e1aec59a5031be0) Manifest endpoint (#207)
 * [45f44dd4b](https://github.com/argoproj/argo-workflows/commit/45f44dd4be375002300f96386ffb3383c2119ff8) Add v0.4.0 changelog
 * [9c0daebfe](https://github.com/argoproj/argo-workflows/commit/9c0daebfe088a1ac5145417df14d11769f266e82) Fix diff falsely reporting OutOfSync due to namespace/annotation defaulting
 * [f2a0ca560](https://github.com/argoproj/argo-workflows/commit/f2a0ca560971680e21b20645d62462a29ac25721) Add intelligence in diff library to perform three-way diff from last-applied-configuration annotation (resolves #199)
 * [e04d31585](https://github.com/argoproj/argo-workflows/commit/e04d315853ec9ed25d8359136d6141e821fae5e1) Issue #118 - app delete should be done through controller using finalizers (#206)
 * [daec69765](https://github.com/argoproj/argo-workflows/commit/daec697658352b9a607f5d4cc777eae24db0ed33) Update ksonnet to v0.10.2 (resolves #208)
 * [7ad567071](https://github.com/argoproj/argo-workflows/commit/7ad56707102a31d64214f8fb47ab840fd2550826) Make sure api server started during fixture setup (#209)
 * [803642337](https://github.com/argoproj/argo-workflows/commit/8036423373e79b48a52a34bd524f1cdf8bf2fd46) Implement App management and repo management e2e tests (#205)
 * [8039228a9](https://github.com/argoproj/argo-workflows/commit/8039228a9d31a445461231de172425e911e9eaea) Add last update time to operation status, fix operation status patching (#204)
 * [b1103af42](https://github.com/argoproj/argo-workflows/commit/b1103af4290e6e6134f2d4f62df32f90aa8448d5) Rename recent deployments to history (#201)
[d67ad5acf](https://github.com/argoproj/argo-workflows/commit/d67ad5acfd598712c153f14a1c7946759dbc733c) Add connect timeouts when interacting with SSH git repos (resolves #131) (#203) - * [c9df9c17b](https://github.com/argoproj/argo-workflows/commit/c9df9c17b77688ac5725a9fa00222006a5fd9f4f) Default Spec.Source.TargetRevision to HEAD server-side if unspecified (issue #190) - * [8fa46b02b](https://github.com/argoproj/argo-workflows/commit/8fa46b02b0784a9922aa93be5896e65732a1729d) Remove SyncPolicy (issue #190) - * [92c481330](https://github.com/argoproj/argo-workflows/commit/92c481330d655697a6630813b63617de6789f403) App creation was not defaulting to server and namespace defined in app.yaml - * [2664db3e4](https://github.com/argoproj/argo-workflows/commit/2664db3e4072b96176d286f7a91f03d08e5cc715) Refactor application controller sync/apply loop (#202) - * [6b554e5f4](https://github.com/argoproj/argo-workflows/commit/6b554e5f4efa3473be217ebbcaf89acb22ded628) Add 0.3.0 to 0.4.0 migration utility (#186) - * [2bc0dff13](https://github.com/argoproj/argo-workflows/commit/2bc0dff1359031cc335769c3a742987cb1c4e7ba) Issue #146 - Render health status information in 'app list' and 'app get' commands (#198) - * [c61795f71](https://github.com/argoproj/argo-workflows/commit/c61795f71afd5705d75a4377c9265023aa7cec2c) Add 'database' library for CRUD operations against repos and clusters. Redact sensitive information (#196) - * [a8a7491bf](https://github.com/argoproj/argo-workflows/commit/a8a7491bf0b0534bbf63c08291a4966aa81403fa) Handle potential panic when `argo install settings` run against an empty configmap - -### Contributors - - * Alexander Matyushentsev - * Andrew Merenbach - * Jesse Suen - -## v0.4.0-alpha1 (2018-05-11) - - -### Contributors - - -## v0.4.0 (2018-05-17) - - * [9c0daebfe](https://github.com/argoproj/argo-workflows/commit/9c0daebfe088a1ac5145417df14d11769f266e82) Fix diff falsely reporting OutOfSync due to namespace/annotation defaulting - * [f2a0ca560](https://github.com/argoproj/argo-workflows/commit/f2a0ca560971680e21b20645d62462a29ac25721) Add intelligence in diff library to perform three-way diff from last-applied-configuration annotation (resolves #199) - * [e04d31585](https://github.com/argoproj/argo-workflows/commit/e04d315853ec9ed25d8359136d6141e821fae5e1) Issue #118 - app delete should be done through controller using finalizers (#206) - * [daec69765](https://github.com/argoproj/argo-workflows/commit/daec697658352b9a607f5d4cc777eae24db0ed33) Update ksonnet to v0.10.2 (resolves #208) - * [7ad567071](https://github.com/argoproj/argo-workflows/commit/7ad56707102a31d64214f8fb47ab840fd2550826) Make sure api server started during fixture setup (#209) - * [803642337](https://github.com/argoproj/argo-workflows/commit/8036423373e79b48a52a34bd524f1cdf8bf2fd46) Implement App management and repo management e2e tests (#205) - * [8039228a9](https://github.com/argoproj/argo-workflows/commit/8039228a9d31a445461231de172425e911e9eaea) Add last update time to operation status, fix operation status patching (#204) - * [b1103af42](https://github.com/argoproj/argo-workflows/commit/b1103af4290e6e6134f2d4f62df32f90aa8448d5) Rename recent deployments to history (#201) - * [d67ad5acf](https://github.com/argoproj/argo-workflows/commit/d67ad5acfd598712c153f14a1c7946759dbc733c) Add connect timeouts when interacting with SSH git repos (resolves #131) (#203) - * [c9df9c17b](https://github.com/argoproj/argo-workflows/commit/c9df9c17b77688ac5725a9fa00222006a5fd9f4f) Default Spec.Source.TargetRevision to HEAD 
server-side if unspecified (issue #190) - * [8fa46b02b](https://github.com/argoproj/argo-workflows/commit/8fa46b02b0784a9922aa93be5896e65732a1729d) Remove SyncPolicy (issue #190) - * [92c481330](https://github.com/argoproj/argo-workflows/commit/92c481330d655697a6630813b63617de6789f403) App creation was not defaulting to server and namespace defined in app.yaml - * [2664db3e4](https://github.com/argoproj/argo-workflows/commit/2664db3e4072b96176d286f7a91f03d08e5cc715) Refactor application controller sync/apply loop (#202) - * [6b554e5f4](https://github.com/argoproj/argo-workflows/commit/6b554e5f4efa3473be217ebbcaf89acb22ded628) Add 0.3.0 to 0.4.0 migration utility (#186) - * [2bc0dff13](https://github.com/argoproj/argo-workflows/commit/2bc0dff1359031cc335769c3a742987cb1c4e7ba) Issue #146 - Render health status information in 'app list' and 'app get' commands (#198) - * [c61795f71](https://github.com/argoproj/argo-workflows/commit/c61795f71afd5705d75a4377c9265023aa7cec2c) Add 'database' library for CRUD operations against repos and clusters. Redact sensitive information (#196) - * [a8a7491bf](https://github.com/argoproj/argo-workflows/commit/a8a7491bf0b0534bbf63c08291a4966aa81403fa) Handle potential panic when `argo install settings` run against an empty configmap - * [d1c7c4fca](https://github.com/argoproj/argo-workflows/commit/d1c7c4fcafb66bac6553247d16a03863d25910e6) Issue #187 - implement `argo settings install` command (#193) - * [3dbbcf891](https://github.com/argoproj/argo-workflows/commit/3dbbcf891897f3c3889189016ae1f3fabcddca1f) Move sync logic to controller (#180) - * [0cfd1ad05](https://github.com/argoproj/argo-workflows/commit/0cfd1ad05fe8ec0c78dfd85ba0f91027522dfe70) Update feature list with SSO and Webhook integration - * [bfa4e233b](https://github.com/argoproj/argo-workflows/commit/bfa4e233b72ef2863d1bfb010ba95fad519a9c43) cli will look to spec.destination.server and namespace when displaying apps - * [dc662da3d](https://github.com/argoproj/argo-workflows/commit/dc662da3d605bd7189ce6c06b0dbc1661d4bf2fb) Support OAuth2 login flow from CLI (resolves #172) (#181) - * [4107d2422](https://github.com/argoproj/argo-workflows/commit/4107d2422bb6331833f360f2cab01eb24500e173) Fix linting errors - * [b83eac5dc](https://github.com/argoproj/argo-workflows/commit/b83eac5dc2f9c026ad07258e4c01d5217e2992fe) Make ApplicationSpec.Destination non-optional, non-pointer (#177) - * [bb51837c5](https://github.com/argoproj/argo-workflows/commit/bb51837c56a82e486d68a350b3b4397ff930ec37) Do not delete namespace or CRD during uninstall unless explicitly stated (resolves #167) (#173) - * [5bbb4fe1a](https://github.com/argoproj/argo-workflows/commit/5bbb4fe1a131ed3380a857af3db5e9d708f3b7f6) Cache kubernetes API resource discovery (resolves #170) (#176) - * [b5c20e9b4](https://github.com/argoproj/argo-workflows/commit/b5c20e9b46ea19b63f3f894d784d5a25b97f0ebb) Trim spaces server-side in GitHub usernames (#171) - * [1e1ab636e](https://github.com/argoproj/argo-workflows/commit/1e1ab636e042da4d5f1ee4e47a01f301d6a458a7) Don't fail when new app has same spec as old (#168) - * [734855389](https://github.com/argoproj/argo-workflows/commit/7348553897af89b9c4366f2d445dd2d96fe4d655) Improve CI build stability - * [5f65a5128](https://github.com/argoproj/argo-workflows/commit/5f65a5128a3fa42f12a60908eee3fa5d11624305) Introduce caching layer to repo server to improve query response times (#165) - * [d9c12e727](https://github.com/argoproj/argo-workflows/commit/d9c12e72719dffaf6951b5fb71e4bae8a8ddda0d) Issue #146 - ArgoCD 
applications should have a rolled up health status (#164) - * [fb2d6b4af](https://github.com/argoproj/argo-workflows/commit/fb2d6b4afff1ba66880691d188c284a77f6ac99e) Refactor repo server and git client (#163) - * [3f4ec0ab2](https://github.com/argoproj/argo-workflows/commit/3f4ec0ab2263038ba91d3b594b2188fc108fc8d7) Expand Git repo URL normalization (#162) - * [ac938fe8a](https://github.com/argoproj/argo-workflows/commit/ac938fe8a3af46f7aac07d607bfdd0a375e74103) Add GitHub webhook handling to fast-track controller application reprocessing (#160) - * [dc1e8796f](https://github.com/argoproj/argo-workflows/commit/dc1e8796fb40013a7980e8bc18f8b2545c6e6cca) Disable authentication for settings service - * [8c5d59c60](https://github.com/argoproj/argo-workflows/commit/8c5d59c60c679ab6d35f8a6e51337c586dc4fdde) Issue #157 - If argocd token is expired server should return 401 instead of 500 (#158) - -### Contributors - - * Alexander Matyushentsev - * Andrew Merenbach - * Jesse Suen - -## v0.3.3 (2018-05-03) - - * [13558b7ce](https://github.com/argoproj/argo-workflows/commit/13558b7ce8d7bd9f8707a6a18f45af8662b1c60d) Revert change to redact credentials since logic is reused by controller - * [3b2b3dacf](https://github.com/argoproj/argo-workflows/commit/3b2b3dacf50f9b51dde08f1d1e1e757ed30c24a4) Update version - * [1b2f89995](https://github.com/argoproj/argo-workflows/commit/1b2f89995c970eb9fb5fe7bce4ac0253bddb9d7d) Issue #155 - Application update fails due to concurrent access (#156) - * [0479fcdf8](https://github.com/argoproj/argo-workflows/commit/0479fcdf82b1719fd97767ea74509063e9308b0a) Add settings endpoint so frontend can show/hide SSO login button. Rename config to settings (#153) - * [a04465466](https://github.com/argoproj/argo-workflows/commit/a04465466dfa4dc039222732cd9dbb84f9fdb3dd) Add workflow for blue-green deployments (#148) - * [670921df9](https://github.com/argoproj/argo-workflows/commit/670921df902855b209094b59f32ce3e051a847fd) SSO Support (#152) - * [18f7e17d7](https://github.com/argoproj/argo-workflows/commit/18f7e17d7a200a0dd1c8447acc2815981c0093a6) Added OWNERS file - * [a2aede044](https://github.com/argoproj/argo-workflows/commit/a2aede04412380b7853041fbce6dd6d377e483e9) Redact sensitive repo/cluster information upon retrieval (#150) - -### Contributors - - * Alexander Matyushentsev - * Andrew Merenbach - * Edward Lee - * Jesse Suen - -## v0.3.2 (2018-05-01) - - * [1d876c772](https://github.com/argoproj/argo-workflows/commit/1d876c77290bbfc830790bff977c8a65a0432e0c) Fix compilation error - * [70465a052](https://github.com/argoproj/argo-workflows/commit/70465a0520410cd4466d1feb4eb9baac98e94688) Issue #147 - Use patch to update recentDeployments field (#149) - * [3c9845719](https://github.com/argoproj/argo-workflows/commit/3c9845719f643948a5f1be83ee7039e7f33b8c65) Issue #139 - Application sync should delete 'unexpected' resources (#144) - * [a36cc8946](https://github.com/argoproj/argo-workflows/commit/a36cc8946c8479745f63c24df4a9289d70f0a773) Issue #136 - Use custom formatter to get desired state of deployment and service (#145) - * [9567b539d](https://github.com/argoproj/argo-workflows/commit/9567b539d1d2fcb9535cdb7c91f9060a7ac06d8f) Improve comparator to fall back to looking up a resource by name - * [fdf9515de](https://github.com/argoproj/argo-workflows/commit/fdf9515de2826d53f8b138f99c8896fdfa5f919e) Refactor git library: * store credentials in files (instead of encoded in URL) to prevent leakage during git errors * fix issue where HEAD would not track updates from origin/HEAD 
(resolves #133) * refactor git library to promote code reuse, and remove shell invocations - * [b32023848](https://github.com/argoproj/argo-workflows/commit/b320238487c339186f1e0be5e1bfbb35fa0036a4) ksonnet util was not locating a ksonnet app dir correctly - * [7872a6049](https://github.com/argoproj/argo-workflows/commit/7872a60499ebbda01cd31f859eba8e7209f16b9c) Update ksonnet to v0.10.1 - * [5fea3846d](https://github.com/argoproj/argo-workflows/commit/5fea3846d1c09bca9d0e68f1975598b29b5beb91) Adding clusters should always go through argocd-manager service account creation - * [86a4e0baa](https://github.com/argoproj/argo-workflows/commit/86a4e0baaa8932daeba38ac74535497e773f24b9) RoleBindings do not need to specify service account namespace in subject - * [917f1df25](https://github.com/argoproj/argo-workflows/commit/917f1df250013ec462f0108bfb85b54cb56c53c4) Populated 'unexpected' resources while comparing target and live states (#137) - * [11260f247](https://github.com/argoproj/argo-workflows/commit/11260f24763dab2e2364d8cb4c5789ac046666a8) Don't ask for user credentials if username and password are specified as arguments (#129) - * [38d20d0f0](https://github.com/argoproj/argo-workflows/commit/38d20d0f0406e354c6ca4d9f2776cbb8a322473c) Add `argocd ctx` command for switching between contexts. Better CLI descriptions (resolves #103) - * [938f40e81](https://github.com/argoproj/argo-workflows/commit/938f40e817a44eb1c806102dc90593af2adb5d88) Prompting for repo credentials was accepting newline as username - * [5f9c8b862](https://github.com/argoproj/argo-workflows/commit/5f9c8b862edbcba5d079621f0c4bba0e942add9b) Error properly when server address is unspecified (resolves #128) - * [d96d67bb9](https://github.com/argoproj/argo-workflows/commit/d96d67bb9a4eae425346298d513a1cf52e89da62) Generate a temporary kubeconfig instead of using kubectl flags when applying resources - * [19c3b8767](https://github.com/argoproj/argo-workflows/commit/19c3b876767571257fbadad35971d8f6eecd2d74) Bump version to 0.4.0. 
`argocd app sync --dry-run` was incorrectly appending items to history (resolves #127) - -### Contributors - - * Alexander Matyushentsev - * Jesse Suen - -## v0.3.1 (2018-04-24) - - * [7d08ab4e2](https://github.com/argoproj/argo-workflows/commit/7d08ab4e2b5028657c6536dc9007ac5b9da13b8d) Bump version to v0.3.1 - * [efea09d21](https://github.com/argoproj/argo-workflows/commit/efea09d2165e35b6b2176fd0ff6f5fcd0c4699e4) Fix linting issue in `app rollback` - * [2adaef547](https://github.com/argoproj/argo-workflows/commit/2adaef547be26b9911676ff048b0ea38d8e87df2) Introduce `argocd app history` and `argocd app rollback` CLI commands (resolves #125) - * [d71bbf0d9](https://github.com/argoproj/argo-workflows/commit/d71bbf0d9a00046622498200754f7ae6639edfc4) Allow overriding server or namespace separately (#126) - * [36b3b2b85](https://github.com/argoproj/argo-workflows/commit/36b3b2b8532142d50c3ada0d8d3cb2328c8a32e4) Switch to gogo/protobuf for golang code generation in order to use gogo extensions - * [63dafa08c](https://github.com/argoproj/argo-workflows/commit/63dafa08ccdef6141f83f26157bd32192c62f052) Issue #110 - Rollback ignores parameter overrides (#117) - * [afddbbe87](https://github.com/argoproj/argo-workflows/commit/afddbbe875863c8d33a85d2d2874f0703153c195) Issue #123 - Create .argocd directory before saving config file (#124) - * [34811cafc](https://github.com/argoproj/argo-workflows/commit/34811cafca3df45952677407ce5458d50f23e0fd) Update download instructions to getting started - -### Contributors - - * Alexander Matyushentsev - * Jesse Suen - -## v0.3.0 (2018-04-23) - - * [8a2851169](https://github.com/argoproj/argo-workflows/commit/8a2851169c84741d774818ec8943a444d523f082) Enable auth by default. Decrease app resync period from 10m to 3m - * [1a85a2d80](https://github.com/argoproj/argo-workflows/commit/1a85a2d8051ee64ad16b0487e2a3d14cf4fb01e6) Bump version file to 0.3.0. 
Add release target and cli-linux/darwin targets - * [cf2d00e1e](https://github.com/argoproj/argo-workflows/commit/cf2d00e1e04219ed99195488740189fbd6af997d) Add ability to set a parameter override from the CLI (`argo app set -p`) - * [266c948ad](https://github.com/argoproj/argo-workflows/commit/266c948adddab715ba2c60f082bd7e37aec6f814) Add documentation about ArgoCD tracking strategies - * [dd564ee9d](https://github.com/argoproj/argo-workflows/commit/dd564ee9dd483f3e19bceafd30e5842a005e04f1) Introduce `app set` command for updating an app (resolves #116) - * [b9d48cabb](https://github.com/argoproj/argo-workflows/commit/b9d48cabb99e336ea06e1a7af56f2e74e740a9cf) Add ability to set the tracking revision during app creation - * [276e0674c](https://github.com/argoproj/argo-workflows/commit/276e0674c37a975d903404b3e3bf747b7e99a787) Deployment of resources is performed using `kubectl apply` (resolves #106) - * [f3c4a6932](https://github.com/argoproj/argo-workflows/commit/f3c4a6932730c53ae1cf9de2df9e62c89e54ea53) Add watch verb to controller role - * [1c60a6986](https://github.com/argoproj/argo-workflows/commit/1c60a69866dae95c7bf4a0f912292a5a6714611f) Rename `argocd app add/rm` to `argocd app create/delete` (resolves #114) - * [050f937a2](https://github.com/argoproj/argo-workflows/commit/050f937a2409111194f6c4ff7cc75a3f2ed3fa0b) Update ksonnet to v0.10.0-alpha.3 - * [b24e47822](https://github.com/argoproj/argo-workflows/commit/b24e478224a359c883425f2640f4327f29b3ab80) Add application validation - * [e34380ed7](https://github.com/argoproj/argo-workflows/commit/e34380ed765bc8b802d60ab30c25a1389ebd33a8) Expose port 443 to proxy to port 8080 (#113) - * [338a1b826](https://github.com/argoproj/argo-workflows/commit/338a1b826fd597eafd0a654ca424a0c90b4647e0) `argo login` was not able to properly update boolean connection flags (insecure/plaintext) - * [b87c63c89](https://github.com/argoproj/argo-workflows/commit/b87c63c897dc0e7c11b311d9f6de6f6436186aeb) Re-add workaround for ksonnet bug - * [f6ed150bb](https://github.com/argoproj/argo-workflows/commit/f6ed150bb7e9f50854fe4f7e4d00cc7ab1ccd581) Issue #108 - App controller incorrectly report that app is out of sync (#109) - * [d5c683bc7](https://github.com/argoproj/argo-workflows/commit/d5c683bc76f6e3eb1b5570b50d795b387481087f) Add syncPolicy field to application CRD (#107) - * [3ac95f3f8](https://github.com/argoproj/argo-workflows/commit/3ac95f3f84c6b85aa8e0ff0c9c68e2ccbbaa8875) Fix null pointer error in controller (#105) - * [3be872ad3](https://github.com/argoproj/argo-workflows/commit/3be872ad32891cc7628b3717bff31deb687a556f) Rework local config to support multiple servers/credentials - * [80964a79b](https://github.com/argoproj/argo-workflows/commit/80964a79b2b8cd1383eb1cbf03eddb608c13b771) Set session cookies, errors appropriately (#100) - * [e719035ea](https://github.com/argoproj/argo-workflows/commit/e719035ea5ba3d08bc4118151989071befb127ac) Allow ignoring resource deletion related errors while deleting application (#98) - * [f2bcf63b2](https://github.com/argoproj/argo-workflows/commit/f2bcf63b26257bb83220d3a94ddbb394b591b659) Fix linting breakage in session mock from recent changes to session interface - * [2c9843f1a](https://github.com/argoproj/argo-workflows/commit/2c9843f1a083ce41ec3fa9aebf14fb5028a17765) Update ksonnet to v0.10.0-alpha.2 - * [0560406d8](https://github.com/argoproj/argo-workflows/commit/0560406d815f7012f4c45bda8d2a3d940457bd3a) Add server auth cookies (#96) - * 
[db8083c65](https://github.com/argoproj/argo-workflows/commit/db8083c6573ba4a514bbad11d73f5e65e9ed06a6) Lowercase repo names before using in secret (#94) - * [fcc9f50b3](https://github.com/argoproj/argo-workflows/commit/fcc9f50b3fe35f71ab2ead6181517bf16e06ac7f) Fix issue preventing uppercased repo and cluster URLs (resolves #81) - * [c1ffbad8d](https://github.com/argoproj/argo-workflows/commit/c1ffbad8d89ed0aad0ce680463fe38297afb09b8) Support manual token use for CLI commands (#90) - * [d7cdb1a5a](https://github.com/argoproj/argo-workflows/commit/d7cdb1a5af3aae50d67ff4d2346375ffe3bbf1af) Convert Kubernetes errors to gRPC errors (#89) - * [6c41ce5e0](https://github.com/argoproj/argo-workflows/commit/6c41ce5e086822529a37002878ab780778df26b9) Add session gateway (#84) - * [685a814f3](https://github.com/argoproj/argo-workflows/commit/685a814f3870237c560c83724af5fc214af158b8) Add `argocd login` command (#82) - * [06b64047a](https://github.com/argoproj/argo-workflows/commit/06b64047a4b5e6d7728ac6ca2eac03327f42ca37) Issue #69 - Auto-sync option in application CRD instance (#83) - * [8a90b3244](https://github.com/argoproj/argo-workflows/commit/8a90b324461ecc35a6d94296154e5aaa86e0adc5) Show more relevant information in `argocd cluster add` - * [7e47b1eba](https://github.com/argoproj/argo-workflows/commit/7e47b1ebae32b01b927c76c120cdab7be8084d13) TLS support. HTTP/HTTPS/gRPC all serving on single port - * [150b51a3a](https://github.com/argoproj/argo-workflows/commit/150b51a3ac43cac00aae886fe2c3ac5b1fb0a588) Fix linter warning - * [0002f8db9](https://github.com/argoproj/argo-workflows/commit/0002f8db9e9e96f2601ee4bd005864cd88e0ee50) Issue #75 - Implement delete pod API - * [59ed50d23](https://github.com/argoproj/argo-workflows/commit/59ed50d230d86946ed8a1d881771f24897dba305) Issue #74 - Implement stream logs API - * [820b4bac1](https://github.com/argoproj/argo-workflows/commit/820b4bac1afc7ce5c42779c80fc36fbe5fbf9893) Remove obsolete pods api - * [19c5ecdbf](https://github.com/argoproj/argo-workflows/commit/19c5ecdbfabd83a83f2b83a34b0b66b984c5cfa8) Check app label on client side before deleting app resource - * [66b0702c2](https://github.com/argoproj/argo-workflows/commit/66b0702c2437421a414b72b29d1322ad49be7884) Issue #65 - Delete all the kube object once app is removed - * [5b5dc0efc](https://github.com/argoproj/argo-workflows/commit/5b5dc0efc40637279d070cf5eb004a9378d25433) Issue #67 - Application controller should persist ksonnet app parameters in app CRD (#73) - * [0febf0516](https://github.com/argoproj/argo-workflows/commit/0febf0516005bbfd5de455d7a32c47b94bd1ca60) Issue #67 - Persist resources tree in application CRD (#68) - * [ee924bda6](https://github.com/argoproj/argo-workflows/commit/ee924bda6ecdc1076db564252d95d5b1e9a0f365) Update ksonnet binary in image to ks tip. Begin using ksonnet as library instead of parsing stdout - * [ecfe571e7](https://github.com/argoproj/argo-workflows/commit/ecfe571e758228f8e63c98c9d529941be31a0a20) update ksonnet dependency to tip. 
override some of ksonnet's dependencies - * [173ecd939](https://github.com/argoproj/argo-workflows/commit/173ecd9397a6a91c85931675874b0a9550be1346) Installer and settings management refactoring: - * [ba3db35ba](https://github.com/argoproj/argo-workflows/commit/ba3db35ba08e8b1c625c94107023f3c15235636a) Add authentication endpoints (#61) - * [074053dac](https://github.com/argoproj/argo-workflows/commit/074053dac77c67913a33f1cc894beccb9cc0553d) Update go-grpc-middleware version (#62) - * [6bc98f91b](https://github.com/argoproj/argo-workflows/commit/6bc98f91b146ab56cd9cbdd66d756cb281730c59) Add JWT support (#60) - -### Contributors - - * Alexander Matyushentsev - * Andrew Merenbach - * Jesse Suen - -## v0.2.0 (2018-03-28) - - * [59dbe8d7e](https://github.com/argoproj/argo-workflows/commit/59dbe8d7eace6f9b82fda59a0590f0f3e24cc514) Maintain list of recent deployments in app CRD (#59) - * [6d7936173](https://github.com/argoproj/argo-workflows/commit/6d793617399a2b1abed8e6cb561115f9311eafae) Issue #57 - Add configmaps into argocd server role (#58) - * [e1c7f9d6f](https://github.com/argoproj/argo-workflows/commit/e1c7f9d6f86f4a489c79e921f38f15ba02de6472) Fix deleting resources which do not support 'deletecollection' method but support 'delete' (#56) - * [5febea223](https://github.com/argoproj/argo-workflows/commit/5febea22354eb8b6b56e22096a3cddefcded34ad) Argo server should not fail if configmap name is not provided or config map does not exist (#55) - * [d093c8c3a](https://github.com/argoproj/argo-workflows/commit/d093c8c3a17d51a83514c7a355239409409d1e78) Add password hashing (#51) - * [10a8d521e](https://github.com/argoproj/argo-workflows/commit/10a8d521ef5b21ee139128dad33e0ad160cc56fd) Add application source and component parameters into recentDeployment field of application CRD (#53) - * [234ace173](https://github.com/argoproj/argo-workflows/commit/234ace173ed1b8de4ca1010e9b583cdb5ce6bf40) Replace ephemeral environments with override parameters (#52) - * [817b13ccb](https://github.com/argoproj/argo-workflows/commit/817b13ccbed93f41a851d2dd71040e2e2bc975a0) Add license and copyright. 
#49 - * [b1682cc44](https://github.com/argoproj/argo-workflows/commit/b1682cc44be8069642d7d0a0edab0137e69a15c7) Add install configmap override flag (#47) - * [74797a2ac](https://github.com/argoproj/argo-workflows/commit/74797a2ac80ca0375a02c4a8b38a972bfa19c9f2) Delete child dependents while deleting app resources (#48) - * [ca570c7ae](https://github.com/argoproj/argo-workflows/commit/ca570c7aeeb70df1c7d4ec75b1571038142ef714) Use ksonnet release version and fix app copy command (#46) - * [92b7c6b5f](https://github.com/argoproj/argo-workflows/commit/92b7c6b5f8773f1504f12245d5f77854621d2c2c) Disable strict host key checking while cloning repo in repo-server (#45) - * [4884c20d2](https://github.com/argoproj/argo-workflows/commit/4884c20d2bfaaf65c5e6a222d22fb684c9f72788) Issue #43 - Don't setup RBAC resources for clusters with basic authentication (#44) - * [363b9b352](https://github.com/argoproj/argo-workflows/commit/363b9b352c1de1e6a84d516e6812ed6fdac3f013) Don't overwrite application status in tryRefreshAppStatus (#42) - * [5c062bd3e](https://github.com/argoproj/argo-workflows/commit/5c062bd3e51bab46979040c79795c4872c2c0d2f) Support deploying/destroying ephemeral environments (#40) - * [98754c7fe](https://github.com/argoproj/argo-workflows/commit/98754c7fe1cbfc2f39890c976949d1540af75d9c) Persist parameters during deployment (Sync) (#39) - * [3927cc079](https://github.com/argoproj/argo-workflows/commit/3927cc0799456518f889dd9c53a40a2c746d546e) Add new dependency to CONTRIBUTING.md (#38) - * [611b0e48d](https://github.com/argoproj/argo-workflows/commit/611b0e48d7be40f6cb1b30d3e3da180a443e872f) Issue #34 - Support ssh git URLs and ssh key authentication (#37) - * [0368c2ead](https://github.com/argoproj/argo-workflows/commit/0368c2eadfe34a979973e0b40b6cb4c288e55f38) Allow use of public repos without prior registration (#36) - * [e7e3c5095](https://github.com/argoproj/argo-workflows/commit/e7e3c5095c0a1b4312993a234aceb0b90d69f90e) Support -f/--file flag in `argocd app add` (#35) - * [d256256de](https://github.com/argoproj/argo-workflows/commit/d256256defbf6dcc733424df9374a2dc32069875) Update CONTRIBUTING.md (#32) - -### Contributors - - * Alexander Matyushentsev - * Andrew Merenbach - * Edward Lee - diff --git a/vendor/github.com/argoproj/argo-workflows/v3/CODEOWNERS b/vendor/github.com/argoproj/argo-workflows/v3/CODEOWNERS deleted file mode 100644 index 1ad8fe2c8e0..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -*.proto @jessesuen @alexec - diff --git a/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile b/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile deleted file mode 100644 index 7ad207b20e9..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile +++ /dev/null @@ -1,130 +0,0 @@ -#syntax=docker/dockerfile:1.2 - -FROM golang:1.18 as builder - -RUN apt-get update && apt-get --no-install-recommends install -y \ - git \ - make \ - apt-utils \ - apt-transport-https \ - ca-certificates \ - wget \ - gcc && \ - apt-get clean \ - && rm -rf \ - /var/lib/apt/lists/* \ - /tmp/* \ - /var/tmp/* \ - /usr/share/man \ - /usr/share/doc \ - /usr/share/doc-base - -WORKDIR /tmp - -# https://blog.container-solutions.com/faster-builds-in-docker-with-go-1-11 -WORKDIR /go/src/github.com/argoproj/argo-workflows -COPY go.mod . -COPY go.sum . -RUN go mod download - -COPY . . 
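# (A note on the layering above, grounded in the referenced blog post: go.mod and go.sum are copied and `go mod download` is run before `COPY . .`, so the slow module-download layer is cached by Docker and is only invalidated when the dependency files change, not on every source edit.)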
- -#################################################################################################### - -FROM node:16 as argo-ui - -COPY ui/package.json ui/yarn.lock ui/ - -RUN JOBS=max yarn --cwd ui install --network-timeout 1000000 - -COPY ui ui -COPY api api - -RUN NODE_OPTIONS="--max-old-space-size=2048" JOBS=max yarn --cwd ui build - -#################################################################################################### - -FROM builder as argoexec-build - -COPY hack/arch.sh hack/os.sh /bin/ - -# NOTE: kubectl version should be one minor version less than https://storage.googleapis.com/kubernetes-release/release/stable.txt -RUN curl -o /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.22.3/bin/$(os.sh)/$(arch.sh)/kubectl && \ - chmod +x /usr/local/bin/kubectl - -RUN curl -o /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && \ - chmod +x /usr/local/bin/jq - -# Tell git to forget about all of the files that were not included because of .dockerignore in order to ensure that -# the git state is "clean" even though said .dockerignore files are not present -RUN cat .dockerignore >> .gitignore -RUN git status --porcelain | cut -c4- | xargs git update-index --skip-worktree - -RUN --mount=type=cache,target=/root/.cache/go-build make dist/argoexec - -#################################################################################################### - -FROM builder as workflow-controller-build - -# Tell git to forget about all of the files that were not included because of .dockerignore in order to ensure that -# the git state is "clean" even though said .dockerignore files are not present -RUN cat .dockerignore >> .gitignore -RUN git status --porcelain | cut -c4- | xargs git update-index --skip-worktree - -RUN --mount=type=cache,target=/root/.cache/go-build make dist/workflow-controller - -#################################################################################################### - -FROM builder as argocli-build - -RUN mkdir -p ui/dist -COPY --from=argo-ui ui/dist/app ui/dist/app -# stop make from trying to re-build this without yarn installed -RUN touch ui/dist/node_modules.marker -RUN touch ui/dist/app/index.html - -# Tell git to forget about all of the files that were not included because of .dockerignore in order to ensure that -# the git state is "clean" even though said .dockerignore files are not present -RUN cat .dockerignore >> .gitignore -RUN git status --porcelain | cut -c4- | xargs git update-index --skip-worktree - -RUN --mount=type=cache,target=/root/.cache/go-build make dist/argo - -#################################################################################################### - -FROM gcr.io/distroless/static as argoexec - -COPY --from=argoexec-build /usr/local/bin/kubectl /bin/ -COPY --from=argoexec-build /usr/local/bin/jq /bin/ -COPY --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/ -COPY --from=argoexec-build /etc/mime.types /etc/mime.types -COPY hack/ssh_known_hosts /etc/ssh/ -COPY hack/nsswitch.conf /etc/ - -ENTRYPOINT [ "argoexec" ] - -#################################################################################################### - -FROM gcr.io/distroless/static as workflow-controller - -USER 8737 - -COPY hack/ssh_known_hosts /etc/ssh/ -COPY hack/nsswitch.conf /etc/ -COPY --chown=8737 --from=workflow-controller-build /go/src/github.com/argoproj/argo-workflows/dist/workflow-controller /bin/ - -ENTRYPOINT [ "workflow-controller" ] - 
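# (The workflow-controller stage above and the argocli stage below share the same hardening pattern: a distroless static base image, which ships no shell or package manager, plus a fixed non-root UID of 8737 so the binaries never run as root.)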
-#################################################################################################### - -FROM gcr.io/distroless/static as argocli - -USER 8737 - -WORKDIR /home/argo - -COPY hack/ssh_known_hosts /etc/ssh/ -COPY hack/nsswitch.conf /etc/ -COPY --from=argocli-build /go/src/github.com/argoproj/argo-workflows/dist/argo /bin/ - -ENTRYPOINT [ "argo" ] diff --git a/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile.windows b/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile.windows deleted file mode 100644 index fcdc1aa06e5..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/Dockerfile.windows +++ /dev/null @@ -1,64 +0,0 @@ -#################################################################################################### -# Builder image -# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image -# Also used as the image in CI jobs so needs all dependencies -#################################################################################################### - -ARG IMAGE_OS_VERSION=1809 - -# had issues with official golang image for windows so I'm using plain servercore -FROM mcr.microsoft.com/windows/servercore:${IMAGE_OS_VERSION} as builder -ENV GOLANG_VERSION=1.18 -SHELL ["powershell", "-Command"] - -# install chocolatey package manager -ENV chocolateyUseWindowsCompression=false -RUN iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')); \ - choco feature disable --name showDownloadProgress ; \ - choco feature enable -n allowGlobalConfirmation - -# install golang, dep and other tools -RUN choco install golang --version=$env:GOLANG_VERSION ; \ - choco install make dep git.portable 7zip.portable - -#################################################################################################### -# argoexec-base -# Used as the base for both the release and development version of argoexec -#################################################################################################### -FROM mcr.microsoft.com/windows/nanoserver:${IMAGE_OS_VERSION} as argoexec-base -COPY --from=builder /windows/system32/netapi32.dll /windows/system32/netapi32.dll - -# NOTE: kubectl version should be one minor version less than https://storage.googleapis.com/kubernetes-release/release/stable.txt -ENV KUBECTL_VERSION=1.22.3 -ENV JQ_VERSION=1.6 - -RUN mkdir C:\app && \ - curl -L -o C:\app\kubectl.exe "https://storage.googleapis.com/kubernetes-release/release/v%KUBECTL_VERSION%/bin/windows/amd64/kubectl.exe" && \ - curl -L -o C:\app\jq.exe "https://github.com/stedolan/jq/releases/download/jq-%JQ_VERSION%/jq-win64.exe" - -COPY --from=builder C:/ProgramData/chocolatey/lib/7zip.portable/tools/7z-extra/x64/7za.exe C:/app/7za.exe - -# add binaries to path -USER Administrator -RUN SETX /m path C:\app;%path% - -#################################################################################################### -# Argo Build stage which performs the actual build of Argo binaries -#################################################################################################### -FROM builder as argo-build - -# Perform the build -WORKDIR C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows -COPY . . 
-# check we can use Git -RUN git rev-parse HEAD -# run in git bash for all the shell commands in Makefile to work -RUN bash -c 'make dist/argoexec' - -#################################################################################################### -# argoexec -#################################################################################################### -FROM argoexec-base as argoexec -COPY --from=argo-build C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows/dist/argoexec C:/app/argoexec.exe -RUN argoexec version -ENTRYPOINT [ "argoexec" ] diff --git a/vendor/github.com/argoproj/argo-workflows/v3/Makefile b/vendor/github.com/argoproj/argo-workflows/v3/Makefile deleted file mode 100644 index 1a269afbc77..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/Makefile +++ /dev/null @@ -1,680 +0,0 @@ -export SHELL:=/bin/bash -export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit - -# https://stackoverflow.com/questions/4122831/disable-make-builtin-rules-and-variables-from-inside-the-make-file -MAKEFLAGS += --no-builtin-rules -.SUFFIXES: - -BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ') -GIT_COMMIT := $(shell git rev-parse HEAD) -GIT_REMOTE := origin -GIT_BRANCH := $(shell git rev-parse --symbolic-full-name --verify --quiet --abbrev-ref HEAD) -GIT_TAG := $(shell git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged) -GIT_TREE_STATE := $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) -RELEASE_TAG := $(shell if [[ "$(GIT_TAG)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.*$$ ]]; then echo "true"; else echo "false"; fi) -DEV_BRANCH := $(shell [ $(GIT_BRANCH) = master ] || [ `echo $(GIT_BRANCH) | cut -c -8` = release- ] || [ `echo $(GIT_BRANCH) | cut -c -4` = dev- ] || [ $(RELEASE_TAG) = true ] && echo false || echo true) -SRC := $(GOPATH)/src/github.com/argoproj/argo-workflows - -GREP_LOGS := "" - - -# docker image publishing options -IMAGE_NAMESPACE ?= quay.io/argoproj -DEV_IMAGE ?= $(shell [ `uname -s` = Darwin ] && echo true || echo false) - -# declares which cluster to import to in case it's not the default name -K3D_CLUSTER_NAME ?= k3s-default - -# The name of the namespace where Kubernetes resources/RBAC will be installed -KUBE_NAMESPACE ?= argo -MANAGED_NAMESPACE ?= $(KUBE_NAMESPACE) - -# Timeout for wait conditions -E2E_WAIT_TIMEOUT ?= 1m - -E2E_PARALLEL ?= 20 -E2E_SUITE_TIMEOUT ?= 15m - -VERSION := latest -DOCKER_PUSH := false - -# VERSION is the version to be used for files in manifests and should always be latest unless we are releasing -# we assume HEAD means you are on a tag -ifeq ($(RELEASE_TAG),true) -VERSION := $(GIT_TAG) -endif - -# should we build the static files? 
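# (they are skipped for the codegen/lint/test/docs/start goals and on dev branches, per the conditional below)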
-ifneq (,$(filter $(MAKECMDGOALS),codegen lint test docs start)) -STATIC_FILES := false -else -STATIC_FILES ?= $(shell [ $(DEV_BRANCH) = true ] && echo false || echo true) -endif - -# start the Controller -CTRL ?= true -# tail logs -LOGS ?= $(CTRL) -# start the UI -UI ?= $(shell [ $(CTRL) = true ] && echo false || echo true) -# start the Argo Server -API ?= $(UI) -GOTEST ?= go test -v -p 20 -PROFILE ?= minimal -PLUGINS ?= $(shell [ $PROFILE = plugins ] && echo false || echo true) -# by keeping this short we speed up the tests -DEFAULT_REQUEUE_TIME ?= 1s -# whether or not to start the Argo Server in TLS mode -SECURE := false -AUTH_MODE := hybrid -ifeq ($(PROFILE),sso) -AUTH_MODE := sso -endif - -# Which mode to run in: -# * `local` run the workflow-controller and argo-server as single replicas on the local machine (default) -# * `kubernetes` run the workflow-controller and argo-server on the Kubernetes cluster -RUN_MODE := local -KUBECTX := $(shell [[ "`which kubectl`" != '' ]] && kubectl config current-context || echo none) -DOCKER_DESKTOP := $(shell [[ "$(KUBECTX)" == "docker-desktop" ]] && echo true || echo false) -K3D := $(shell [[ "$(KUBECTX)" == "k3d-"* ]] && echo true || echo false) -LOG_LEVEL := debug -UPPERIO_DB_DEBUG := 0 -NAMESPACED := true -ifeq ($(PROFILE),prometheus) -RUN_MODE := kubernetes -endif -ifeq ($(PROFILE),stress) -RUN_MODE := kubernetes -endif - -ALWAYS_OFFLOAD_NODE_STATUS := false - -$(info GIT_COMMIT=$(GIT_COMMIT) GIT_BRANCH=$(GIT_BRANCH) GIT_TAG=$(GIT_TAG) GIT_TREE_STATE=$(GIT_TREE_STATE) RELEASE_TAG=$(RELEASE_TAG) DEV_BRANCH=$(DEV_BRANCH) VERSION=$(VERSION)) -$(info KUBECTX=$(KUBECTX) DOCKER_DESKTOP=$(DOCKER_DESKTOP) K3D=$(K3D) DOCKER_PUSH=$(DOCKER_PUSH)) -$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED)) - -override LDFLAGS += \ - -X github.com/argoproj/argo-workflows/v3.version=$(VERSION) \ - -X github.com/argoproj/argo-workflows/v3.buildDate=${BUILD_DATE} \ - -X github.com/argoproj/argo-workflows/v3.gitCommit=${GIT_COMMIT} \ - -X github.com/argoproj/argo-workflows/v3.gitTreeState=${GIT_TREE_STATE} - -ifneq ($(GIT_TAG),) -override LDFLAGS += -X github.com/argoproj/argo-workflows/v3.gitTag=${GIT_TAG} -endif - -ifndef $(GOPATH) - GOPATH=$(shell go env GOPATH) - export GOPATH -endif - -ARGOEXEC_PKGS := $(shell echo cmd/argoexec && go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) -CLI_PKGS := $(shell echo cmd/argo && go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) -CONTROLLER_PKGS := $(shell echo cmd/workflow-controller && go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-) -TYPES := $(shell find pkg/apis/workflow/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go') -CRDS := $(shell find manifests/base/crds -type f -name 'argoproj.io_*.yaml') -SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \ - pkg/apiclient/_.secondary.swagger.json \ - pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ - pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ - pkg/apiclient/event/event.swagger.json \ - pkg/apiclient/eventsource/eventsource.swagger.json \ - pkg/apiclient/info/info.swagger.json \ - 
pkg/apiclient/sensor/sensor.swagger.json \ - pkg/apiclient/workflow/workflow.swagger.json \ - pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ - pkg/apiclient/workflowtemplate/workflow-template.swagger.json -PROTO_BINARIES := $(GOPATH)/bin/protoc-gen-gogo $(GOPATH)/bin/protoc-gen-gogofast $(GOPATH)/bin/goimports $(GOPATH)/bin/protoc-gen-grpc-gateway $(GOPATH)/bin/protoc-gen-swagger /usr/local/bin/clang-format - -# protoc,my.proto -define protoc - # protoc $(1) - [ -e ./vendor ] || go mod vendor - protoc \ - -I /usr/local/include \ - -I $(CURDIR) \ - -I $(CURDIR)/vendor \ - -I $(GOPATH)/src \ - -I $(GOPATH)/pkg/mod/github.com/gogo/protobuf@v1.3.1/gogoproto \ - -I $(GOPATH)/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \ - --gogofast_out=plugins=grpc:$(GOPATH)/src \ - --grpc-gateway_out=logtostderr=true:$(GOPATH)/src \ - --swagger_out=logtostderr=true,fqn_for_swagger_name=true:. \ - $(1) - perl -i -pe 's|argoproj/argo-workflows/|argoproj/argo-workflows/v3/|g' `echo "$(1)" | sed 's/proto/pb.go/g'` - -endef - -# cli - -.PHONY: cli -cli: dist/argo - -ui/dist/app/index.html: $(shell find ui/src -type f && find ui -maxdepth 1 -type f) - # `yarn install` is fast (~2s), so you can call it safely. - JOBS=max yarn --cwd ui install - # `yarn build` is slow, so we guard it with an up-to-date check. - JOBS=max yarn --cwd ui build - -$(GOPATH)/bin/staticfiles: - go install bou.ke/staticfiles@dd04075 - -ifeq ($(STATIC_FILES),true) -server/static/files.go: $(GOPATH)/bin/staticfiles ui/dist/app/index.html - # Pack UI into a Go file - $(GOPATH)/bin/staticfiles -o server/static/files.go ui/dist/app -else -server/static/files.go: - # Building without static files - cp ./server/static/files.go.stub ./server/static/files.go -endif - -dist/argo-linux-amd64: GOARGS = GOOS=linux GOARCH=amd64 -dist/argo-linux-arm64: GOARGS = GOOS=linux GOARCH=arm64 -dist/argo-linux-ppc64le: GOARGS = GOOS=linux GOARCH=ppc64le -dist/argo-linux-s390x: GOARGS = GOOS=linux GOARCH=s390x -dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64 -dist/argo-darwin-arm64: GOARGS = GOOS=darwin GOARCH=arm64 -dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64 - -dist/argo-windows-%.gz: dist/argo-windows-% - gzip --force --keep dist/argo-windows-$*.exe - -dist/argo-windows-%: server/static/files.go $(CLI_PKGS) go.sum - CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo - -dist/argo-%.gz: dist/argo-% - gzip --force --keep dist/argo-$* - -dist/argo-%: server/static/files.go $(CLI_PKGS) go.sum - CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo - -dist/argo: server/static/files.go $(CLI_PKGS) go.sum -ifeq ($(shell uname -s),Darwin) - # if local, then build fast: use CGO and dynamic-linking - go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS}' -o $@ ./cmd/argo -else - CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo -endif - -argocli-image: - -.PHONY: clis -clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-darwin-arm64.gz dist/argo-windows-amd64.gz - -# controller - -.PHONY: controller -controller: dist/workflow-controller - -dist/workflow-controller: $(CONTROLLER_PKGS) go.sum -ifeq ($(shell uname -s),Darwin) - # if local, then build fast: use CGO and dynamic-linking - go build -gcflags '${GCFLAGS}' -v 
-ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller -else - CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller -endif - -workflow-controller-image: - -# argoexec - -dist/argoexec: $(ARGOEXEC_PKGS) go.sum -ifeq ($(shell uname -s),Darwin) - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec -else - CGO_ENABLED=0 go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec -endif - -argoexec-image: - -%-image: - [ ! -e dist/$* ] || mv dist/$* . - docker build \ - -t $(IMAGE_NAMESPACE)/$*:$(VERSION) \ - --target $* \ - . - [ ! -e $* ] || mv $* dist/ - docker run --rm -t $(IMAGE_NAMESPACE)/$*:$(VERSION) version - if [ $(K3D) = true ]; then k3d image import -c $(K3D_CLUSTER_NAME) $(IMAGE_NAMESPACE)/$*:$(VERSION); fi - if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then docker push $(IMAGE_NAMESPACE)/$*:$(VERSION) ; fi - -.PHONY: codegen -codegen: types swagger manifests $(GOPATH)/bin/mockery docs/fields.md docs/cli/argo.md - go generate ./... - make --directory sdks/java generate - make --directory sdks/python generate - -.PHONY: check-pwd -check-pwd: - -ifneq ($(SRC),$(PWD)) - @echo "⚠️ Code generation will not work if code is not checked out into $(SRC)" >&2 -endif - -.PHONY: types -types: check-pwd pkg/apis/workflow/v1alpha1/generated.proto pkg/apis/workflow/v1alpha1/openapi_generated.go pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go - -.PHONY: swagger -swagger: \ - pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \ - pkg/apiclient/cronworkflow/cron-workflow.swagger.json \ - pkg/apiclient/event/event.swagger.json \ - pkg/apiclient/eventsource/eventsource.swagger.json \ - pkg/apiclient/info/info.swagger.json \ - pkg/apiclient/sensor/sensor.swagger.json \ - pkg/apiclient/workflow/workflow.swagger.json \ - pkg/apiclient/workflowarchive/workflow-archive.swagger.json \ - pkg/apiclient/workflowtemplate/workflow-template.swagger.json \ - manifests/base/crds/full/argoproj.io_workflows.yaml \ - manifests \ - api/openapi-spec/swagger.json \ - api/jsonschema/schema.json - - -$(GOPATH)/bin/mockery: - go install github.com/vektra/mockery/v2@v2.10.0 -$(GOPATH)/bin/controller-gen: - go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1 -$(GOPATH)/bin/go-to-protobuf: - go install k8s.io/code-generator/cmd/go-to-protobuf@v0.21.5 -$(GOPATH)/src/github.com/gogo/protobuf: - [ -e $(GOPATH)/src/github.com/gogo/protobuf ] || git clone --depth 1 https://github.com/gogo/protobuf.git -b v1.3.2 $(GOPATH)/src/github.com/gogo/protobuf -$(GOPATH)/bin/protoc-gen-gogo: - go install github.com/gogo/protobuf/protoc-gen-gogo@v1.3.2 -$(GOPATH)/bin/protoc-gen-gogofast: - go install github.com/gogo/protobuf/protoc-gen-gogofast@v1.3.2 -$(GOPATH)/bin/protoc-gen-grpc-gateway: - go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0 -$(GOPATH)/bin/protoc-gen-swagger: - go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.16.0 -$(GOPATH)/bin/openapi-gen: - go install k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20220124234850-424119656bbf -$(GOPATH)/bin/swagger: - go install github.com/go-swagger/go-swagger/cmd/swagger@v0.28.0 -$(GOPATH)/bin/goimports: - go install golang.org/x/tools/cmd/goimports@v0.1.7 - -/usr/local/bin/clang-format: -ifeq (, $(shell which clang-format)) -ifeq ($(shell uname),Darwin) - brew install clang-format -else 
- sudo apt-get install clang-format -endif -endif - -pkg/apis/workflow/v1alpha1/generated.proto: $(GOPATH)/bin/go-to-protobuf $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf - # These files are generated in a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - # Format proto files. Formatting changes generated code, so we do it here, rather than at lint time. - # Why clang-format? Google uses it. - find pkg/apiclient -name '*.proto'|xargs clang-format -i - $(GOPATH)/bin/go-to-protobuf \ - --go-header-file=./hack/custom-boilerplate.go.txt \ - --packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1beta1 \ - --proto-import $(GOPATH)/src - # Delete the link - [ -e ./v3 ] && rm -rf v3 - touch pkg/apis/workflow/v1alpha1/generated.proto - -# this target will also create a .pb.go and a .pb.gw.go file, but in Make 3 we cannot use _grouped target_, instead we must choose -# one file to represent all of them -pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto - $(call protoc,pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto) - -pkg/apiclient/cronworkflow/cron-workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/cronworkflow/cron-workflow.proto - $(call protoc,pkg/apiclient/cronworkflow/cron-workflow.proto) - -pkg/apiclient/event/event.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/event/event.proto - $(call protoc,pkg/apiclient/event/event.proto) - -pkg/apiclient/eventsource/eventsource.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/eventsource/eventsource.proto - $(call protoc,pkg/apiclient/eventsource/eventsource.proto) - -pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/info/info.proto - $(call protoc,pkg/apiclient/info/info.proto) - -pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto - $(call protoc,pkg/apiclient/sensor/sensor.proto) - -pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto - $(call protoc,pkg/apiclient/workflow/workflow.proto) - -pkg/apiclient/workflowarchive/workflow-archive.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowarchive/workflow-archive.proto - $(call protoc,pkg/apiclient/workflowarchive/workflow-archive.proto) - -pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowtemplate/workflow-template.proto - $(call protoc,pkg/apiclient/workflowtemplate/workflow-template.proto) - -# generate other files for other CRDs -manifests/base/crds/full/argoproj.io_workflows.yaml: $(GOPATH)/bin/controller-gen $(TYPES) ./hack/crdgen.sh ./hack/crds.go - ./hack/crdgen.sh - -.PHONY: manifests -manifests: \ - manifests/install.yaml \ - manifests/namespace-install.yaml \ - manifests/quick-start-minimal.yaml \ - manifests/quick-start-mysql.yaml \ - manifests/quick-start-postgres.yaml \ - dist/manifests/install.yaml \ - dist/manifests/namespace-install.yaml \ - dist/manifests/quick-start-minimal.yaml \ - dist/manifests/quick-start-mysql.yaml \ - dist/manifests/quick-start-postgres.yaml - -.PHONY: 
manifests/install.yaml -manifests/install.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/cluster-install | ./hack/auto-gen-msg.sh > manifests/install.yaml - -.PHONY: manifests/namespace-install.yaml -manifests/namespace-install.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/namespace-install | ./hack/auto-gen-msg.sh > manifests/namespace-install.yaml - -.PHONY: manifests/quick-start-minimal.yaml -manifests/quick-start-minimal.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/minimal | ./hack/auto-gen-msg.sh > manifests/quick-start-minimal.yaml - -.PHONY: manifests/quick-start-mysql.yaml -manifests/quick-start-mysql.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/mysql | ./hack/auto-gen-msg.sh > manifests/quick-start-mysql.yaml - -.PHONY: manifests/quick-start-postgres.yaml -manifests/quick-start-postgres.yaml: /dev/null - kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/postgres | ./hack/auto-gen-msg.sh > manifests/quick-start-postgres.yaml - -dist/manifests/%: manifests/% - @mkdir -p dist/manifests - sed 's/:latest/:$(VERSION)/' manifests/$* > $@ - -# lint/test/etc - -$(GOPATH)/bin/golangci-lint: - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.49.0 - -.PHONY: lint -lint: server/static/files.go $(GOPATH)/bin/golangci-lint - rm -Rf v3 vendor - # If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate. - # * Change `woc.wf.Spec` to `woc.execWf.Spec`. - # * Change `woc.execWf.Status` to `woc.wf.Status`. - @awk '(/woc.wf.Spec/ || /woc.execWf.Status/) && !/not-woc-misuse/ {print FILENAME ":" FNR "\t" $0 ; exit 1}' $(shell find workflow/controller -type f -name '*.go' -not -name '*test*') - # Tidy Go modules - go mod tidy - # Lint Go files - $(GOPATH)/bin/golangci-lint run --fix --verbose - # Lint the UI - if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi - -# for local we have a faster target that prints to stdout, does not use json, and can cache because it has no coverage -.PHONY: test -test: server/static/files.go - go build ./... - env KUBECONFIG=/dev/null $(GOTEST) ./... - # marker file, based on its modification time, we know how long ago this target was run - @mkdir -p dist - touch dist/test - -.PHONY: install -install: githooks - kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE) - kubectl config set-context --current --namespace=$(KUBE_NAMESPACE) - @echo "installing PROFILE=$(PROFILE)" - kubectl kustomize --load-restrictor=LoadRestrictionsNone test/e2e/manifests/$(PROFILE) | sed 's|quay.io/argoproj/|$(IMAGE_NAMESPACE)/|' | sed 's/namespace: argo/namespace: $(KUBE_NAMESPACE)/' | kubectl -n $(KUBE_NAMESPACE) apply --prune -l app.kubernetes.io/part-of=argo -f - -ifeq ($(PROFILE),stress) - kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml -endif -ifeq ($(RUN_MODE),kubernetes) - kubectl -n $(KUBE_NAMESPACE) scale deploy/workflow-controller --replicas 1 - kubectl -n $(KUBE_NAMESPACE) scale deploy/argo-server --replicas 1 -endif - -.PHONY: argosay -argosay: - cd test/e2e/images/argosay/v2 && docker build . 
-t argoproj/argosay:v2 -ifeq ($(K3D),true) - k3d image import -c $(K3D_CLUSTER_NAME) argoproj/argosay:v2 -endif -ifeq ($(DOCKER_PUSH),true) - docker push argoproj/argosay:v2 -endif - -dist/argosay: - mkdir -p dist - cp test/e2e/images/argosay/v2/argosay dist/ - -$(GOPATH)/bin/goreman: - go install github.com/mattn/goreman@v0.3.11 - -.PHONY: start -ifeq ($(RUN_MODE),local) -ifeq ($(API),true) -start: install controller cli $(GOPATH)/bin/goreman -else -start: install controller $(GOPATH)/bin/goreman -endif -else -start: install -endif - @echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE)" -ifneq ($(CTRL),true) - @echo "⚠️️ not starting controller. If you want to test the controller, use 'make start CTRL=true' to start it" -endif -ifneq ($(LOGS),true) - @echo "⚠️️ not starting logs. If you want to tail logs, use 'make start LOGS=true' to start it" -endif -ifneq ($(API),true) - @echo "⚠️️ not starting API. If you want to test the API, use 'make start API=true' to start it" -endif -ifneq ($(UI),true) - @echo "⚠️ not starting UI. If you want to test the UI, run 'make start UI=true' to start it" -endif -ifneq ($(PLUGINS),true) - @echo "⚠️ not starting plugins. If you want to test plugins, run 'make start PROFILE=plugins' to start it" -endif - # Check dex, minio, postgres and mysql are in hosts file -ifeq ($(AUTH_MODE),sso) - grep '127.0.0.1.*dex' /etc/hosts -endif - grep '127.0.0.1.*azurite' /etc/hosts - grep '127.0.0.1.*minio' /etc/hosts - grep '127.0.0.1.*postgres' /etc/hosts - grep '127.0.0.1.*mysql' /etc/hosts - ./hack/port-forward.sh -ifeq ($(RUN_MODE),local) - env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) LOG_LEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) IMAGE_NAMESPACE=$(IMAGE_NAMESPACE) VERSION=$(VERSION) AUTH_MODE=$(AUTH_MODE) NAMESPACED=$(NAMESPACED) NAMESPACE=$(KUBE_NAMESPACE) MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) CTRL=$(CTRL) LOGS=$(LOGS) UI=$(UI) API=$(API) PLUGINS=$(PLUGINS) $(GOPATH)/bin/goreman -set-ports=false -logtime=false start $(shell if [ -z $GREP_LOGS ]; then echo; else echo "| grep \"$(GREP_LOGS)\""; fi) -endif - -$(GOPATH)/bin/stern: - go install github.com/stern/stern@latest - -.PHONY: logs -logs: $(GOPATH)/bin/stern - $(GOPATH)/bin/stern -l workflows.argoproj.io/workflow 2>&1 - -.PHONY: wait -wait: - # Wait for workflow controller - until lsof -i :9090 > /dev/null ; do sleep 10s ; done -ifeq ($(API),true) - # Wait for Argo Server - until lsof -i :2746 > /dev/null ; do sleep 10s ; done -endif - -.PHONY: postgres-cli -postgres-cli: - kubectl exec -ti `kubectl get pod -l app=postgres -o name|cut -c 5-` -- psql -U postgres - -.PHONY: mysql-cli -mysql-cli: - kubectl exec -ti `kubectl get pod -l app=mysql -o name|cut -c 5-` -- mysql -u mysql -ppassword argo - -test-cli: ./dist/argo - -test-%: - go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $* -parallel $(E2E_PARALLEL) ./test/e2e - -.PHONY: test-examples -test-examples: - ./hack/test-examples.sh - -.PHONY: test-%-sdk -test-%-sdk: - make --directory sdks/$* install test -B - -Test%: - go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags api,cli,cron,executor,examples,corefunctional,functional,plugins -parallel $(E2E_PARALLEL) ./test/e2e -run='.*/$*' - - -# clean - -.PHONY: clean -clean: - go clean - rm -Rf test-results node_modules vendor v2 v3 argoexec-linux-amd64 dist/* 
ui/dist - -# swagger - -pkg/apis/workflow/v1alpha1/openapi_generated.go: $(GOPATH)/bin/openapi-gen $(TYPES) - # These files are generated in a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - $(GOPATH)/bin/openapi-gen \ - --go-header-file ./hack/custom-boilerplate.go.txt \ - --input-dirs github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --output-package github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \ - --report-filename pkg/apis/api-rules/violation_exceptions.list - # Delete the link - [ -e ./v3 ] && rm -rf v3 - - -# generates many other files (listers, informers, client etc). -pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(TYPES) - # These files are generated in a v3/ folder by the tool. Link them to the root folder - [ -e ./v3 ] || ln -s . v3 - bash $(GOPATH)/pkg/mod/k8s.io/code-generator@v0.21.5/generate-groups.sh \ - "deepcopy,client,informer,lister" \ - github.com/argoproj/argo-workflows/v3/pkg/client github.com/argoproj/argo-workflows/v3/pkg/apis \ - workflow:v1alpha1 \ - --go-header-file ./hack/custom-boilerplate.go.txt - # Delete the link - [ -e ./v3 ] && rm -rf v3 - -dist/kubernetes.swagger.json: - @mkdir -p dist - ./hack/recurl.sh dist/kubernetes.swagger.json https://raw.githubusercontent.com/kubernetes/kubernetes/v1.23.3/api/openapi-spec/swagger.json - -pkg/apiclient/_.secondary.swagger.json: hack/swagger/secondaryswaggergen.go pkg/apis/workflow/v1alpha1/openapi_generated.go dist/kubernetes.swagger.json - rm -Rf v3 vendor - # We have `hack/swagger` so that most hack scripts do not depend on the whole code base, which would otherwise make them slow. - go run ./hack/swagger secondaryswaggergen - -# we always ignore the conflicts, so let's automate figuring out how many there will be and just use that -dist/swagger-conflicts: $(GOPATH)/bin/swagger $(SWAGGER_FILES) - swagger mixin $(SWAGGER_FILES) 2>&1 | grep -c skipping > dist/swagger-conflicts || true - -dist/mixed.swagger.json: $(GOPATH)/bin/swagger $(SWAGGER_FILES) dist/swagger-conflicts - swagger mixin -c $(shell cat dist/swagger-conflicts) $(SWAGGER_FILES) -o dist/mixed.swagger.json - -dist/swaggifed.swagger.json: dist/mixed.swagger.json hack/swaggify.sh - cat dist/mixed.swagger.json | ./hack/swaggify.sh > dist/swaggifed.swagger.json - -dist/kubeified.swagger.json: dist/swaggifed.swagger.json dist/kubernetes.swagger.json - go run ./hack/swagger kubeifyswagger dist/swaggifed.swagger.json dist/kubeified.swagger.json - -dist/swagger.0.json: $(GOPATH)/bin/swagger dist/kubeified.swagger.json - swagger flatten --with-flatten minimal --with-flatten remove-unused dist/kubeified.swagger.json -o dist/swagger.0.json - -api/openapi-spec/swagger.json: $(GOPATH)/bin/swagger dist/swagger.0.json - swagger flatten --with-flatten remove-unused dist/swagger.0.json -o api/openapi-spec/swagger.json - -api/jsonschema/schema.json: api/openapi-spec/swagger.json hack/jsonschema/main.go - go run ./hack/jsonschema - -go-diagrams/diagram.dot: ./hack/diagram/main.go - rm -Rf go-diagrams - go run ./hack/diagram - -docs/assets/diagram.png: go-diagrams/diagram.dot - cd go-diagrams && dot -Tpng diagram.dot -o ../docs/assets/diagram.png - -docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) hack/docgen.go - env ARGO_SECURE=false ARGO_INSECURE_SKIP_VERIFY=false ARGO_SERVER= ARGO_INSTANCEID= go run ./hack docgen - -# generates several other files -docs/cli/argo.md: $(CLI_PKGS) go.sum server/static/files.go hack/cli/main.go - go run ./hack/cli - -# docs -
-/usr/local/bin/mdspell: - npm i -g markdown-spellcheck - -.PHONY: docs-spellcheck -docs-spellcheck: /usr/local/bin/mdspell - # check docs for spelling mistakes - mdspell --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name fields.md -not -name executor_swagger.md -not -path '*/cli/*') - -/usr/local/bin/markdown-link-check: - npm i -g markdown-link-check - -.PHONY: docs-linkcheck -docs-linkcheck: /usr/local/bin/markdown-link-check - # check docs for broken links - markdown-link-check -q -c .mlc_config.json $(shell find docs -name '*.md' -not -name fields.md -not -name executor_swagger.md) - -/usr/local/bin/markdownlint: - npm i -g markdownlint-cli - -.PHONY: docs-lint -docs-lint: /usr/local/bin/markdownlint - # lint docs - markdownlint docs --fix --ignore docs/fields.md --ignore docs/executor_swagger.md --ignore docs/cli --ignore docs/walk-through/the-structure-of-workflow-specs.md - -/usr/local/bin/mkdocs: - python -m pip install mkdocs==1.2.4 mkdocs_material==8.1.9 mkdocs-spellcheck==0.2.1 - -.PHONY: docs -docs: /usr/local/bin/mkdocs \ - docs-spellcheck \ - docs-lint \ - docs-linkcheck - # check environment-variables.md contains all variables mentioned in the code - ./hack/check-env-doc.sh - # check all docs are listed in mkdocs.yml - ./hack/check-mkdocs.sh - # build the docs - mkdocs build - # fix the fields.md document - go run -tags fields ./hack parseexamples - # tell the user the fastest way to edit docs - @echo "ℹ️ If you want to preview your docs, open site/index.html. If you want to edit them with hot-reload, run 'make docs-serve' to start mkdocs on port 8000" - -.PHONY: docs-serve -docs-serve: docs - mkdocs serve - -# pre-commit checks - -.git/hooks/%: hack/git/hooks/% - @mkdir -p .git/hooks - cp hack/git/hooks/$* .git/hooks/$* - -.PHONY: githooks -githooks: .git/hooks/pre-commit .git/hooks/commit-msg - -.PHONY: pre-commit -pre-commit: codegen lint docs - # marker file, based on its modification time, we know how long ago this target was run - touch dist/pre-commit - -# release - -release-notes: /dev/null - version=$(VERSION) envsubst < hack/release-notes.md > release-notes - -.PHONY: checksums -checksums: - for f in ./dist/argo-*.gz; do openssl dgst -sha256 "$$f" | awk ' { print $$2 }' > "$$f".sha256 ; done diff --git a/vendor/github.com/argoproj/argo-workflows/v3/OWNERS b/vendor/github.com/argoproj/argo-workflows/v3/OWNERS deleted file mode 100644 index 1b1c0889d15..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/OWNERS +++ /dev/null @@ -1,16 +0,0 @@ -owners: -- alexec -- sarabala1979 - -reviewers: -- tczhao -- xianlubird - -approvers: -- alexec -- alexmt -- dtaniwaki -- edlee2121 -- jessesuen -- simster7 -- terrytangyuan diff --git a/vendor/github.com/argoproj/argo-workflows/v3/Procfile b/vendor/github.com/argoproj/argo-workflows/v3/Procfile deleted file mode 100644 index bdf3714af7b..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/Procfile +++ /dev/null @@ -1,4 +0,0 @@ -controller: [ "$CTRL" = "true" ] && ./hack/free-port.sh 9090 && ARGO_EXECUTOR_PLUGINS=${PLUGINS} ARGO_REMOVE_PVC_PROTECTION_FINALIZER=true ARGO_PROGRESS_PATCH_TICK_DURATION=7s DEFAULT_REQUEUE_TIME=${DEFAULT_REQUEUE_TIME} LEADER_ELECTION_IDENTITY=local ALWAYS_OFFLOAD_NODE_STATUS=${ALWAYS_OFFLOAD_NODE_STATUS} OFFLOAD_NODE_STATUS_TTL=30s WORKFLOW_GC_PERIOD=30s UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ARCHIVED_WORKFLOW_GC_PERIOD=30s ./dist/workflow-controller --executor-image
${IMAGE_NAMESPACE}/argoexec:${VERSION} --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --managed-namespace=${MANAGED_NAMESPACE} --loglevel ${LOG_LEVEL} -argo-server: [ "$API" = "true" ] &&./hack/free-port.sh 2746 && UPPERIO_DB_DEBUG=${UPPERIO_DB_DEBUG} ./dist/argo --loglevel ${LOG_LEVEL} server --namespaced=${NAMESPACED} --namespace ${NAMESPACE} --auth-mode ${AUTH_MODE} --secure=$SECURE --x-frame-options=SAMEORIGIN -ui: [ "$UI" = "true" ] && ./hack/free-port.sh 8080 && yarn --cwd ui install && yarn --cwd ui start -logs: [ "$LOGS" = "true" ] && make logs \ No newline at end of file diff --git a/vendor/github.com/argoproj/argo-workflows/v3/README.md b/vendor/github.com/argoproj/argo-workflows/v3/README.md deleted file mode 100644 index b569abe6244..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/README.md +++ /dev/null @@ -1,165 +0,0 @@ -[![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) -[![CI](https://github.com/argoproj/argo-workflows/workflows/CI/badge.svg)](https://github.com/argoproj/argo-workflows/actions?query=event%3Apush+branch%3Amaster) -[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830) -[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows) -[![Twitter Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://twitter.com/argoproj) - -## What is Argo Workflows? - -Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. Argo -Workflows is implemented as a Kubernetes CRD (Custom Resource Definition). - -* Define workflows where each step in the workflow is a container. -* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic - graph (DAG). -* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo - Workflows on Kubernetes. - -Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) hosted project. - -[![Argo Workflows in 5 minutes](https://img.youtube.com/vi/TZgLkCFQ2tk/0.jpg)](https://www.youtube.com/watch?v=TZgLkCFQ2tk) - -## Use Cases - -* Machine Learning pipelines -* Data and batch processing -* ETL -* Infrastructure automation -* CI/CD - -## Why Argo Workflows? - -* Argo Workflows is the most popular workflow execution engine for Kubernetes. -* It can run 1000s of workflows a day, each with 1000s of concurrent tasks. -* Our users say it is lighter-weight, faster, more powerful, and easier to use -* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based - environments. -* Cloud agnostic and can run on any Kubernetes cluster. 
- -[Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee) - -## Try Argo Workflows - -[Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo) (login using GitHub) - -![Screenshot](docs/assets/screenshot.png) - -## Documentation - -[View the docs](https://argoproj.github.io/argo-workflows/) - -## Ecosystem - -Just some of the projects that use or rely on Argo Workflows: - -* [Argo Events](https://github.com/argoproj/argo-events) -* [Couler](https://github.com/couler-proj/couler) -* [Katib](https://github.com/kubeflow/katib) -* [Kedro](https://kedro.readthedocs.io/en/stable/) -* [Kubeflow Pipelines](https://github.com/kubeflow/pipelines) -* [Netflix Metaflow](https://metaflow.org) -* [Onepanel](https://www.onepanel.ai/) -* [Ploomber](https://github.com/ploomber/ploomber) -* [Seldon](https://github.com/SeldonIO/seldon-core) -* [SQLFlow](https://github.com/sql-machine-learning/sqlflow) -* [Orchest](https://github.com/orchest/orchest/) - -## Client Libraries - -Check out our [Java, Golang and Python clients](docs/client-libraries.md); a short Go submission sketch appears below. - -## Quickstart - -The following commands install Argo Workflows as well as some commonly used components: - -```bash -kubectl create ns argo -kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo-workflows/master/manifests/quick-start-postgres.yaml -``` - -> **These manifests are intended to help you get started quickly. They contain hard-coded passwords that are publicly available and are not suitable for production.** - -## Who uses Argo Workflows? - -[Official Argo Workflows user list](USERS.md) - -## Documentation - -* [Get started here](docs/quick-start.md) -* [How to write Argo Workflow specs](https://github.com/argoproj/argo-workflows/blob/master/examples/README.md) -* [How to configure your artifact repository](docs/configure-artifact-repository.md) - -## Features - -* UI to visualize and manage Workflows -* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw) -* Workflow templating to store commonly used Workflows in the cluster -* Archiving Workflows after executing for later access -* Scheduled workflows using cron -* Server interface with REST API (HTTP and GRPC) -* DAG or Steps based declaration of workflows -* Step level inputs & outputs (artifacts/parameters) -* Loops -* Parameterization -* Conditionals -* Timeouts (step & workflow level) -* Retry (step & workflow level) -* Resubmit (memoized) -* Suspend & Resume -* Cancellation -* K8s resource orchestration -* Exit Hooks (notifications, cleanup) -* Garbage collection of completed workflows -* Scheduling (affinity/tolerations/node selectors) -* Volumes (ephemeral/existing) -* Parallelism limits -* Daemoned steps -* DinD (docker-in-docker) -* Script steps -* Event emission -* Prometheus metrics -* Multiple executors -* Multiple pod and workflow garbage collection strategies -* Automatically calculated resource usage per step -* Java/Golang/Python SDKs -* Pod Disruption Budget support -* Single sign-on (OAuth2/OIDC) -* Webhook triggering -* CLI -* Out-of-the-box and custom Prometheus metrics -* Windows container support -* Embedded widgets -* Multiplex log viewer - -## Community Meetings - -We host monthly community meetings where we and the community showcase demos and discuss the current and future state of -the project. Feel free to join us! For Community Meeting information, minutes and recordings -please [see here](https://bit.ly/argo-wf-cmty-mtng).
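For readers coming from the Client Libraries and Quickstart sections above, here is a minimal sketch of submitting the classic hello-world workflow through the generated Golang clientset. It assumes a local kubeconfig and Argo installed in an `argo` namespace; the clientset API can vary between releases, so treat this as illustrative rather than authoritative.

```go
package main

import (
	"context"
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
	wfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
)

func main() {
	// Build client config from the default kubeconfig location (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// Typed client for Workflow resources; the "argo" namespace is an assumption.
	wfClient := wfclientset.NewForConfigOrDie(config).ArgoprojV1alpha1().Workflows("argo")

	// A single-template workflow, equivalent to the hello-world YAML example.
	wf := &wfv1.Workflow{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "hello-world-"},
		Spec: wfv1.WorkflowSpec{
			Entrypoint: "whalesay",
			Templates: []wfv1.Template{{
				Name: "whalesay",
				Container: &apiv1.Container{
					Image:   "docker/whalesay:latest",
					Command: []string{"cowsay", "hello world"},
				},
			}},
		},
	}

	created, err := wfClient.Create(context.Background(), wf, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("submitted workflow:", created.Name)
}
```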
- -Participation in the Argo Workflows project is governed by -the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) - -## Community Blogs and Presentations - -* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo) -* [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY) -* [Argo Workflows and Pipelines - CI/CD, Machine Learning, and Other Kubernetes Workflows](https://youtu.be/UMaivwrAyTA) -* [Argo Ansible role: Provisioning Argo Workflows on OpenShift](https://medium.com/@marekermk/provisioning-argo-on-openshift-with-ansible-and-kustomize-340a1fda8b50) -* [Argo Workflows vs Apache Airflow](http://bit.ly/30YNIvT) -* [CI/CD with Argo on Kubernetes](https://medium.com/@bouwe.ceunen/ci-cd-with-argo-on-kubernetes-28c1a99616a9) -* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/) -* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/) -* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) -* [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html) -* TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859) - -## Project Resources - -* Argo GitHub: https://github.com/argoproj -* Argo Website: https://argoproj.github.io/ -* Argo Slack: [click here to join](https://argoproj.github.io/community/join-slack) - -## Security - -See [SECURITY.md](SECURITY.md). diff --git a/vendor/github.com/argoproj/argo-workflows/v3/SECURITY.md b/vendor/github.com/argoproj/argo-workflows/v3/SECURITY.md deleted file mode 100644 index acb4b3ad0af..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/SECURITY.md +++ /dev/null @@ -1,30 +0,0 @@ -# Security - -## Reporting a Vulnerability - -If you find a security related bug in Argo Workflows, we kindly ask you for responsible -disclosure and for giving us appropriate time to react, analyze and develop a -fix to mitigate the found security vulnerability. - -Please report vulnerabilities by e-mail to the following address: - -* cncf-argo-security@lists.cncf.io - -All vulnerabilities and associated information will be treated with full confidentiality. - -## Public Disclosure - -Security vulnerabilities will be disclosed via [release notes](docs/releasing.md) and using the -[GitHub Security Advisories](https://github.com/argoproj/argo-workflows/security/advisories) -feature to keep our community well informed, and will credit you for your findings (unless you prefer to stay anonymous, of course). - -## Vulnerability Scanning - -See [static code analysis](docs/static-code-analysis.md). - -## Securing Argo Workflows - -See [docs/security.md](docs/security.md) for information about securing your Argo Workflows instance. 
- - - diff --git a/vendor/github.com/argoproj/argo-workflows/v3/USERS.md b/vendor/github.com/argoproj/argo-workflows/v3/USERS.md deleted file mode 100644 index 1f51b18b2c1..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/USERS.md +++ /dev/null @@ -1,199 +0,0 @@ -## Argo Workflows User Community Surveys & Feedback - -Please find [here](https://github.com/argoproj/argoproj/blob/master/community/user_surveys/ArgoWorkflows2020SurveySummary.pdf) Argo Workflows user community 2020 survey results and the 2021 results [here](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee?gi=857daaa1faa9). - -## Who uses Argo Workflows? - -As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization or project name in the following sections. - -### Organizations Using Argo - -Currently, the following organizations are **officially** using Argo Workflows: - -1. [23mofang](https://www.23mofang.com/) -1. [4intelligence](https://4intelligence.com.br/) -1. [7shifts](https://www.7shifts.com) -1. [Acquia](https://www.acquia.com/) -1. [Adevinta](https://www.adevinta.com/) -1. [Admiralty](https://admiralty.io/) -1. [Adobe](https://www.adobe.com/) -1. [AKRA](https://www.akra.de/) -1. [Akuity](https://akuity.io/) -1. [Alibaba Cloud](https://www.alibabacloud.com/about) -1. [Alibaba Group](https://www.alibabagroup.com/) -1. [Ant Group](https://www.antgroup.com/) -1. [AppDirect](https://www.appdirect.com/) -1. [Arabesque](https://www.arabesque.com/) -1. [Argonaut](https://www.argonaut.dev/) -1. [ArthurAI](https://arthur.ai/) -1. [Astraea](https://astraea.earth/) -1. [BasisAI](https://basis-ai.com/) -1. [BEI.RE](https://www.bei.re/) -1. [bimspot](https://bimspot.io) -1. [BioBox Analytics](https://biobox.io) -1. [BlackRock](https://www.blackrock.com/) -1. [Bloomberg](https://www.bloomberg.com/) -1. [bonprix](https://en.bonprix.de/corporate/our-company/) -1. [Botkeeper](https://www.botkeeper.com/) -1. [ByteDance](https://www.bytedance.com/en/) -1. [Canva](https://www.canva.com/) -1. [Capact](https://capact.io/) -1. [Capital One](https://www.capitalone.com/tech/) -1. [Carrefour](https://www.carrefour.com/) -1. [CarTrack](https://www.cartrack.com/) -1. [Casavo](https://casavo.com/) -1. [CCRi](https://www.ccri.com/) -1. [Cisco](https://www.cisco.com/) -1. [CloudSeeds](https://www.cloudseeds.de/) -1. [Codec](https://www.codec.ai/) -1. [Codefresh](https://www.codefresh.io/) -1. [Commodus Tech](https://www.commodus.tech) -1. [Concierge Render](https://www.conciergerender.com) -1. [Cookpad](https://cookpad.com/) -1. [CoreFiling](https://www.corefiling.com/) -1. [CoreWeave Cloud](https://www.coreweave.com) -1. [Cratejoy](https://www.cratejoy.com/) -1. [Cruise](https://getcruise.com/) -1. [CVision AI](https://www.cvisionai.com) -1. [CyberAgent](https://www.cyberagent.co.jp/en/) -1. [Cyrus Biotechnology](https://cyrusbio.com/) -1. [Data4Risk](https://www.data4risk.com/) -1. [Datable](https://datable.jp/) -1. [Datadog](https://www.datadoghq.com/) -1. [DataRobot](https://www.datarobot.com/) -1. [DataStax](https://www.datastax.com/) -1. [DDEV](https://www.ddev.com/) -1. [DevSamurai](https://www.devsamurai.com/) -1. [Devtron Labs](https://github.com/devtron-labs/devtron) -1. [DLR](https://www.dlr.de/eoc/) -1. [Dyno Therapeutics](https://dynotx.com) -1. [EBSCO Information Services](https://www.ebsco.com/) -1. [Enso Finance](https://enso.finance/) -1. [Equinor](https://www.equinor.com/) -1. [Elastic](https://www.elastic.co/) -1. 
[Fairwinds](https://fairwinds.com/) -1. [FOLIO](http://corp.folio-sec.com/) -1. [FreeWheel](https://freewheel.com/) -1. [Fynd Trak](https://trak.fynd.com/) -1. [Galixir](https://www.galixir.com/) -1. [Gardener](https://gardener.cloud/) -2. [Gepardec](https://gepardec.com/) -1. [GitHub](https://github.com/) -1. [Gitpod](https://www.gitpod.io/) -1. [Gladly](https://gladly.com/) -1. [Gllue](https://gllue.com/) -1. [Glovo](https://www.glovoapp.com) -1. [Google](https://www.google.com/intl/en/about/our-company/) -1. [Graviti](https://www.graviti.com) -1. [Greenhouse](https://greenhouse.io) -1. [H2O.ai](https://h2o.ai/) -1. [Habx](https://www.habx.com/) -1. [Helio](https://helio.exchange) -1. [Hemisphere Digital](https://hemisphere.digital) -1. [HOVER](https://hover.to) -1. [HSBC](https://hsbc.com) -1. [IBM](https://ibm.com) -1. [Iflytek](https://www.iflytek.com/) -1. [Inceptio Technology](https://www.inceptio.ai/) -1. [incrmntal](https://incrmntal.com/) -1. [InsideBoard](https://www.insideboard.com) -1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/) -1. [Intralinks](https://www.intralinks.com/) -1. [Intuit](https://www.intuit.com/) -1. [InVision](https://www.invisionapp.com/) -1. [İşbank](https://www.isbank.com.tr/en) -1. [Jungle](https://www.jungle.ai/) -1. [Karius](https://www.kariusdx.com/) -1. [KarrotPay](https://www.daangnpay.com/) -1. [Kasa](https://www.kasa.co.kr/) -1. [KintoHub](https://www.kintohub.com/) -1. [Localytics](https://www.localytics.com/) -1. [Lumin Digital](https://lumindigital.com/) -1. [Maersk](https://www.maersk.com/solutions/digital-solutions) -1. [MariaDB](https://mariadb.com/) -1. [Marmalade](https://www.marmalade.co/) -1. [Max Kelsen](https://maxkelsen.com/) -1. [Microba](https://www.microba.com/) -1. [Microblink](https://microblink.com/) -1. [Mirantis](https://mirantis.com/) -1. [Mixpanel](https://mixpanel.com) -1. [Motus](https://www.motus.com) -1. [New Relic](https://newrelic.com/) -1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/) -1. [Norwegian Refugee Council](https://www.nrc.no/) -1. [nrd.io](https://nrd.io/) -1. [NVIDIA](https://www.nvidia.com/) -1. [One Concern](https://oneconcern.com/) -1. [Onepanel](https://docs.onepanel.ai) -1. [Oracle](https://www.oracle.com/) -1. [Orchest](https://www.orchest.io/) -1. [OVH](https://www.ovh.com/) -1. [PathAI](https://www.pathai.com) -1. [PDOK](https://www.pdok.nl/) -1. [Peak AI](https://www.peak.ai/) -1. [Pipekit](https://pipeit.io) -1. [Pismo](https://pismo.io/) -1. [Polarpoint.io](https://polarpoint.io) -1. [Pollination](https://pollination.cloud) -1. [Preferred Networks](https://www.preferred-networks.jp/en/) -1. [Promaton](https://www.promaton.com/) -1. [Prudential](https://www.prudential.com.sg/) -1. [Quantibio](http://quantibio.com/us/en/) -1. [QuantumBlack](https://quantumblack.com/) -1. [Raccoon Digital Marketing](https://raccoon.ag/) -1. [Ramboll Shair](https://ramboll-shair.com/) -1. [Ravelin](https://www.ravelin.com/) -1. [Reco](https://reco.ai) -1. [Red Hat](https://www.redhat.com/en) -1. [Reserved AI](https://reserved.ai/) -1. [Riskified](https://www.riskified.com) -1. [Robinhood](https://robinhood.com/) -1. [Sage (Sage AI Labs)](https://sage.com/) -1. [SAP Concur](https://www.concur.com/) -1. [SAP Fieldglass](https://www.fieldglass.com/) -1. [SAP Hybris](https://cx.sap.com/) -1. [SAS](https://www.sas.com/) -1. [Schlumberger](https://slb.com/) -1. [Securitas](https://securitas.com/) -1. [SegmentStream](https://segmentstream.com) -1. 
[Sendible](https://sendible.com) -1. [Sidecar Technologies](https://hello.getsidecar.com/) -1. [smallcase](https://smallcase.com/) -1. [Softonic](https://hello.softonic.com/) -1. [Sohu](https://www.sohu.com/) -1. [Stillwater Supercomputing, Inc](http://www.stillwater-sc.com/) -1. [strongDM](https://www.strongdm.com/) -1. [Styra](https://www.styra.com/) -1. [Splunk](https://www.splunk.com/) -1. [Sutpc](http://www.sutpc.com/) -1. [Threekit](https://www.threekit.com/) -1. [Tiger Analytics](https://www.tigeranalytics.com/) -1. [Tradeshift](https://tradeshift.com/) -1. [Trendyol](https://trendyol.com) -1. [Tulip](https://tulip.com/) -1. [Ubie](https://ubie.life/) -1. [UFirstGroup](https://www.ufirstgroup.com) -1. [Vispera](https://www.vispera.co) -1. [VMware](https://www.vmware.com/) -1. [Voyager](https://investvoyager.com/) -1. [Wavefront](https://www.wavefront.com/) -1. [Wellcome Trust](https://wellcome.ac.uk/) -1. [WooliesX](https://wooliesx.com.au/) -1. [Woolworths Group](https://www.woolworthsgroup.com.au/) -1. [Workiva](https://www.workiva.com/) -1. [Xueqiu](https://www.xueqiu.com/) -1. [Yubo](https://www.yubo.live/) -1. [Zhihu](https://www.zhihu.com/) - -### Projects Using Argo - -In addition, the following projects are **officially** using Argo Workflows: - -1. [Couler](https://github.com/couler-proj/couler) -1. [Hera Workflows](https://github.com/argoproj-labs/hera-workflows) -1. [Kubeflow](https://www.kubeflow.org/) -1. [Metaflow](https://www.metaflow.org) -1. [Onepanel](https://github.com/onepanelio/onepanel) -1. [SQLFlow](https://github.com/sql-machine-learning/sqlflow) -1. [BisQue](https://github.com/UCSB-VRL/bisqueUCSB) -1. [Tator](https://www.tator.io) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/config.go b/vendor/github.com/argoproj/argo-workflows/v3/config/config.go deleted file mode 100644 index 96f34ea584b..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/config.go +++ /dev/null @@ -1,292 +0,0 @@ -package config - -import ( - "fmt" - "math" - "net/url" - "time" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -type ResourceRateLimit struct { - Limit float64 `json:"limit"` - Burst int `json:"burst"` -} - -// Config contain the configuration settings for the workflow controller -type Config struct { - - // NodeEvents configures how node events are emitted - NodeEvents NodeEvents `json:"nodeEvents,omitempty"` - - // Executor holds container customizations for the executor to use when running pods - Executor *apiv1.Container `json:"executor,omitempty"` - - // MainContainer holds container customization for the main container - MainContainer *apiv1.Container `json:"mainContainer,omitempty"` - - // KubeConfig specifies a kube config file for the wait & init containers - KubeConfig *KubeConfig `json:"kubeConfig,omitempty"` - - // ArtifactRepository contains the default location of an artifact repository for container artifacts - ArtifactRepository wfv1.ArtifactRepository `json:"artifactRepository,omitempty"` - - // Namespace is a label selector filter to limit the controller's watch to a specific namespace - Namespace string `json:"namespace,omitempty"` - - // InstanceID is a label selector to limit the controller's watch to a specific instance. 
It - // contains an arbitrary value that is carried forward into its pod labels, under the key - // workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This - // enables a controller to only receive workflow and pod events that it is interested about, - // in order to support multiple controllers in a single cluster, and ultimately allows the - // controller itself to be bundled as part of a higher level application. If omitted, the - // controller watches workflows and pods that *are not* labeled with an instance id. - InstanceID string `json:"instanceID,omitempty"` - - // MetricsConfig specifies configuration for metrics emission. Metrics are enabled and emitted on localhost:9090/metrics - // by default. - MetricsConfig MetricsConfig `json:"metricsConfig,omitempty"` - - // TelemetryConfig specifies configuration for telemetry emission. Telemetry is enabled and emitted in the same endpoint - // as metrics by default, but can be overridden using this config. - TelemetryConfig MetricsConfig `json:"telemetryConfig,omitempty"` - - // Parallelism limits the max total parallel workflows that can execute at the same time - Parallelism int `json:"parallelism,omitempty"` - - // NamespaceParallelism limits the max workflows that can execute at the same time in a namespace - NamespaceParallelism int `json:"namespaceParallelism,omitempty"` - - // ResourceRateLimit limits the rate at which pods are created - ResourceRateLimit *ResourceRateLimit `json:"resourceRateLimit,omitempty"` - - // Persistence contains the workflow persistence DB configuration - Persistence *PersistConfig `json:"persistence,omitempty"` - - // Links to related apps. - Links []*wfv1.Link `json:"links,omitempty"` - - // WorkflowDefaults are values that will apply to all Workflows from this controller, unless overridden on the Workflow-level - WorkflowDefaults *wfv1.Workflow `json:"workflowDefaults,omitempty"` - - // PodSpecLogStrategy enables the logging of podspec on controller log. - PodSpecLogStrategy PodSpecLogStrategy `json:"podSpecLogStrategy,omitempty"` - - // PodGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed. - // Value must be non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately. - // Defaults to the Kubernetes default of 30 seconds. - PodGCGracePeriodSeconds *int64 `json:"podGCGracePeriodSeconds,omitempty"` - - // PodGCDeleteDelayDuration specifies the duration in seconds before the pods in the GC queue get deleted. - // Value must be non-negative integer. A zero value indicates that the pods will be deleted immediately. - // Defaults to 5 seconds. - PodGCDeleteDelayDuration *metav1.Duration `json:"podGCDeleteDelayDuration,omitempty"` - - // WorkflowRestrictions restricts the controller to executing Workflows that meet certain restrictions - WorkflowRestrictions *WorkflowRestrictions `json:"workflowRestrictions,omitempty"` - - // Adding configurable initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC. - InitialDelay metav1.Duration `json:"initialDelay,omitempty"` - - // The command/args for each image, needed when the command is not specified and the emissary executor is used. 
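- // For example (a sketch; the image name is illustrative), the controller config could contain: - //   images: - //     docker/whalesay:latest: - //       cmd: [cowsay]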
- // https://argoproj.github.io/argo-workflows/workflow-executors/#emissary-emissary - Images map[string]Image `json:"images,omitempty"` - - RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` - - // NavColor is a UI navigation bar background color - NavColor string `json:"navColor,omitempty"` - - // SSO holds settings for single sign-on - SSO SSOConfig `json:"sso,omitempty"` -} - -func (c Config) GetExecutor() *apiv1.Container { - if c.Executor != nil { - return c.Executor - } - return &apiv1.Container{} -} - -func (c Config) GetResourceRateLimit() ResourceRateLimit { - if c.ResourceRateLimit != nil { - return *c.ResourceRateLimit - } - return ResourceRateLimit{ - Limit: math.MaxFloat32, - Burst: math.MaxInt32, - } -} - -func (c Config) GetPodGCDeleteDelayDuration() time.Duration { - if c.PodGCDeleteDelayDuration == nil { - return 5 * time.Second - } - - return c.PodGCDeleteDelayDuration.Duration -} - -func (c Config) ValidateProtocol(inputProtocol string, allowedProtocol []string) error { - for _, protocol := range allowedProtocol { - if inputProtocol == protocol { - return nil - } - } - return fmt.Errorf("protocol %s is not allowed", inputProtocol) -} - -func (c *Config) Sanitize(allowedProtocol []string) error { - links := c.Links - - for _, link := range links { - u, err := url.Parse(link.URL) - if err != nil { - return err - } - err = c.ValidateProtocol(u.Scheme, allowedProtocol) - if err != nil { - return err - } - link.URL = u.String() // reassembles the URL into a valid URL string - } - return nil -} - -// PodSpecLogStrategy contains the configuration for logging the pod spec in the controller log for debugging purposes -type PodSpecLogStrategy struct { - FailedPod bool `json:"failedPod,omitempty"` - AllPods bool `json:"allPods,omitempty"` -} - -// KubeConfig is used for the wait & init sidecar containers to communicate with a k8s apiserver via an out-of-cluster method; -// it is used when the workflow controller is in a different cluster from the workflow workloads -type KubeConfig struct { - // SecretName of the kubeconfig secret - // may not be empty if kubeConfig specified - SecretName string `json:"secretName"` - // SecretKey of the kubeconfig in the secret - // may not be empty if kubeConfig specified - SecretKey string `json:"secretKey"` - // VolumeName of kubeconfig, default to 'kubeconfig' - VolumeName string `json:"volumeName,omitempty"` - // MountPath of the kubeconfig secret, default to '/kube/config' - MountPath string `json:"mountPath,omitempty"` -} - -type PersistConfig struct { - NodeStatusOffload bool `json:"nodeStatusOffLoad,omitempty"` - // Archive workflows to persistence. - Archive bool `json:"archive,omitempty"` - // ArchiveLabelSelector holds a LabelSelector to determine workflow persistence.
- ArchiveLabelSelector *metav1.LabelSelector `json:"archiveLabelSelector,omitempty"` - // in days - ArchiveTTL TTL `json:"archiveTTL,omitempty"` - ClusterName string `json:"clusterName,omitempty"` - ConnectionPool *ConnectionPool `json:"connectionPool,omitempty"` - PostgreSQL *PostgreSQLConfig `json:"postgresql,omitempty"` - MySQL *MySQLConfig `json:"mysql,omitempty"` - SkipMigration bool `json:"skipMigration,omitempty"` -} - -func (c PersistConfig) GetArchiveLabelSelector() (labels.Selector, error) { - if c.ArchiveLabelSelector == nil { - return labels.Everything(), nil - } - return metav1.LabelSelectorAsSelector(c.ArchiveLabelSelector) -} - -func (c PersistConfig) GetClusterName() string { - if c.ClusterName != "" { - return c.ClusterName - } - return "default" -} - -type ConnectionPool struct { - MaxIdleConns int `json:"maxIdleConns,omitempty"` - MaxOpenConns int `json:"maxOpenConns,omitempty"` - ConnMaxLifetime TTL `json:"connMaxLifetime,omitempty"` -} - -type DatabaseConfig struct { - Host string `json:"host"` - Port int `json:"port,omitempty"` - Database string `json:"database"` - TableName string `json:"tableName,omitempty"` - UsernameSecret apiv1.SecretKeySelector `json:"userNameSecret,omitempty"` - PasswordSecret apiv1.SecretKeySelector `json:"passwordSecret,omitempty"` -} - -func (c DatabaseConfig) GetHostname() string { - if c.Port == 0 { - return c.Host - } - return fmt.Sprintf("%s:%v", c.Host, c.Port) -} - -type PostgreSQLConfig struct { - DatabaseConfig - SSL bool `json:"ssl,omitempty"` - SSLMode string `json:"sslMode,omitempty"` -} - -type MySQLConfig struct { - DatabaseConfig - Options map[string]string `json:"options,omitempty"` -} - -// MetricsConfig defines a config for a metrics server -type MetricsConfig struct { - // Enabled controls metric emission. Default is true, set "enabled: false" to turn off - Enabled *bool `json:"enabled,omitempty"` - // DisableLegacy turns off legacy metrics - // DEPRECATED: Legacy metrics are now removed, this field is ignored - DisableLegacy bool `json:"disableLegacy,omitempty"` - // MetricsTTL sets how often custom metrics are cleared from memory - MetricsTTL TTL `json:"metricsTTL,omitempty"` - // Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics" - Path string `json:"path,omitempty"` - // Port is the port where metrics are emitted. 
Default is "9090" - Port int `json:"port,omitempty"` - // IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors - IgnoreErrors bool `json:"ignoreErrors,omitempty"` - // Secure is a flag that starts the metrics servers using TLS - Secure *bool `json:"secure,omitempty"` -} - -func (mc MetricsConfig) GetSecure(defaultValue bool) bool { - if mc.Secure != nil { - return *mc.Secure - } - return defaultValue -} - -type WorkflowRestrictions struct { - TemplateReferencing TemplateReferencing `json:"templateReferencing,omitempty"` -} - -type TemplateReferencing string - -const ( - TemplateReferencingStrict TemplateReferencing = "Strict" - TemplateReferencingSecure TemplateReferencing = "Secure" -) - -func (req *WorkflowRestrictions) MustUseReference() bool { - if req == nil { - return false - } - return req.TemplateReferencing == TemplateReferencingStrict || req.TemplateReferencing == TemplateReferencingSecure -} - -func (req *WorkflowRestrictions) MustNotChangeSpec() bool { - if req == nil { - return false - } - return req.TemplateReferencing == TemplateReferencingSecure -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/controller.go b/vendor/github.com/argoproj/argo-workflows/v3/config/controller.go deleted file mode 100644 index 66b916e55c7..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/controller.go +++ /dev/null @@ -1,62 +0,0 @@ -package config - -import ( - "context" - "fmt" - "strings" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/yaml" -) - -type Controller interface { - Get(context.Context) (*Config, error) -} - -type controller struct { - namespace string - // name of the config map - configMap string - kubeclientset kubernetes.Interface -} - -func NewController(namespace, name string, kubeclientset kubernetes.Interface) Controller { - return &controller{ - namespace: namespace, - configMap: name, - kubeclientset: kubeclientset, - } -} - -func parseConfigMap(cm *apiv1.ConfigMap, config *Config) error { - // The key in the configmap to retrieve workflow configuration from. - // Content encoding is expected to be YAML. 
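- // If a top-level "config" key is present, its value is used verbatim and it must be the only entry; - // otherwise every key/value pair is folded into one YAML document, with multi-line values indented by two spaces under their key (see below).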
- rawConfig, ok := cm.Data["config"] - if ok && len(cm.Data) != 1 { - return fmt.Errorf("if you have an item in your config map named 'config', you must only have one item") - } - if !ok { - for name, value := range cm.Data { - if strings.Contains(value, "\n") { - // this mucky code indents with two spaces - rawConfig = rawConfig + name + ":\n " + strings.Join(strings.Split(strings.Trim(value, "\n"), "\n"), "\n ") + "\n" - } else { - rawConfig = rawConfig + name + ": " + value + "\n" - } - } - } - err := yaml.UnmarshalStrict([]byte(rawConfig), config) - return err -} - -func (cc *controller) Get(ctx context.Context) (*Config, error) { - config := &Config{} - cmClient := cc.kubeclientset.CoreV1().ConfigMaps(cc.namespace) - cm, err := cmClient.Get(ctx, cc.configMap, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return config, parseConfigMap(cm, config) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/image.go b/vendor/github.com/argoproj/argo-workflows/v3/config/image.go deleted file mode 100644 index be061068da4..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/image.go +++ /dev/null @@ -1,6 +0,0 @@ -package config - -type Image struct { - Entrypoint []string `json:"entrypoint,omitempty"` - Cmd []string `json:"cmd,omitempty"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/node_events.go b/vendor/github.com/argoproj/argo-workflows/v3/config/node_events.go deleted file mode 100644 index bda0a7ffef2..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/node_events.go +++ /dev/null @@ -1,10 +0,0 @@ -package config - -type NodeEvents struct { - Enabled *bool `json:"enabled,omitempty"` - SendAsPod bool `json:"sendAsPod,omitempty"` -} - -func (e NodeEvents) IsEnabled() bool { - return e.Enabled == nil || *e.Enabled -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/rbac.go b/vendor/github.com/argoproj/argo-workflows/v3/config/rbac.go deleted file mode 100644 index 8cdf3e8d325..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/rbac.go +++ /dev/null @@ -1,9 +0,0 @@ -package config - -type RBACConfig struct { - Enabled bool `json:"enabled,omitempty"` -} - -func (c *RBACConfig) IsEnabled() bool { - return c != nil && c.Enabled -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/retention_policy.go b/vendor/github.com/argoproj/argo-workflows/v3/config/retention_policy.go deleted file mode 100644 index 564c082b043..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/retention_policy.go +++ /dev/null @@ -1,7 +0,0 @@ -package config - -type RetentionPolicy struct { - Completed int `json:"completed,omitempty"` - Failed int `json:"failed,omitempty"` - Errored int `json:"errored,omitempty"` -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/sso.go b/vendor/github.com/argoproj/argo-workflows/v3/config/sso.go deleted file mode 100644 index 4c1a18254e2..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/sso.go +++ /dev/null @@ -1,31 +0,0 @@ -package config - -import ( - "time" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type SSOConfig struct { - Issuer string `json:"issuer"` - IssuerAlias string `json:"issuerAlias,omitempty"` - ClientID apiv1.SecretKeySelector `json:"clientId"` - ClientSecret apiv1.SecretKeySelector `json:"clientSecret"` - RedirectURL string `json:"redirectUrl"` - RBAC *RBACConfig `json:"rbac,omitempty"` - // additional scopes (on top of "openid") - Scopes 
[]string `json:"scopes,omitempty"` - SessionExpiry metav1.Duration `json:"sessionExpiry,omitempty"` - // customGroupClaimName will override the groups claim name - CustomGroupClaimName string `json:"customGroupClaimName,omitempty"` - UserInfoPath string `json:"userInfoPath,omitempty"` - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` -} - -func (c SSOConfig) GetSessionExpiry() time.Duration { - if c.SessionExpiry.Duration > 0 { - return c.SessionExpiry.Duration - } - return 10 * time.Hour -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/config/ttl.go b/vendor/github.com/argoproj/argo-workflows/v3/config/ttl.go deleted file mode 100644 index d26f00fcb80..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/config/ttl.go +++ /dev/null @@ -1,59 +0,0 @@ -package config - -import ( - "encoding/json" - "errors" - "strconv" - "strings" - "time" -) - -// time.Duration forces you to specify in millis, and does not support days -// see https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations -type TTL time.Duration - -func (l TTL) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Duration(l).String()) -} - -func (l *TTL) UnmarshalJSON(b []byte) error { - var v interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - switch value := v.(type) { - case string: - if value == "" { - *l = 0 - return nil - } - if strings.HasSuffix(value, "d") { - days, err := strconv.Atoi(strings.TrimSuffix(value, "d")) - *l = TTL(time.Duration(days) * 24 * time.Hour) - return err - } - if strings.HasSuffix(value, "h") { - hours, err := strconv.Atoi(strings.TrimSuffix(value, "h")) - *l = TTL(time.Duration(hours) * time.Hour) - return err - } - if strings.HasSuffix(value, "m") { - minutes, err := strconv.Atoi(strings.TrimSuffix(value, "m")) - *l = TTL(time.Duration(minutes) * time.Minute) - return err - } - if strings.HasSuffix(value, "s") { - seconds, err := strconv.Atoi(strings.TrimSuffix(value, "s")) - *l = TTL(time.Duration(seconds) * time.Second) - return err - } - d, err := time.ParseDuration(value) - if err != nil { - return err - } - *l = TTL(d) - return nil - default: - return errors.New("invalid TTL") - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/mkdocs.yml b/vendor/github.com/argoproj/argo-workflows/v3/mkdocs.yml deleted file mode 100644 index d636b814415..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/mkdocs.yml +++ /dev/null @@ -1,246 +0,0 @@ -site_name: Argo Workflows - The workflow engine for Kubernetes -repo_url: https://github.com/argoproj/argo-workflows -strict: true -theme: - name: material - custom_dir: docs/overrides - font: - text: Roboto - code: Roboto Mono - logo: assets/logo.png - palette: - - scheme: default - primary: indigo - toggle: - icon: material/toggle-switch-off-outline - name: Switch to dark mode - - scheme: slate - toggle: - icon: material/toggle-switch - name: Switch to light mode - features: - - navigation.tabs - - navigation.tabs.sticky - - navigation.top -extra: - analytics: - provider: google - property: G-5Z1VTPDL73 -markdown_extensions: - - codehilite - - admonition - - pymdownx.superfences - - pymdownx.details - - toc: - permalink: true -nav: - - Home: README.md - - Getting Started: - - quick-start.md - - training.md - - Walk Through: - - walk-through/index.md - - walk-through/argo-cli.md - - walk-through/hello-world.md - - walk-through/parameters.md - - walk-through/steps.md - - walk-through/dag.md - - walk-through/artifacts.md - - 
walk-through/the-structure-of-workflow-specs.md - - walk-through/secrets.md - - walk-through/scripts-and-results.md - - walk-through/output-parameters.md - - walk-through/loops.md - - walk-through/conditionals.md - - walk-through/retrying-failed-or-errored-steps.md - - walk-through/recursion.md - - walk-through/exit-handlers.md - - walk-through/timeouts.md - - walk-through/volumes.md - - walk-through/suspending.md - - walk-through/daemon-containers.md - - walk-through/sidecars.md - - walk-through/hardwired-artifacts.md - - walk-through/kubernetes-resources.md - - walk-through/docker-in-docker-using-sidecars.md - - walk-through/custom-template-variable-reference.md - - walk-through/continuous-integration-examples.md - - User Guide: - - workflow-concepts.md - - Custom Resource Kinds: - - workflow-templates.md - - cluster-workflow-templates.md - - cron-workflows.md - - Template Types: - - http-template.md - - container-set-template.md - - data-sourcing-and-transformation.md - - inline-templates.md - - Artifacts: - - workflow-inputs.md - - key-only-artifacts.md - - artifact-repository-ref.md - - conditional-artifacts-parameters.md - - Access Control: - - service-accounts.md - - workflow-rbac.md - - Features: - # this is a bit of a dumping ground, I've tried to order with key features first - - variables.md - - retries.md - - lifecyclehook.md - - synchronization.md - - memoization.md - - template-defaults.md - - enhanced-depends-logic.md - - node-field-selector.md - - Status: - - resource-duration.md - - estimated-duration.md - - progress.md - - workflow-creator.md - - Patterns: - - empty-dir.md - - cron-backfill.md - - workflow-of-workflows.md - - workflow-notifications.md - - work-avoidance.md - - UI Features: - - artifact-visualization.md - - widgets.md - - intermediate-inputs.md - - Debugging Tools: - - workflow-events.md - - debug-pause.md - - API: - - rest-api.md - - access-token.md - - rest-examples.md - - events.md - - webhooks.md - - workflow-submitting-workflow.md - - async-pattern.md - - client-libraries.md - - swagger.md - - Plugins: - - plugins.md - - executor_plugins.md - - executor_swagger.md - - plugin-directory.md - - Best Practices: - - workflow-pod-security-context.md - - tolerating-pod-deletion.md - - running-at-massive-scale.md - - Use Cases: - - use-cases/ci-cd.md - - use-cases/data-processing.md - - use-cases/infrastructure-automation.md - - use-cases/machine-learning.md - - use-cases/other.md - - use-cases/stream-processing.md - - use-cases/webhdfs.md - - FAQ: faq.md - - kubectl.md - - ide-setup.md - - Field Reference: fields.md - - CLI Reference: - - argo: cli/argo.md - - argo archive: cli/argo_archive.md - - argo archive delete: cli/argo_archive_delete.md - - argo archive get: cli/argo_archive_get.md - - argo archive list: cli/argo_archive_list.md - - argo archive list-label-keys: cli/argo_archive_list-label-keys.md - - argo archive list-label-values: cli/argo_archive_list-label-values.md - - argo archive resubmit: cli/argo_archive_resubmit.md - - argo archive retry: cli/argo_archive_retry.md - - argo auth: cli/argo_auth.md - - argo auth token: cli/argo_auth_token.md - - argo cluster-template: cli/argo_cluster-template.md - - argo cluster-template create: cli/argo_cluster-template_create.md - - argo cluster-template delete: cli/argo_cluster-template_delete.md - - argo cluster-template get: cli/argo_cluster-template_get.md - - argo cluster-template lint: cli/argo_cluster-template_lint.md - - argo cluster-template list: cli/argo_cluster-template_list.md - - argo 
completion: cli/argo_completion.md - - argo cp: cli/argo_cp.md - - argo cron: cli/argo_cron.md - - argo cron create: cli/argo_cron_create.md - - argo cron delete: cli/argo_cron_delete.md - - argo cron get: cli/argo_cron_get.md - - argo cron lint: cli/argo_cron_lint.md - - argo cron list: cli/argo_cron_list.md - - argo cron resume: cli/argo_cron_resume.md - - argo cron suspend: cli/argo_cron_suspend.md - - argo delete: cli/argo_delete.md - - argo executor-plugin: cli/argo_executor-plugin.md - - argo executor-plugin build: cli/argo_executor-plugin_build.md - - argo get: cli/argo_get.md - - argo lint: cli/argo_lint.md - - argo list: cli/argo_list.md - - argo logs: cli/argo_logs.md - - argo node: cli/argo_node.md - - argo resubmit: cli/argo_resubmit.md - - argo resume: cli/argo_resume.md - - argo retry: cli/argo_retry.md - - argo server: cli/argo_server.md - - argo stop: cli/argo_stop.md - - argo submit: cli/argo_submit.md - - argo suspend: cli/argo_suspend.md - - argo template: cli/argo_template.md - - argo template create: cli/argo_template_create.md - - argo template delete: cli/argo_template_delete.md - - argo template get: cli/argo_template_get.md - - argo template lint: cli/argo_template_lint.md - - argo template list: cli/argo_template_list.md - - argo terminate: cli/argo_terminate.md - - argo version: cli/argo_version.md - - argo wait: cli/argo_wait.md - - argo watch: cli/argo_watch.md - - Operator Manual: - - installation.md - - releases.md - - upgrading.md - - security.md - - Configuration: - - managed-namespace.md - - workflow-controller-configmap.md - - configure-artifact-repository.md - - configure-archive-logs.md - - links.md - - environment-variables.md - - default-workflow-specs.md - - offloading-large-workflows.md - - workflow-archive.md - - metrics.md - - workflow-executors.md - - workflow-restrictions.md - - sidecar-injection.md - - manually-create-secrets.md - - Argo Server: - - argo-server.md - - argo-server-auth-mode.md - - tls.md - - argo-server-sso.md - - argo-server-sso-argocd.md - - Best Practices: - - high-availability.md - - disaster-recovery.md - - scaling.md - - cost-optimisation.md - - windows.md - - Developer Guide: - - CONTRIBUTING.md - - architecture.md - - running-locally.md - - doc-changes.md - - mentoring.md - - public-api.md - - static-code-analysis.md - - stress-testing.md - - releasing.md - - survey-data-privacy.md - - Roadmap: roadmap.md - - Blog: https://blog.argoproj.io/ - - Slack: https://argoproj.github.io/community/join-slack - - Twitter: https://twitter.com/argoproj - - LinkedIn: https://www.linkedin.com/company/argoproj/ diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/ansi_sql_change.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/ansi_sql_change.go deleted file mode 100644 index 50682fd1ad3..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/ansi_sql_change.go +++ /dev/null @@ -1,11 +0,0 @@ -package sqldb - -import "upper.io/db.v3/lib/sqlbuilder" - -// represent a straight forward change that is compatible with all database providers -type ansiSQLChange string - -func (s ansiSQLChange) apply(session sqlbuilder.Database) error { - _, err := session.Exec(string(s)) - return err -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/archived_workflow_labels.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/archived_workflow_labels.go deleted file mode 100644 index e20b4c28876..00000000000 --- 
a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/archived_workflow_labels.go +++ /dev/null @@ -1,99 +0,0 @@ -package sqldb - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "upper.io/db.v3" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// ListWorkflowsLabelKeys returns distinct name from argo_archived_workflows_labels table -// SELECT DISTINCT name FROM argo_archived_workflows_labels -func (r *workflowArchive) ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) { - var archivedWfLabels []archivedWorkflowLabelRecord - - err := r.session. - Select(db.Raw("DISTINCT name")). - From(archiveLabelsTableName). - All(&archivedWfLabels) - if err != nil { - return nil, err - } - labelKeys := make([]string, len(archivedWfLabels)) - for i, md := range archivedWfLabels { - labelKeys[i] = md.Key - } - - return &wfv1.LabelKeys{Items: labelKeys}, nil -} - -// ListWorkflowsLabelValues returns distinct value from argo_archived_workflows_labels table -// SELECT DISTINCT value FROM argo_archived_workflows_labels WHERE name=labelkey -func (r *workflowArchive) ListWorkflowsLabelValues(key string) (*wfv1.LabelValues, error) { - var archivedWfLabels []archivedWorkflowLabelRecord - err := r.session. - Select(db.Raw("DISTINCT value")). - From(archiveLabelsTableName). - Where(db.Cond{"name": key}). - All(&archivedWfLabels) - if err != nil { - return nil, err - } - labels := make([]string, len(archivedWfLabels)) - for i, md := range archivedWfLabels { - labels[i] = md.Value - } - - return &wfv1.LabelValues{Items: labels}, nil -} - -func labelsClause(t dbType, requirements labels.Requirements) (db.Compound, error) { - var conds []db.Compound - for _, r := range requirements { - cond, err := requirementToCondition(t, r) - if err != nil { - return nil, err - } - conds = append(conds, cond) - } - return db.And(conds...), nil -} - -func requirementToCondition(t dbType, r labels.Requirement) (db.Compound, error) { - // Should we "sanitize our inputs"? No. - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - // Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. 
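- // As an illustration (assuming the default table names), the selector "env in (dev,test)" compiles to: - //   exists (select 1 from argo_archived_workflows_labels where clustername = argo_archived_workflows.clustername and uid = argo_archived_workflows.uid and name = 'env' and value in ('dev', 'test'))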
- // https://kb.objectrocket.com/postgresql/casting-in-postgresql-570#string+to+integer+casting - switch r.Operator() { - case selection.DoesNotExist: - return db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key())), nil - case selection.Equals, selection.DoubleEquals: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), r.Values().List()[0])), nil - case selection.In: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value in ('%s'))", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil - case selection.NotEquals: - return db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), r.Values().List()[0])), nil - case selection.NotIn: - return db.Raw(fmt.Sprintf("not exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and value in ('%s'))", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), strings.Join(r.Values().List(), "', '"))), nil - case selection.Exists: - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s')", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key())), nil - case selection.GreaterThan: - i, err := strconv.Atoi(r.Values().List()[0]) - if err != nil { - return nil, err - } - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and cast(value as %s) > %d)", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), t.intType(), i)), nil - case selection.LessThan: - i, err := strconv.Atoi(r.Values().List()[0]) - if err != nil { - return nil, err - } - return db.Raw(fmt.Sprintf("exists (select 1 from %s where clustername = %s.clustername and uid = %s.uid and name = '%s' and cast(value as %s) < %d)", archiveLabelsTableName, archiveTableName, archiveTableName, r.Key(), t.intType(), i)), nil - } - return nil, fmt.Errorf("operation %v is not supported", r.Operator()) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/backfill_nodes.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/backfill_nodes.go deleted file mode 100644 index c63d67272b3..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/backfill_nodes.go +++ /dev/null @@ -1,77 +0,0 @@ -package sqldb - -import ( - "encoding/json" - "fmt" - - log "github.com/sirupsen/logrus" - "upper.io/db.v3" - "upper.io/db.v3/lib/sqlbuilder" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -type backfillNodes struct { - tableName string -} - -func (s backfillNodes) String() string { - return fmt.Sprintf("backfillNodes{%s}", s.tableName) -} - -func (s backfillNodes) apply(session sqlbuilder.Database) (err error) { - log.Info("Backfill node status") - rs, err := session.SelectFrom(s.tableName). - Columns("workflow"). - Where(db.Cond{"version": nil}). 
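- // select only rows that have not been back-filled yet (version is null)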
- Query() - if err != nil { - return err - } - - defer func() { - tmpErr := rs.Close() - if err == nil { - err = tmpErr - } - }() - - for rs.Next() { - if err := rs.Err(); err != nil { - return err - } - workflow := "" - err := rs.Scan(&workflow) - if err != nil { - return err - } - var wf *wfv1.Workflow - err = json.Unmarshal([]byte(workflow), &wf) - if err != nil { - return err - } - marshalled, version, err := nodeStatusVersion(wf.Status.Nodes) - if err != nil { - return err - } - logCtx := log.WithFields(log.Fields{"name": wf.Name, "namespace": wf.Namespace, "version": version}) - logCtx.Info("Back-filling node status") - res, err := session.Update(archiveTableName). - Set("version", wf.ResourceVersion). - Set("nodes", marshalled). - Where(db.Cond{"name": wf.Name}). - And(db.Cond{"namespace": wf.Namespace}). - Exec() - if err != nil { - return err - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return err - } - if rowsAffected != 1 { - logCtx.WithField("rowsAffected", rowsAffected).Warn("Expected exactly one row affected") - } - } - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/db_type.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/db_type.go deleted file mode 100644 index 364e388ed96..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/db_type.go +++ /dev/null @@ -1,30 +0,0 @@ -package sqldb - -import ( - "database/sql" - - "github.com/go-sql-driver/mysql" - "upper.io/db.v3" -) - -type dbType string - -const ( - MySQL dbType = "mysql" - Postgres dbType = "postgres" -) - -func dbTypeFor(session db.Database) dbType { - switch session.Driver().(*sql.DB).Driver().(type) { - case *mysql.MySQLDriver: - return MySQL - } - return Postgres -} - -func (t dbType) intType() string { - if t == MySQL { - return "signed" - } - return "int" -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/explosive_offload_node_status_repo.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/explosive_offload_node_status_repo.go deleted file mode 100644 index d9e1816af2c..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/explosive_offload_node_status_repo.go +++ /dev/null @@ -1,38 +0,0 @@ -package sqldb - -import ( - "fmt" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -var ( - ExplosiveOffloadNodeStatusRepo OffloadNodeStatusRepo = &explosiveOffloadNodeStatusRepo{} - OffloadNotSupportedError = fmt.Errorf("offload node status is not supported") -) - -type explosiveOffloadNodeStatusRepo struct{} - -func (n *explosiveOffloadNodeStatusRepo) IsEnabled() bool { - return false -} - -func (n *explosiveOffloadNodeStatusRepo) Save(string, string, wfv1.Nodes) (string, error) { - return "", OffloadNotSupportedError -} - -func (n *explosiveOffloadNodeStatusRepo) Get(string, string) (wfv1.Nodes, error) { - return nil, OffloadNotSupportedError -} - -func (n *explosiveOffloadNodeStatusRepo) List(string) (map[UUIDVersion]wfv1.Nodes, error) { - return nil, OffloadNotSupportedError -} - -func (n *explosiveOffloadNodeStatusRepo) Delete(string, string) error { - return OffloadNotSupportedError -} - -func (n *explosiveOffloadNodeStatusRepo) ListOldOffloads(string) (map[string][]string, error) { - return nil, OffloadNotSupportedError -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/migrate.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/migrate.go deleted file mode 100644 index 6d91cf650e9..00000000000 
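The small db_type.go helper removed above picks the SQL dialect by inspecting the concrete driver behind the session and treats everything that is not MySQL as Postgres. A compilable sketch of the same check against a plain *sql.DB (the upper.io session exposes the same *sql.DB via Driver(), which is the only difference):

package sqldialect

import (
	"database/sql"

	"github.com/go-sql-driver/mysql"
)

// dialectFor mirrors the deleted dbTypeFor: anything that is not the
// go-sql-driver MySQL driver defaults to Postgres.
func dialectFor(db *sql.DB) string {
	if _, ok := db.Driver().(*mysql.MySQLDriver); ok {
		return "mysql"
	}
	return "postgres"
}

// intType mirrors the dialect-specific cast target used by the numeric
// label comparisons (GreaterThan/LessThan): MySQL casts with "signed",
// Postgres with "int".
func intType(dialect string) string {
	if dialect == "mysql" {
		return "signed"
	}
	return "int"
}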
--- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/migrate.go +++ /dev/null @@ -1,293 +0,0 @@ -package sqldb - -import ( - "context" - - log "github.com/sirupsen/logrus" - "upper.io/db.v3/lib/sqlbuilder" -) - -type Migrate interface { - Exec(ctx context.Context) error -} - -func NewMigrate(session sqlbuilder.Database, clusterName string, tableName string) Migrate { - return migrate{session, clusterName, tableName} -} - -type migrate struct { - session sqlbuilder.Database - clusterName string - tableName string -} - -type change interface { - apply(session sqlbuilder.Database) error -} - -func ternary(condition bool, left, right change) change { - if condition { - return left - } else { - return right - } -} - -func (m migrate) Exec(ctx context.Context) (err error) { - { - // poor mans SQL migration - _, err = m.session.Exec("create table if not exists schema_history(schema_version int not null)") - if err != nil { - return err - } - rs, err := m.session.Query("select schema_version from schema_history") - if err != nil { - return err - } - defer func() { - tmpErr := rs.Close() - if err == nil { - err = tmpErr - } - }() - if !rs.Next() { - _, err := m.session.Exec("insert into schema_history values(-1)") - if err != nil { - return err - } - } else if err := rs.Err(); err != nil { - return err - } - } - dbType := dbTypeFor(m.session) - - log.WithFields(log.Fields{"clusterName": m.clusterName, "dbType": dbType}).Info("Migrating database schema") - - // try and make changes idempotent, as it is possible for the change to apply, but the archive update to fail - // and therefore try and apply again next try - - for changeSchemaVersion, change := range []change{ - ansiSQLChange(`create table if not exists ` + m.tableName + ` ( - id varchar(128) , - name varchar(256), - phase varchar(25), - namespace varchar(256), - workflow text, - startedat timestamp default CURRENT_TIMESTAMP, - finishedat timestamp default CURRENT_TIMESTAMP, - primary key (id, namespace) -)`), - ansiSQLChange(`create unique index idx_name on ` + m.tableName + ` (name)`), - ansiSQLChange(`create table if not exists argo_workflow_history ( - id varchar(128) , - name varchar(256), - phase varchar(25), - namespace varchar(256), - workflow text, - startedat timestamp default CURRENT_TIMESTAMP, - finishedat timestamp default CURRENT_TIMESTAMP, - primary key (id, namespace) -)`), - ansiSQLChange(`alter table argo_workflow_history rename to argo_archived_workflows`), - ternary(dbType == MySQL, - ansiSQLChange(`drop index idx_name on `+m.tableName), - ansiSQLChange(`drop index idx_name`), - ), - ansiSQLChange(`create unique index idx_name on ` + m.tableName + `(name, namespace)`), - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` drop primary key`), - ansiSQLChange(`alter table `+m.tableName+` drop constraint `+m.tableName+`_pkey`), - ), - ansiSQLChange(`alter table ` + m.tableName + ` add primary key(name,namespace)`), - // huh - why does the pkey not have the same name as the table - history - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows drop primary key`), - ansiSQLChange(`alter table argo_archived_workflows drop constraint argo_workflow_history_pkey`), - ), - ansiSQLChange(`alter table argo_archived_workflows add primary key(id)`), - // *** - // THE CHANGES ABOVE THIS LINE MAY BE IN PER-PRODUCTION SYSTEMS - DO NOT CHANGE THEM - // *** - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows change column id uid varchar(128)`), - 
ansiSQLChange(`alter table argo_archived_workflows rename column id to uid`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column uid varchar(128) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column uid set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column phase varchar(25) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column phase set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column namespace varchar(256) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column namespace set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column workflow text not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column workflow set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column startedat timestamp not null default CURRENT_TIMESTAMP`), - ansiSQLChange(`alter table argo_archived_workflows alter column startedat set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column finishedat timestamp not null default CURRENT_TIMESTAMP`), - ansiSQLChange(`alter table argo_archived_workflows alter column finishedat set not null`), - ), - ansiSQLChange(`alter table argo_archived_workflows add clustername varchar(64)`), // DNS entry can only be max 63 bytes - ansiSQLChange(`update argo_archived_workflows set clustername = '` + m.clusterName + `' where clustername is null`), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column clustername varchar(64) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column clustername set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows drop primary key`), - ansiSQLChange(`alter table argo_archived_workflows drop constraint argo_archived_workflows_pkey`), - ), - ansiSQLChange(`alter table argo_archived_workflows add primary key(clustername,uid)`), - ansiSQLChange(`create index argo_archived_workflows_i1 on argo_archived_workflows (clustername,namespace)`), - // argo_archived_workflows now looks like: - // clustername(not null) | uid(not null) | | name (null) | phase(not null) | namespace(not null) | workflow(not null) | startedat(not null) | finishedat(not null) - // remove unused columns - ansiSQLChange(`alter table ` + m.tableName + ` drop column phase`), - ansiSQLChange(`alter table ` + m.tableName + ` drop column startedat`), - ansiSQLChange(`alter table ` + m.tableName + ` drop column finishedat`), - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` change column id uid varchar(128)`), - ansiSQLChange(`alter table `+m.tableName+` rename column id to uid`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` modify column uid varchar(128) not null`), - ansiSQLChange(`alter table `+m.tableName+` alter column uid set not null`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` modify column namespace varchar(256) not null`), - ansiSQLChange(`alter table `+m.tableName+` alter column namespace set not null`), - ), - ansiSQLChange(`alter table ` + m.tableName + ` add column clustername varchar(64)`), // DNS cannot be longer than 64 bytes - ansiSQLChange(`update ` + m.tableName + ` set 
clustername = '` + m.clusterName + `' where clustername is null`), - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` modify column clustername varchar(64) not null`), - ansiSQLChange(`alter table `+m.tableName+` alter column clustername set not null`), - ), - ansiSQLChange(`alter table ` + m.tableName + ` add column version varchar(64)`), - ansiSQLChange(`alter table ` + m.tableName + ` add column nodes text`), - backfillNodes{tableName: m.tableName}, - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` modify column nodes text not null`), - ansiSQLChange(`alter table `+m.tableName+` alter column nodes set not null`), - ), - ansiSQLChange(`alter table ` + m.tableName + ` drop column workflow`), - // add a timestamp column to indicate updated time - ansiSQLChange(`alter table ` + m.tableName + ` add column updatedat timestamp not null default current_timestamp`), - // remove the old primary key and add a new one - ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` drop primary key`), - ansiSQLChange(`alter table `+m.tableName+` drop constraint `+m.tableName+`_pkey`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`drop index idx_name on `+m.tableName), - ansiSQLChange(`drop index idx_name`), - ), - ansiSQLChange(`alter table ` + m.tableName + ` drop column name`), - ansiSQLChange(`alter table ` + m.tableName + ` add primary key(clustername,uid,version)`), - ansiSQLChange(`create index ` + m.tableName + `_i1 on ` + m.tableName + ` (clustername,namespace)`), - // argo_workflows now looks like: - // clustername(not null) | uid(not null) | namespace(not null) | version(not null) | nodes(not null) | updatedat(not null) - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column workflow json not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column workflow type json using workflow::json`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column name varchar(256) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column name set not null`), - ), - // clustername(not null) | uid(not null) | | name (not null) | phase(not null) | namespace(not null) | workflow(not null) | startedat(not null) | finishedat(not null) - ansiSQLChange(`create index ` + m.tableName + `_i2 on ` + m.tableName + ` (clustername,namespace,updatedat)`), - // The argo_archived_workflows_labels is really provided as a way to create queries on labels that are fast because they - // use indexes. When displaying, it might be better to look at the `workflow` column. - // We could have added a `labels` column to argo_archived_workflows, but then we would have had to do free-text - // queries on it which would be slow due to having to table scan. - // The key has an optional prefix(253 chars) + '/' + name(63 chars) - // Why is the key called "name" not "key"? Key is an SQL reserved word. - ansiSQLChange(`create table if not exists argo_archived_workflows_labels ( - clustername varchar(64) not null, - uid varchar(128) not null, - name varchar(317) not null, - value varchar(63) not null, - primary key (clustername, uid, name), - foreign key (clustername, uid) references argo_archived_workflows(clustername, uid) on delete cascade -)`), - // MySQL can only store 64k in a TEXT field, both MySQL and Posgres can store 1GB in JSON. 
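Every entry in this change list is guarded by the schema_history version gate implemented in applyChange further down: the single-row table is advanced with a compare-and-set UPDATE, and the DDL runs only when that update wins. A minimal sketch of the gate, assuming MySQL-style ? placeholders (Postgres would use $1/$2) and a plain *sql.DB; the vendored code actually applies the change on the session outside the transaction, but folding it into the transaction keeps the sketch self-contained:

package migrate

import (
	"context"
	"database/sql"
)

// applyOnce runs change n only if schema_history still records n-1, so
// re-running the whole migration is a no-op and two concurrent migrators
// cannot both apply the same step.
func applyOnce(ctx context.Context, db *sql.DB, n int, apply func(*sql.Tx) error) (err error) {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			_ = tx.Rollback()
		}
	}()
	res, err := tx.Exec("update schema_history set schema_version = ? where schema_version = ?", n, n-1)
	if err != nil {
		return err
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if affected == 1 {
		// we won the compare-and-set, so this change has not run yet
		if err = apply(tx); err != nil {
			return err
		}
	}
	return tx.Commit()
}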
- ternary(dbType == MySQL, - ansiSQLChange(`alter table `+m.tableName+` modify column nodes json not null`), - ansiSQLChange(`alter table `+m.tableName+` alter column nodes type json using nodes::json`), - ), - // add instanceid column to table argo_archived_workflows - ansiSQLChange(`alter table argo_archived_workflows add column instanceid varchar(64)`), - ansiSQLChange(`update argo_archived_workflows set instanceid = '' where instanceid is null`), - ternary(dbType == MySQL, - ansiSQLChange(`alter table argo_archived_workflows modify column instanceid varchar(64) not null`), - ansiSQLChange(`alter table argo_archived_workflows alter column instanceid set not null`), - ), - // drop argo_archived_workflows index - ternary(dbType == MySQL, - ansiSQLChange(`drop index argo_archived_workflows_i1 on argo_archived_workflows`), - ansiSQLChange(`drop index argo_archived_workflows_i1`), - ), - // add argo_archived_workflows index - ansiSQLChange(`create index argo_archived_workflows_i1 on argo_archived_workflows (clustername,instanceid,namespace)`), - // drop m.tableName indexes - // xxx_i1 is not needed because xxx_i2 already covers it, drop both and recreat an index named xxx_i1 - ternary(dbType == MySQL, - ansiSQLChange(`drop index `+m.tableName+`_i1 on `+m.tableName), - ansiSQLChange(`drop index `+m.tableName+`_i1`), - ), - ternary(dbType == MySQL, - ansiSQLChange(`drop index `+m.tableName+`_i2 on `+m.tableName), - ansiSQLChange(`drop index `+m.tableName+`_i2`), - ), - // add m.tableName index - ansiSQLChange(`create index ` + m.tableName + `_i1 on ` + m.tableName + ` (clustername,namespace,updatedat)`), - // index to find records that need deleting, this omits namespaces as this might be null - ansiSQLChange(`create index argo_archived_workflows_i2 on argo_archived_workflows (clustername,instanceid,finishedat)`), - // add argo_archived_workflows name index for prefix searching performance - ansiSQLChange(`create index argo_archived_workflows_i3 on argo_archived_workflows (clustername,instanceid,name)`), - // add indexes for list archived workflow performance. #8836 - ansiSQLChange(`create index argo_archived_workflows_i4 on argo_archived_workflows (startedat)`), - ansiSQLChange(`create index argo_archived_workflows_labels_i1 on argo_archived_workflows_labels (name,value)`), - } { - err := m.applyChange(ctx, changeSchemaVersion, change) - if err != nil { - return err - } - } - - return nil -} - -func (m migrate) applyChange(ctx context.Context, changeSchemaVersion int, c change) error { - tx, err := m.session.NewTx(ctx) - if err != nil { - return err - } - defer func() { _ = tx.Rollback() }() - rs, err := tx.Exec("update schema_history set schema_version = ? 
where schema_version = ?", changeSchemaVersion, changeSchemaVersion-1) - if err != nil { - return err - } - rowsAffected, err := rs.RowsAffected() - if err != nil { - return err - } - if rowsAffected == 1 { - log.WithFields(log.Fields{"changeSchemaVersion": changeSchemaVersion, "change": c}).Info("applying database change") - err := c.apply(m.session) - if err != nil { - return err - } - } - return tx.Commit() -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/null_workflow_archive.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/null_workflow_archive.go deleted file mode 100644 index 3be1c0426af..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/null_workflow_archive.go +++ /dev/null @@ -1,50 +0,0 @@ -package sqldb - -import ( - "fmt" - "time" - - "k8s.io/apimachinery/pkg/labels" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -var NullWorkflowArchive WorkflowArchive = &nullWorkflowArchive{} - -type nullWorkflowArchive struct{} - -func (r *nullWorkflowArchive) IsEnabled() bool { - return false -} - -func (r *nullWorkflowArchive) ArchiveWorkflow(*wfv1.Workflow) error { - return nil -} - -func (r *nullWorkflowArchive) ListWorkflows(string, string, string, time.Time, time.Time, labels.Requirements, int, int) (wfv1.Workflows, error) { - return wfv1.Workflows{}, nil -} - -func (r *nullWorkflowArchive) CountWorkflows(string, string, string, time.Time, time.Time, labels.Requirements) (int64, error) { - return 0, nil -} - -func (r *nullWorkflowArchive) GetWorkflow(string) (*wfv1.Workflow, error) { - return nil, fmt.Errorf("getting archived workflows not supported") -} - -func (r *nullWorkflowArchive) DeleteWorkflow(string) error { - return fmt.Errorf("deleting archived workflows not supported") -} - -func (r *nullWorkflowArchive) DeleteExpiredWorkflows(time.Duration) error { - return nil -} - -func (r *nullWorkflowArchive) ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) { - return &wfv1.LabelKeys{}, nil -} - -func (r *nullWorkflowArchive) ListWorkflowsLabelValues(string) (*wfv1.LabelValues, error) { - return &wfv1.LabelValues{}, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/offload_node_status_repo.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/offload_node_status_repo.go deleted file mode 100644 index f40456425f7..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/offload_node_status_repo.go +++ /dev/null @@ -1,229 +0,0 @@ -package sqldb - -import ( - "encoding/json" - "fmt" - "hash/fnv" - "strings" - "time" - - log "github.com/sirupsen/logrus" - "upper.io/db.v3" - "upper.io/db.v3/lib/sqlbuilder" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util/env" -) - -const OffloadNodeStatusDisabled = "Workflow has offloaded nodes, but offloading has been disabled" - -type UUIDVersion struct { - UID string `db:"uid"` - Version string `db:"version"` -} - -type OffloadNodeStatusRepo interface { - Save(uid, namespace string, nodes wfv1.Nodes) (string, error) - Get(uid, version string) (wfv1.Nodes, error) - List(namespace string) (map[UUIDVersion]wfv1.Nodes, error) - ListOldOffloads(namespace string) (map[string][]string, error) - Delete(uid, version string) error - IsEnabled() bool -} - -func NewOffloadNodeStatusRepo(session sqlbuilder.Database, clusterName, tableName string) (OffloadNodeStatusRepo, error) { - // this environment variable allows you to make Argo 
Workflows delete offloaded data more or less aggressively, - // useful for testing - ttl := env.LookupEnvDurationOr("OFFLOAD_NODE_STATUS_TTL", 5*time.Minute) - log.WithField("ttl", ttl).Debug("Node status offloading config") - return &nodeOffloadRepo{session: session, clusterName: clusterName, tableName: tableName, ttl: ttl}, nil -} - -type nodesRecord struct { - ClusterName string `db:"clustername"` - UUIDVersion - Namespace string `db:"namespace"` - Nodes string `db:"nodes"` -} - -type nodeOffloadRepo struct { - session sqlbuilder.Database - clusterName string - tableName string - // time to live - at what ttl an offload becomes old - ttl time.Duration -} - -func (wdc *nodeOffloadRepo) IsEnabled() bool { - return true -} - -func nodeStatusVersion(s wfv1.Nodes) (string, string, error) { - marshalled, err := json.Marshal(s) - if err != nil { - return "", "", err - } - - h := fnv.New32() - _, _ = h.Write(marshalled) - return string(marshalled), fmt.Sprintf("fnv:%v", h.Sum32()), nil -} - -func (wdc *nodeOffloadRepo) Save(uid, namespace string, nodes wfv1.Nodes) (string, error) { - marshalled, version, err := nodeStatusVersion(nodes) - if err != nil { - return "", err - } - - record := &nodesRecord{ - ClusterName: wdc.clusterName, - UUIDVersion: UUIDVersion{ - UID: uid, - Version: version, - }, - Namespace: namespace, - Nodes: marshalled, - } - - logCtx := log.WithFields(log.Fields{"uid": uid, "version": version}) - logCtx.Debug("Offloading nodes") - _, err = wdc.session.Collection(wdc.tableName).Insert(record) - if err != nil { - // if we have a duplicate, then it must have the same clustername+uid+version, which MUST mean that we - // have already written this record - if !isDuplicateKeyError(err) { - return "", err - } - logCtx.WithField("err", err).Info("Ignoring duplicate key error") - } - - logCtx.Debug("Nodes offloaded, cleaning up old offloads") - - // This might fail, which kind of fine (maybe a bug). - // It might not delete all records, which is also fine, as we always key on resource version. - // We also want to keep enough around so that we can service watches. - rs, err := wdc.session. - DeleteFrom(wdc.tableName). - Where(db.Cond{"clustername": wdc.clusterName}). - And(db.Cond{"uid": uid}). - And(db.Cond{"version <>": version}). - And(wdc.oldOffload()). - Exec() - if err != nil { - return "", err - } - rowsAffected, err := rs.RowsAffected() - if err != nil { - return "", err - } - logCtx.WithField("rowsAffected", rowsAffected).Debug("Deleted offloaded nodes") - return version, nil -} - -func isDuplicateKeyError(err error) bool { - // postgres - if strings.Contains(err.Error(), "duplicate key") { - return true - } - // mysql - if strings.Contains(err.Error(), "Duplicate entry") { - return true - } - return false -} - -func (wdc *nodeOffloadRepo) Get(uid, version string) (wfv1.Nodes, error) { - log.WithFields(log.Fields{"uid": uid, "version": version}).Debug("Getting offloaded nodes") - r := &nodesRecord{} - err := wdc.session. - SelectFrom(wdc.tableName). - Where(db.Cond{"clustername": wdc.clusterName}). - And(db.Cond{"uid": uid}). - And(db.Cond{"version": version}). - One(r) - if err != nil { - return nil, err - } - nodes := &wfv1.Nodes{} - err = json.Unmarshal([]byte(r.Nodes), nodes) - if err != nil { - return nil, err - } - return *nodes, nil -} - -func (wdc *nodeOffloadRepo) List(namespace string) (map[UUIDVersion]wfv1.Nodes, error) { - log.WithFields(log.Fields{"namespace": namespace}).Debug("Listing offloaded nodes") - var records []nodesRecord - err := wdc.session. 
- Select("uid", "version", "nodes"). - From(wdc.tableName). - Where(db.Cond{"clustername": wdc.clusterName}). - And(namespaceEqual(namespace)). - All(&records) - if err != nil { - return nil, err - } - - res := make(map[UUIDVersion]wfv1.Nodes) - for _, r := range records { - nodes := &wfv1.Nodes{} - err = json.Unmarshal([]byte(r.Nodes), nodes) - if err != nil { - return nil, err - } - res[UUIDVersion{UID: r.UID, Version: r.Version}] = *nodes - } - - return res, nil -} - -func (wdc *nodeOffloadRepo) ListOldOffloads(namespace string) (map[string][]string, error) { - log.WithFields(log.Fields{"namespace": namespace}).Debug("Listing old offloaded nodes") - var records []UUIDVersion - err := wdc.session. - Select("uid", "version"). - From(wdc.tableName). - Where(db.Cond{"clustername": wdc.clusterName}). - And(namespaceEqual(namespace)). - And(wdc.oldOffload()). - All(&records) - if err != nil { - return nil, err - } - x := make(map[string][]string) - for _, r := range records { - x[r.UID] = append(x[r.UID], r.Version) - } - return x, nil -} - -func (wdc *nodeOffloadRepo) Delete(uid, version string) error { - if uid == "" { - return fmt.Errorf("invalid uid") - } - if version == "" { - return fmt.Errorf("invalid version") - } - logCtx := log.WithFields(log.Fields{"uid": uid, "version": version}) - logCtx.Debug("Deleting offloaded nodes") - rs, err := wdc.session. - DeleteFrom(wdc.tableName). - Where(db.Cond{"clustername": wdc.clusterName}). - And(db.Cond{"uid": uid}). - And(db.Cond{"version": version}). - Exec() - if err != nil { - return err - } - rowsAffected, err := rs.RowsAffected() - if err != nil { - return err - } - logCtx.WithField("rowsAffected", rowsAffected).Debug("Deleted offloaded nodes") - return nil -} - -func (wdc *nodeOffloadRepo) oldOffload() string { - return fmt.Sprintf("updatedat < current_timestamp - interval '%d' second", int(wdc.ttl.Seconds())) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/sqldb.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/sqldb.go deleted file mode 100644 index b51f4e0703e..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/sqldb.go +++ /dev/null @@ -1,119 +0,0 @@ -package sqldb - -import ( - "context" - "fmt" - "time" - - "k8s.io/client-go/kubernetes" - "upper.io/db.v3/lib/sqlbuilder" - "upper.io/db.v3/mysql" - "upper.io/db.v3/postgresql" - - "github.com/argoproj/argo-workflows/v3/config" - "github.com/argoproj/argo-workflows/v3/errors" - "github.com/argoproj/argo-workflows/v3/util" -) - -// CreateDBSession creates the dB session -func CreateDBSession(kubectlConfig kubernetes.Interface, namespace string, persistConfig *config.PersistConfig) (sqlbuilder.Database, string, error) { - if persistConfig == nil { - return nil, "", errors.InternalError("Persistence config is not found") - } - - if persistConfig.PostgreSQL != nil { - return CreatePostGresDBSession(kubectlConfig, namespace, persistConfig.PostgreSQL, persistConfig.ConnectionPool) - } else if persistConfig.MySQL != nil { - return CreateMySQLDBSession(kubectlConfig, namespace, persistConfig.MySQL, persistConfig.ConnectionPool) - } - return nil, "", fmt.Errorf("no databases are configured") -} - -// CreatePostGresDBSession creates postgresDB session -func CreatePostGresDBSession(kubectlConfig kubernetes.Interface, namespace string, cfg *config.PostgreSQLConfig, persistPool *config.ConnectionPool) (sqlbuilder.Database, string, error) { - if cfg.TableName == "" { - return nil, "", errors.InternalError("tableName is empty") - } - - 
ctx := context.Background() - userNameByte, err := util.GetSecrets(ctx, kubectlConfig, namespace, cfg.UsernameSecret.Name, cfg.UsernameSecret.Key) - if err != nil { - return nil, "", err - } - passwordByte, err := util.GetSecrets(ctx, kubectlConfig, namespace, cfg.PasswordSecret.Name, cfg.PasswordSecret.Key) - if err != nil { - return nil, "", err - } - - settings := postgresql.ConnectionURL{ - User: string(userNameByte), - Password: string(passwordByte), - Host: cfg.GetHostname(), - Database: cfg.Database, - } - - if cfg.SSL { - if cfg.SSLMode != "" { - options := map[string]string{ - "sslmode": cfg.SSLMode, - } - settings.Options = options - } - } - - session, err := postgresql.Open(settings) - if err != nil { - return nil, "", err - } - - if persistPool != nil { - session.SetMaxOpenConns(persistPool.MaxOpenConns) - session.SetMaxIdleConns(persistPool.MaxIdleConns) - session.SetConnMaxLifetime(time.Duration(persistPool.ConnMaxLifetime)) - } - return session, cfg.TableName, nil -} - -// CreateMySQLDBSession creates Mysql DB session -func CreateMySQLDBSession(kubectlConfig kubernetes.Interface, namespace string, cfg *config.MySQLConfig, persistPool *config.ConnectionPool) (sqlbuilder.Database, string, error) { - if cfg.TableName == "" { - return nil, "", errors.InternalError("tableName is empty") - } - - ctx := context.Background() - userNameByte, err := util.GetSecrets(ctx, kubectlConfig, namespace, cfg.UsernameSecret.Name, cfg.UsernameSecret.Key) - if err != nil { - return nil, "", err - } - passwordByte, err := util.GetSecrets(ctx, kubectlConfig, namespace, cfg.PasswordSecret.Name, cfg.PasswordSecret.Key) - if err != nil { - return nil, "", err - } - - session, err := mysql.Open(mysql.ConnectionURL{ - User: string(userNameByte), - Password: string(passwordByte), - Host: cfg.GetHostname(), - Database: cfg.Database, - Options: cfg.Options, - }) - if err != nil { - return nil, "", err - } - - if persistPool != nil { - session.SetMaxOpenConns(persistPool.MaxOpenConns) - session.SetMaxIdleConns(persistPool.MaxIdleConns) - session.SetConnMaxLifetime(time.Duration(persistPool.ConnMaxLifetime)) - } - // this is needed to make MySQL run in a Golang-compatible UTF-8 character set. 
- _, err = session.Exec("SET NAMES 'utf8mb4'") - if err != nil { - return nil, "", err - } - _, err = session.Exec("SET CHARACTER SET utf8mb4") - if err != nil { - return nil, "", err - } - return session, cfg.TableName, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/workflow_archive.go b/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/workflow_archive.go deleted file mode 100644 index ac7ad1d7436..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/persist/sqldb/workflow_archive.go +++ /dev/null @@ -1,314 +0,0 @@ -package sqldb - -import ( - "context" - "encoding/json" - "fmt" - "time" - - log "github.com/sirupsen/logrus" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "upper.io/db.v3" - "upper.io/db.v3/lib/sqlbuilder" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util/instanceid" -) - -const ( - archiveTableName = "argo_archived_workflows" - archiveLabelsTableName = archiveTableName + "_labels" -) - -type archivedWorkflowMetadata struct { - ClusterName string `db:"clustername"` - InstanceID string `db:"instanceid"` - UID string `db:"uid"` - Name string `db:"name"` - Namespace string `db:"namespace"` - Phase wfv1.WorkflowPhase `db:"phase"` - StartedAt time.Time `db:"startedat"` - FinishedAt time.Time `db:"finishedat"` -} - -type archivedWorkflowRecord struct { - archivedWorkflowMetadata - Workflow string `db:"workflow"` -} - -type archivedWorkflowLabelRecord struct { - ClusterName string `db:"clustername"` - UID string `db:"uid"` - // Why is this called "name" not "key"? Key is an SQL reserved word. - Key string `db:"name"` - Value string `db:"value"` -} - -type archivedWorkflowCount struct { - Total uint64 `db:"total,omitempty" json:"total"` -} - -//go:generate mockery --name=WorkflowArchive - -type WorkflowArchive interface { - ArchiveWorkflow(wf *wfv1.Workflow) error - // list workflows, with the most recently started workflows at the beginning (i.e. 
index 0 is the most recent) - ListWorkflows(namespace string, name string, namePrefix string, minStartAt, maxStartAt time.Time, labelRequirements labels.Requirements, limit, offset int) (wfv1.Workflows, error) - CountWorkflows(namespace string, name string, namePrefix string, minStartAt, maxStartAt time.Time, labelRequirements labels.Requirements) (int64, error) - GetWorkflow(uid string) (*wfv1.Workflow, error) - DeleteWorkflow(uid string) error - DeleteExpiredWorkflows(ttl time.Duration) error - IsEnabled() bool - ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) - ListWorkflowsLabelValues(key string) (*wfv1.LabelValues, error) -} - -type workflowArchive struct { - session sqlbuilder.Database - clusterName string - managedNamespace string - instanceIDService instanceid.Service - dbType dbType -} - -func (r *workflowArchive) IsEnabled() bool { - return true -} - -// NewWorkflowArchive returns a new workflowArchive -func NewWorkflowArchive(session sqlbuilder.Database, clusterName, managedNamespace string, instanceIDService instanceid.Service) WorkflowArchive { - return &workflowArchive{session: session, clusterName: clusterName, managedNamespace: managedNamespace, instanceIDService: instanceIDService, dbType: dbTypeFor(session)} -} - -func (r *workflowArchive) ArchiveWorkflow(wf *wfv1.Workflow) error { - logCtx := log.WithFields(log.Fields{"uid": wf.UID, "labels": wf.GetLabels()}) - logCtx.Debug("Archiving workflow") - workflow, err := json.Marshal(wf) - if err != nil { - return err - } - return r.session.Tx(context.Background(), func(sess sqlbuilder.Tx) error { - _, err := sess. - DeleteFrom(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(db.Cond{"uid": wf.UID}). - Exec() - if err != nil { - return err - } - _, err = sess.Collection(archiveTableName). - Insert(&archivedWorkflowRecord{ - archivedWorkflowMetadata: archivedWorkflowMetadata{ - ClusterName: r.clusterName, - InstanceID: r.instanceIDService.InstanceID(), - UID: string(wf.UID), - Name: wf.Name, - Namespace: wf.Namespace, - Phase: wf.Status.Phase, - StartedAt: wf.Status.StartedAt.Time, - FinishedAt: wf.Status.FinishedAt.Time, - }, - Workflow: string(workflow), - }) - if err != nil { - return err - } - - _, err = sess. - DeleteFrom(archiveLabelsTableName). - Where(db.Cond{"clustername": r.clusterName}). - And(db.Cond{"uid": wf.UID}). - Exec() - if err != nil { - return err - } - // insert the labels - for key, value := range wf.GetLabels() { - _, err := sess.Collection(archiveLabelsTableName). - Insert(&archivedWorkflowLabelRecord{ - ClusterName: r.clusterName, - UID: string(wf.UID), - Key: key, - Value: value, - }) - if err != nil { - return err - } - } - return nil - }) -} - -func (r *workflowArchive) ListWorkflows(namespace string, name string, namePrefix string, minStartedAt, maxStartedAt time.Time, labelRequirements labels.Requirements, limit int, offset int) (wfv1.Workflows, error) { - var archivedWfs []archivedWorkflowMetadata - clause, err := labelsClause(r.dbType, labelRequirements) - if err != nil { - return nil, err - } - - // If we were passed 0 as the limit, then we should load all available archived workflows - // to match the behavior of the `List` operations in the Kubernetes API - if limit == 0 { - limit = -1 - offset = -1 - } - - err = r.session. - Select("name", "namespace", "uid", "phase", "startedat", "finishedat"). - From(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(namespaceEqual(namespace)). - And(nameEqual(name)). - And(namePrefixClause(namePrefix)). 
- And(startedAtClause(minStartedAt, maxStartedAt)). - And(clause). - OrderBy("-startedat"). - Limit(limit). - Offset(offset). - All(&archivedWfs) - if err != nil { - return nil, err - } - wfs := make(wfv1.Workflows, len(archivedWfs)) - for i, md := range archivedWfs { - wfs[i] = wfv1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Name: md.Name, - Namespace: md.Namespace, - UID: types.UID(md.UID), - CreationTimestamp: v1.Time{Time: md.StartedAt}, - }, - Status: wfv1.WorkflowStatus{ - Phase: md.Phase, - StartedAt: v1.Time{Time: md.StartedAt}, - FinishedAt: v1.Time{Time: md.FinishedAt}, - }, - } - } - return wfs, nil -} - -func (r *workflowArchive) CountWorkflows(namespace string, name string, namePrefix string, minStartedAt, maxStartedAt time.Time, labelRequirements labels.Requirements) (int64, error) { - total := &archivedWorkflowCount{} - clause, err := labelsClause(r.dbType, labelRequirements) - if err != nil { - return 0, err - } - - err = r.session. - Select(db.Raw("count(*) as total")). - From(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(namespaceEqual(namespace)). - And(nameEqual(name)). - And(namePrefixClause(namePrefix)). - And(startedAtClause(minStartedAt, maxStartedAt)). - And(clause). - One(total) - if err != nil { - return 0, err - } - - return int64(total.Total), nil -} - -func (r *workflowArchive) clusterManagedNamespaceAndInstanceID() db.Compound { - return db.And( - db.Cond{"clustername": r.clusterName}, - namespaceEqual(r.managedNamespace), - db.Cond{"instanceid": r.instanceIDService.InstanceID()}, - ) -} - -func startedAtClause(from, to time.Time) db.Compound { - var conds []db.Compound - if !from.IsZero() { - conds = append(conds, db.Cond{"startedat > ": from}) - } - if !to.IsZero() { - conds = append(conds, db.Cond{"startedat < ": to}) - } - return db.And(conds...) -} - -func namespaceEqual(namespace string) db.Cond { - if namespace == "" { - return db.Cond{} - } else { - return db.Cond{"namespace": namespace} - } -} - -func nameEqual(name string) db.Cond { - if name == "" { - return db.Cond{} - } else { - return db.Cond{"name": name} - } -} - -func namePrefixClause(namePrefix string) db.Cond { - if namePrefix == "" { - return db.Cond{} - } else { - return db.Cond{"name LIKE ": namePrefix + "%"} - } -} - -func (r *workflowArchive) GetWorkflow(uid string) (*wfv1.Workflow, error) { - archivedWf := &archivedWorkflowRecord{} - err := r.session. - Select("workflow"). - From(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(db.Cond{"uid": uid}). - One(archivedWf) - if err != nil { - if err == db.ErrNoMoreRows { - return nil, nil - } - return nil, err - } - var wf *wfv1.Workflow - err = json.Unmarshal([]byte(archivedWf.Workflow), &wf) - if err != nil { - return nil, err - } - return wf, nil -} - -func (r *workflowArchive) DeleteWorkflow(uid string) error { - rs, err := r.session. - DeleteFrom(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(db.Cond{"uid": uid}). - Exec() - if err != nil { - return err - } - rowsAffected, err := rs.RowsAffected() - if err != nil { - return err - } - log.WithFields(log.Fields{"uid": uid, "rowsAffected": rowsAffected}).Debug("Deleted archived workflow") - return nil -} - -func (r *workflowArchive) DeleteExpiredWorkflows(ttl time.Duration) error { - rs, err := r.session. - DeleteFrom(archiveTableName). - Where(r.clusterManagedNamespaceAndInstanceID()). - And(fmt.Sprintf("finishedat < current_timestamp - interval '%d' second", int(ttl.Seconds()))). 
- Exec() - if err != nil { - return err - } - rowsAffected, err := rs.RowsAffected() - if err != nil { - return err - } - log.WithFields(log.Fields{"rowsAffected": rowsAffected}).Info("Deleted archived workflows") - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go index 2e78ad5e661..8c2ae9945b2 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/artifact_repository_types.go @@ -3,6 +3,7 @@ package v1alpha1 import ( "fmt" "path" + "strings" ) var ( @@ -73,7 +74,7 @@ func (a *ArtifactRepository) ToArtifactLocation() *ArtifactLocation { type S3ArtifactRepository struct { S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"` - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. @@ -93,7 +94,7 @@ func (r *S3ArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { type OSSArtifactRepository struct { OSSBucket `json:",inline" protobuf:"bytes,1,opt,name=oSSBucket"` - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` } @@ -109,7 +110,7 @@ func (r *OSSArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { type GCSArtifactRepository struct { GCSBucket `json:",inline" protobuf:"bytes,1,opt,name=gCSBucket"` - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,2,opt,name=keyFormat"` } @@ -126,15 +127,20 @@ type ArtifactoryArtifactRepository struct { ArtifactoryAuth `json:",inline" protobuf:"bytes,1,opt,name=artifactoryAuth"` // RepoURL is the url for artifactory repo. RepoURL string `json:"repoURL,omitempty" protobuf:"bytes,2,opt,name=repoURL"` + // KeyFormat defines the format of how to store keys and can reference workflow variables. 
+ KeyFormat string `json:"keyFormat,omitempty" protobuf:"bytes,3,opt,name=keyFormat"` } func (r *ArtifactoryArtifactRepository) IntoArtifactLocation(l *ArtifactLocation) { - u := "" - if r.RepoURL != "" { - u = r.RepoURL + "/" + url := r.RepoURL + if !strings.HasSuffix(url, "/") { + url = url + "/" } - u = fmt.Sprintf("%s%s", u, DefaultArchivePattern) - l.Artifactory = &ArtifactoryArtifact{ArtifactoryAuth: r.ArtifactoryAuth, URL: u} + k := r.KeyFormat + if k == "" { + k = DefaultArchivePattern + } + l.Artifactory = &ArtifactoryArtifact{ArtifactoryAuth: r.ArtifactoryAuth, URL: fmt.Sprintf("%s%s", url, k)} } // AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go index fb685a3e81f..ac1a4f44205 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/container_set_template_types.go @@ -12,16 +12,18 @@ import ( type ContainerSetTemplate struct { Containers []ContainerNode `json:"containers" protobuf:"bytes,4,rep,name=containers"` VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,3,rep,name=volumeMounts"` - // RetryStrategy describes how to retry a container nodes in the container set if it fails. - // Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set. + // RetryStrategy describes how to retry container nodes if the container set fails. + // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. RetryStrategy *ContainerSetRetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,5,opt,name=retryStrategy"` } +// ContainerSetRetryStrategy provides controls on how to retry a container set type ContainerSetRetryStrategy struct { // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` - // Nbr of retries + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. Retries *intstr.IntOrString `json:"retries" protobuf:"bytes,2,rep,name=retries"` } diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go index bdfab46d3aa..d6f49b0d864 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/event_types.go @@ -32,7 +32,7 @@ type WorkflowEventBindingSpec struct { } type Event struct { - // Selector (https://github.com/antonmedv/expr) that we must must match the event. E.g. `payload.message == "test"` + // Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. 
`payload.message == "test"` Selector string `json:"selector" protobuf:"bytes,1,opt,name=selector"` } diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go index c5c1d1aaa6e..8e251852fd3 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.pb.go @@ -13,7 +13,7 @@ import ( github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" k8s_io_api_core_v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/policy/v1beta1" + v12 "k8s.io/api/policy/v1" k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v11 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -876,10 +876,38 @@ func (m *ClusterWorkflowTemplateList) XXX_DiscardUnknown() { var xxx_messageInfo_ClusterWorkflowTemplateList proto.InternalMessageInfo +func (m *Column) Reset() { *m = Column{} } +func (*Column) ProtoMessage() {} +func (*Column) Descriptor() ([]byte, []int) { + return fileDescriptor_724696e352c3df5f, []int{30} +} +func (m *Column) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Column) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Column) XXX_Merge(src proto.Message) { + xxx_messageInfo_Column.Merge(m, src) +} +func (m *Column) XXX_Size() int { + return m.Size() +} +func (m *Column) XXX_DiscardUnknown() { + xxx_messageInfo_Column.DiscardUnknown(m) +} + +var xxx_messageInfo_Column proto.InternalMessageInfo + func (m *Condition) Reset() { *m = Condition{} } func (*Condition) ProtoMessage() {} func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{30} + return fileDescriptor_724696e352c3df5f, []int{31} } func (m *Condition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -907,7 +935,7 @@ var xxx_messageInfo_Condition proto.InternalMessageInfo func (m *ContainerNode) Reset() { *m = ContainerNode{} } func (*ContainerNode) ProtoMessage() {} func (*ContainerNode) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{31} + return fileDescriptor_724696e352c3df5f, []int{32} } func (m *ContainerNode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -935,7 +963,7 @@ var xxx_messageInfo_ContainerNode proto.InternalMessageInfo func (m *ContainerSetRetryStrategy) Reset() { *m = ContainerSetRetryStrategy{} } func (*ContainerSetRetryStrategy) ProtoMessage() {} func (*ContainerSetRetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{32} + return fileDescriptor_724696e352c3df5f, []int{33} } func (m *ContainerSetRetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -963,7 +991,7 @@ var xxx_messageInfo_ContainerSetRetryStrategy proto.InternalMessageInfo func (m *ContainerSetTemplate) Reset() { *m = ContainerSetTemplate{} } func (*ContainerSetTemplate) ProtoMessage() {} func (*ContainerSetTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{33} + return fileDescriptor_724696e352c3df5f, []int{34} } func (m *ContainerSetTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -991,7 +1019,7 @@ var xxx_messageInfo_ContainerSetTemplate proto.InternalMessageInfo func (m *ContinueOn) Reset() { *m = 
ContinueOn{} } func (*ContinueOn) ProtoMessage() {} func (*ContinueOn) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{34} + return fileDescriptor_724696e352c3df5f, []int{35} } func (m *ContinueOn) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1019,7 +1047,7 @@ var xxx_messageInfo_ContinueOn proto.InternalMessageInfo func (m *Counter) Reset() { *m = Counter{} } func (*Counter) ProtoMessage() {} func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{35} + return fileDescriptor_724696e352c3df5f, []int{36} } func (m *Counter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1047,7 +1075,7 @@ var xxx_messageInfo_Counter proto.InternalMessageInfo func (m *CreateS3BucketOptions) Reset() { *m = CreateS3BucketOptions{} } func (*CreateS3BucketOptions) ProtoMessage() {} func (*CreateS3BucketOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{36} + return fileDescriptor_724696e352c3df5f, []int{37} } func (m *CreateS3BucketOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1075,7 +1103,7 @@ var xxx_messageInfo_CreateS3BucketOptions proto.InternalMessageInfo func (m *CronWorkflow) Reset() { *m = CronWorkflow{} } func (*CronWorkflow) ProtoMessage() {} func (*CronWorkflow) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{37} + return fileDescriptor_724696e352c3df5f, []int{38} } func (m *CronWorkflow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1103,7 +1131,7 @@ var xxx_messageInfo_CronWorkflow proto.InternalMessageInfo func (m *CronWorkflowList) Reset() { *m = CronWorkflowList{} } func (*CronWorkflowList) ProtoMessage() {} func (*CronWorkflowList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{38} + return fileDescriptor_724696e352c3df5f, []int{39} } func (m *CronWorkflowList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1131,7 +1159,7 @@ var xxx_messageInfo_CronWorkflowList proto.InternalMessageInfo func (m *CronWorkflowSpec) Reset() { *m = CronWorkflowSpec{} } func (*CronWorkflowSpec) ProtoMessage() {} func (*CronWorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{39} + return fileDescriptor_724696e352c3df5f, []int{40} } func (m *CronWorkflowSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1159,7 +1187,7 @@ var xxx_messageInfo_CronWorkflowSpec proto.InternalMessageInfo func (m *CronWorkflowStatus) Reset() { *m = CronWorkflowStatus{} } func (*CronWorkflowStatus) ProtoMessage() {} func (*CronWorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{40} + return fileDescriptor_724696e352c3df5f, []int{41} } func (m *CronWorkflowStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1187,7 +1215,7 @@ var xxx_messageInfo_CronWorkflowStatus proto.InternalMessageInfo func (m *DAGTask) Reset() { *m = DAGTask{} } func (*DAGTask) ProtoMessage() {} func (*DAGTask) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{41} + return fileDescriptor_724696e352c3df5f, []int{42} } func (m *DAGTask) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1215,7 +1243,7 @@ var xxx_messageInfo_DAGTask proto.InternalMessageInfo func (m *DAGTemplate) Reset() { *m = DAGTemplate{} } func (*DAGTemplate) ProtoMessage() {} func (*DAGTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{42} + return fileDescriptor_724696e352c3df5f, 
[]int{43} } func (m *DAGTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1243,7 +1271,7 @@ var xxx_messageInfo_DAGTemplate proto.InternalMessageInfo func (m *Data) Reset() { *m = Data{} } func (*Data) ProtoMessage() {} func (*Data) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{43} + return fileDescriptor_724696e352c3df5f, []int{44} } func (m *Data) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1271,7 +1299,7 @@ var xxx_messageInfo_Data proto.InternalMessageInfo func (m *DataSource) Reset() { *m = DataSource{} } func (*DataSource) ProtoMessage() {} func (*DataSource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{44} + return fileDescriptor_724696e352c3df5f, []int{45} } func (m *DataSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1299,7 +1327,7 @@ var xxx_messageInfo_DataSource proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{45} + return fileDescriptor_724696e352c3df5f, []int{46} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1327,7 +1355,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *ExecutorConfig) Reset() { *m = ExecutorConfig{} } func (*ExecutorConfig) ProtoMessage() {} func (*ExecutorConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{46} + return fileDescriptor_724696e352c3df5f, []int{47} } func (m *ExecutorConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1355,7 +1383,7 @@ var xxx_messageInfo_ExecutorConfig proto.InternalMessageInfo func (m *GCSArtifact) Reset() { *m = GCSArtifact{} } func (*GCSArtifact) ProtoMessage() {} func (*GCSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{47} + return fileDescriptor_724696e352c3df5f, []int{48} } func (m *GCSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1383,7 +1411,7 @@ var xxx_messageInfo_GCSArtifact proto.InternalMessageInfo func (m *GCSArtifactRepository) Reset() { *m = GCSArtifactRepository{} } func (*GCSArtifactRepository) ProtoMessage() {} func (*GCSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{48} + return fileDescriptor_724696e352c3df5f, []int{49} } func (m *GCSArtifactRepository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1411,7 +1439,7 @@ var xxx_messageInfo_GCSArtifactRepository proto.InternalMessageInfo func (m *GCSBucket) Reset() { *m = GCSBucket{} } func (*GCSBucket) ProtoMessage() {} func (*GCSBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{49} + return fileDescriptor_724696e352c3df5f, []int{50} } func (m *GCSBucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1439,7 +1467,7 @@ var xxx_messageInfo_GCSBucket proto.InternalMessageInfo func (m *Gauge) Reset() { *m = Gauge{} } func (*Gauge) ProtoMessage() {} func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{50} + return fileDescriptor_724696e352c3df5f, []int{51} } func (m *Gauge) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1467,7 +1495,7 @@ var xxx_messageInfo_Gauge proto.InternalMessageInfo func (m *GitArtifact) Reset() { *m = GitArtifact{} } func (*GitArtifact) ProtoMessage() {} func (*GitArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{51} + return 
fileDescriptor_724696e352c3df5f, []int{52} } func (m *GitArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1495,7 +1523,7 @@ var xxx_messageInfo_GitArtifact proto.InternalMessageInfo func (m *HDFSArtifact) Reset() { *m = HDFSArtifact{} } func (*HDFSArtifact) ProtoMessage() {} func (*HDFSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{52} + return fileDescriptor_724696e352c3df5f, []int{53} } func (m *HDFSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1523,7 +1551,7 @@ var xxx_messageInfo_HDFSArtifact proto.InternalMessageInfo func (m *HDFSArtifactRepository) Reset() { *m = HDFSArtifactRepository{} } func (*HDFSArtifactRepository) ProtoMessage() {} func (*HDFSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{53} + return fileDescriptor_724696e352c3df5f, []int{54} } func (m *HDFSArtifactRepository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1551,7 +1579,7 @@ var xxx_messageInfo_HDFSArtifactRepository proto.InternalMessageInfo func (m *HDFSConfig) Reset() { *m = HDFSConfig{} } func (*HDFSConfig) ProtoMessage() {} func (*HDFSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{54} + return fileDescriptor_724696e352c3df5f, []int{55} } func (m *HDFSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1579,7 +1607,7 @@ var xxx_messageInfo_HDFSConfig proto.InternalMessageInfo func (m *HDFSKrbConfig) Reset() { *m = HDFSKrbConfig{} } func (*HDFSKrbConfig) ProtoMessage() {} func (*HDFSKrbConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{55} + return fileDescriptor_724696e352c3df5f, []int{56} } func (m *HDFSKrbConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1607,7 +1635,7 @@ var xxx_messageInfo_HDFSKrbConfig proto.InternalMessageInfo func (m *HTTP) Reset() { *m = HTTP{} } func (*HTTP) ProtoMessage() {} func (*HTTP) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{56} + return fileDescriptor_724696e352c3df5f, []int{57} } func (m *HTTP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1635,7 +1663,7 @@ var xxx_messageInfo_HTTP proto.InternalMessageInfo func (m *HTTPArtifact) Reset() { *m = HTTPArtifact{} } func (*HTTPArtifact) ProtoMessage() {} func (*HTTPArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{57} + return fileDescriptor_724696e352c3df5f, []int{58} } func (m *HTTPArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1663,7 +1691,7 @@ var xxx_messageInfo_HTTPArtifact proto.InternalMessageInfo func (m *HTTPAuth) Reset() { *m = HTTPAuth{} } func (*HTTPAuth) ProtoMessage() {} func (*HTTPAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{58} + return fileDescriptor_724696e352c3df5f, []int{59} } func (m *HTTPAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1691,7 +1719,7 @@ var xxx_messageInfo_HTTPAuth proto.InternalMessageInfo func (m *HTTPBodySource) Reset() { *m = HTTPBodySource{} } func (*HTTPBodySource) ProtoMessage() {} func (*HTTPBodySource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{59} + return fileDescriptor_724696e352c3df5f, []int{60} } func (m *HTTPBodySource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1719,7 +1747,7 @@ var xxx_messageInfo_HTTPBodySource proto.InternalMessageInfo func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) 
ProtoMessage() {} func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{60} + return fileDescriptor_724696e352c3df5f, []int{61} } func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1747,7 +1775,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo func (m *HTTPHeaderSource) Reset() { *m = HTTPHeaderSource{} } func (*HTTPHeaderSource) ProtoMessage() {} func (*HTTPHeaderSource) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{61} + return fileDescriptor_724696e352c3df5f, []int{62} } func (m *HTTPHeaderSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1775,7 +1803,7 @@ var xxx_messageInfo_HTTPHeaderSource proto.InternalMessageInfo func (m *Header) Reset() { *m = Header{} } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{62} + return fileDescriptor_724696e352c3df5f, []int{63} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1803,7 +1831,7 @@ var xxx_messageInfo_Header proto.InternalMessageInfo func (m *Histogram) Reset() { *m = Histogram{} } func (*Histogram) ProtoMessage() {} func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{63} + return fileDescriptor_724696e352c3df5f, []int{64} } func (m *Histogram) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1831,7 +1859,7 @@ var xxx_messageInfo_Histogram proto.InternalMessageInfo func (m *Inputs) Reset() { *m = Inputs{} } func (*Inputs) ProtoMessage() {} func (*Inputs) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{64} + return fileDescriptor_724696e352c3df5f, []int{65} } func (m *Inputs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1859,7 +1887,7 @@ var xxx_messageInfo_Inputs proto.InternalMessageInfo func (m *Item) Reset() { *m = Item{} } func (*Item) ProtoMessage() {} func (*Item) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{65} + return fileDescriptor_724696e352c3df5f, []int{66} } func (m *Item) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1887,7 +1915,7 @@ var xxx_messageInfo_Item proto.InternalMessageInfo func (m *LabelKeys) Reset() { *m = LabelKeys{} } func (*LabelKeys) ProtoMessage() {} func (*LabelKeys) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{66} + return fileDescriptor_724696e352c3df5f, []int{67} } func (m *LabelKeys) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1915,7 +1943,7 @@ var xxx_messageInfo_LabelKeys proto.InternalMessageInfo func (m *LabelValueFrom) Reset() { *m = LabelValueFrom{} } func (*LabelValueFrom) ProtoMessage() {} func (*LabelValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{67} + return fileDescriptor_724696e352c3df5f, []int{68} } func (m *LabelValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1943,7 +1971,7 @@ var xxx_messageInfo_LabelValueFrom proto.InternalMessageInfo func (m *LabelValues) Reset() { *m = LabelValues{} } func (*LabelValues) ProtoMessage() {} func (*LabelValues) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{68} + return fileDescriptor_724696e352c3df5f, []int{69} } func (m *LabelValues) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1971,7 +1999,7 @@ var xxx_messageInfo_LabelValues proto.InternalMessageInfo func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } func 
(*LifecycleHook) ProtoMessage() {} func (*LifecycleHook) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{69} + return fileDescriptor_724696e352c3df5f, []int{70} } func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1999,7 +2027,7 @@ var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo func (m *Link) Reset() { *m = Link{} } func (*Link) ProtoMessage() {} func (*Link) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{70} + return fileDescriptor_724696e352c3df5f, []int{71} } func (m *Link) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2027,7 +2055,7 @@ var xxx_messageInfo_Link proto.InternalMessageInfo func (m *ManifestFrom) Reset() { *m = ManifestFrom{} } func (*ManifestFrom) ProtoMessage() {} func (*ManifestFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{71} + return fileDescriptor_724696e352c3df5f, []int{72} } func (m *ManifestFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2055,7 +2083,7 @@ var xxx_messageInfo_ManifestFrom proto.InternalMessageInfo func (m *MemoizationStatus) Reset() { *m = MemoizationStatus{} } func (*MemoizationStatus) ProtoMessage() {} func (*MemoizationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{72} + return fileDescriptor_724696e352c3df5f, []int{73} } func (m *MemoizationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2083,7 +2111,7 @@ var xxx_messageInfo_MemoizationStatus proto.InternalMessageInfo func (m *Memoize) Reset() { *m = Memoize{} } func (*Memoize) ProtoMessage() {} func (*Memoize) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{73} + return fileDescriptor_724696e352c3df5f, []int{74} } func (m *Memoize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2111,7 +2139,7 @@ var xxx_messageInfo_Memoize proto.InternalMessageInfo func (m *Metadata) Reset() { *m = Metadata{} } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{74} + return fileDescriptor_724696e352c3df5f, []int{75} } func (m *Metadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2139,7 +2167,7 @@ var xxx_messageInfo_Metadata proto.InternalMessageInfo func (m *MetricLabel) Reset() { *m = MetricLabel{} } func (*MetricLabel) ProtoMessage() {} func (*MetricLabel) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{75} + return fileDescriptor_724696e352c3df5f, []int{76} } func (m *MetricLabel) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2167,7 +2195,7 @@ var xxx_messageInfo_MetricLabel proto.InternalMessageInfo func (m *Metrics) Reset() { *m = Metrics{} } func (*Metrics) ProtoMessage() {} func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{76} + return fileDescriptor_724696e352c3df5f, []int{77} } func (m *Metrics) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2195,7 +2223,7 @@ var xxx_messageInfo_Metrics proto.InternalMessageInfo func (m *Mutex) Reset() { *m = Mutex{} } func (*Mutex) ProtoMessage() {} func (*Mutex) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{77} + return fileDescriptor_724696e352c3df5f, []int{78} } func (m *Mutex) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2223,7 +2251,7 @@ var xxx_messageInfo_Mutex proto.InternalMessageInfo func (m *MutexHolding) Reset() { *m = MutexHolding{} } func 
(*MutexHolding) ProtoMessage() {}
 func (*MutexHolding) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{78}
+	return fileDescriptor_724696e352c3df5f, []int{79}
 }
 func (m *MutexHolding) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2251,7 +2279,7 @@ var xxx_messageInfo_MutexHolding proto.InternalMessageInfo
 func (m *MutexStatus) Reset() { *m = MutexStatus{} }
 func (*MutexStatus) ProtoMessage() {}
 func (*MutexStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{79}
+	return fileDescriptor_724696e352c3df5f, []int{80}
 }
 func (m *MutexStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2276,10 +2304,38 @@ func (m *MutexStatus) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_MutexStatus proto.InternalMessageInfo
 
+func (m *NodeFlag) Reset() { *m = NodeFlag{} }
+func (*NodeFlag) ProtoMessage() {}
+func (*NodeFlag) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{81}
+}
+func (m *NodeFlag) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeFlag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeFlag) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeFlag.Merge(m, src)
+}
+func (m *NodeFlag) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeFlag) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeFlag.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeFlag proto.InternalMessageInfo
+
 func (m *NodeResult) Reset() { *m = NodeResult{} }
 func (*NodeResult) ProtoMessage() {}
 func (*NodeResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{80}
+	return fileDescriptor_724696e352c3df5f, []int{82}
 }
 func (m *NodeResult) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2307,7 +2363,7 @@ var xxx_messageInfo_NodeResult proto.InternalMessageInfo
 func (m *NodeStatus) Reset() { *m = NodeStatus{} }
 func (*NodeStatus) ProtoMessage() {}
 func (*NodeStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{81}
+	return fileDescriptor_724696e352c3df5f, []int{83}
 }
 func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2335,7 +2391,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
 func (m *NodeSynchronizationStatus) Reset() { *m = NodeSynchronizationStatus{} }
 func (*NodeSynchronizationStatus) ProtoMessage() {}
 func (*NodeSynchronizationStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{82}
+	return fileDescriptor_724696e352c3df5f, []int{84}
 }
 func (m *NodeSynchronizationStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2363,7 +2419,7 @@ var xxx_messageInfo_NodeSynchronizationStatus proto.InternalMessageInfo
 func (m *NoneStrategy) Reset() { *m = NoneStrategy{} }
 func (*NoneStrategy) ProtoMessage() {}
 func (*NoneStrategy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{83}
+	return fileDescriptor_724696e352c3df5f, []int{85}
 }
 func (m *NoneStrategy) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -2391,7 +2447,7 @@ var xxx_messageInfo_NoneStrategy proto.InternalMessageInfo
 func (m *OAuth2Auth) Reset() { *m = OAuth2Auth{} }
 func (*OAuth2Auth) ProtoMessage() {}
 func (*OAuth2Auth) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{84}
+	return fileDescriptor_724696e352c3df5f, []int{86}
 }
 func (m *OAuth2Auth) XXX_Unmarshal(b []byte)
error { return m.Unmarshal(b) @@ -2419,7 +2475,7 @@ var xxx_messageInfo_OAuth2Auth proto.InternalMessageInfo func (m *OAuth2EndpointParam) Reset() { *m = OAuth2EndpointParam{} } func (*OAuth2EndpointParam) ProtoMessage() {} func (*OAuth2EndpointParam) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{85} + return fileDescriptor_724696e352c3df5f, []int{87} } func (m *OAuth2EndpointParam) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2447,7 +2503,7 @@ var xxx_messageInfo_OAuth2EndpointParam proto.InternalMessageInfo func (m *OSSArtifact) Reset() { *m = OSSArtifact{} } func (*OSSArtifact) ProtoMessage() {} func (*OSSArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{86} + return fileDescriptor_724696e352c3df5f, []int{88} } func (m *OSSArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2475,7 +2531,7 @@ var xxx_messageInfo_OSSArtifact proto.InternalMessageInfo func (m *OSSArtifactRepository) Reset() { *m = OSSArtifactRepository{} } func (*OSSArtifactRepository) ProtoMessage() {} func (*OSSArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{87} + return fileDescriptor_724696e352c3df5f, []int{89} } func (m *OSSArtifactRepository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2503,7 +2559,7 @@ var xxx_messageInfo_OSSArtifactRepository proto.InternalMessageInfo func (m *OSSBucket) Reset() { *m = OSSBucket{} } func (*OSSBucket) ProtoMessage() {} func (*OSSBucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{88} + return fileDescriptor_724696e352c3df5f, []int{90} } func (m *OSSBucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2531,7 +2587,7 @@ var xxx_messageInfo_OSSBucket proto.InternalMessageInfo func (m *OSSLifecycleRule) Reset() { *m = OSSLifecycleRule{} } func (*OSSLifecycleRule) ProtoMessage() {} func (*OSSLifecycleRule) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{89} + return fileDescriptor_724696e352c3df5f, []int{91} } func (m *OSSLifecycleRule) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2559,7 +2615,7 @@ var xxx_messageInfo_OSSLifecycleRule proto.InternalMessageInfo func (m *Object) Reset() { *m = Object{} } func (*Object) ProtoMessage() {} func (*Object) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{90} + return fileDescriptor_724696e352c3df5f, []int{92} } func (m *Object) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2587,7 +2643,7 @@ var xxx_messageInfo_Object proto.InternalMessageInfo func (m *Outputs) Reset() { *m = Outputs{} } func (*Outputs) ProtoMessage() {} func (*Outputs) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{91} + return fileDescriptor_724696e352c3df5f, []int{93} } func (m *Outputs) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2615,7 +2671,7 @@ var xxx_messageInfo_Outputs proto.InternalMessageInfo func (m *ParallelSteps) Reset() { *m = ParallelSteps{} } func (*ParallelSteps) ProtoMessage() {} func (*ParallelSteps) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{92} + return fileDescriptor_724696e352c3df5f, []int{94} } func (m *ParallelSteps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2643,7 +2699,7 @@ var xxx_messageInfo_ParallelSteps proto.InternalMessageInfo func (m *Parameter) Reset() { *m = Parameter{} } func (*Parameter) ProtoMessage() {} func (*Parameter) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_724696e352c3df5f, []int{93} + return fileDescriptor_724696e352c3df5f, []int{95} } func (m *Parameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2671,7 +2727,7 @@ var xxx_messageInfo_Parameter proto.InternalMessageInfo func (m *Plugin) Reset() { *m = Plugin{} } func (*Plugin) ProtoMessage() {} func (*Plugin) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{94} + return fileDescriptor_724696e352c3df5f, []int{96} } func (m *Plugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2699,7 +2755,7 @@ var xxx_messageInfo_Plugin proto.InternalMessageInfo func (m *PodGC) Reset() { *m = PodGC{} } func (*PodGC) ProtoMessage() {} func (*PodGC) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{95} + return fileDescriptor_724696e352c3df5f, []int{97} } func (m *PodGC) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2727,7 +2783,7 @@ var xxx_messageInfo_PodGC proto.InternalMessageInfo func (m *Prometheus) Reset() { *m = Prometheus{} } func (*Prometheus) ProtoMessage() {} func (*Prometheus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{96} + return fileDescriptor_724696e352c3df5f, []int{98} } func (m *Prometheus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2755,7 +2811,7 @@ var xxx_messageInfo_Prometheus proto.InternalMessageInfo func (m *RawArtifact) Reset() { *m = RawArtifact{} } func (*RawArtifact) ProtoMessage() {} func (*RawArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{97} + return fileDescriptor_724696e352c3df5f, []int{99} } func (m *RawArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2783,7 +2839,7 @@ var xxx_messageInfo_RawArtifact proto.InternalMessageInfo func (m *ResourceTemplate) Reset() { *m = ResourceTemplate{} } func (*ResourceTemplate) ProtoMessage() {} func (*ResourceTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{98} + return fileDescriptor_724696e352c3df5f, []int{100} } func (m *ResourceTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +2867,7 @@ var xxx_messageInfo_ResourceTemplate proto.InternalMessageInfo func (m *RetryAffinity) Reset() { *m = RetryAffinity{} } func (*RetryAffinity) ProtoMessage() {} func (*RetryAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{99} + return fileDescriptor_724696e352c3df5f, []int{101} } func (m *RetryAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2839,7 +2895,7 @@ var xxx_messageInfo_RetryAffinity proto.InternalMessageInfo func (m *RetryNodeAntiAffinity) Reset() { *m = RetryNodeAntiAffinity{} } func (*RetryNodeAntiAffinity) ProtoMessage() {} func (*RetryNodeAntiAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{100} + return fileDescriptor_724696e352c3df5f, []int{102} } func (m *RetryNodeAntiAffinity) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2867,7 +2923,7 @@ var xxx_messageInfo_RetryNodeAntiAffinity proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{101} + return fileDescriptor_724696e352c3df5f, []int{103} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2895,7 +2951,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m 
*S3Artifact) Reset() { *m = S3Artifact{} } func (*S3Artifact) ProtoMessage() {} func (*S3Artifact) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{102} + return fileDescriptor_724696e352c3df5f, []int{104} } func (m *S3Artifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2923,7 +2979,7 @@ var xxx_messageInfo_S3Artifact proto.InternalMessageInfo func (m *S3ArtifactRepository) Reset() { *m = S3ArtifactRepository{} } func (*S3ArtifactRepository) ProtoMessage() {} func (*S3ArtifactRepository) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{103} + return fileDescriptor_724696e352c3df5f, []int{105} } func (m *S3ArtifactRepository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2951,7 +3007,7 @@ var xxx_messageInfo_S3ArtifactRepository proto.InternalMessageInfo func (m *S3Bucket) Reset() { *m = S3Bucket{} } func (*S3Bucket) ProtoMessage() {} func (*S3Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{104} + return fileDescriptor_724696e352c3df5f, []int{106} } func (m *S3Bucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2979,7 +3035,7 @@ var xxx_messageInfo_S3Bucket proto.InternalMessageInfo func (m *S3EncryptionOptions) Reset() { *m = S3EncryptionOptions{} } func (*S3EncryptionOptions) ProtoMessage() {} func (*S3EncryptionOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{105} + return fileDescriptor_724696e352c3df5f, []int{107} } func (m *S3EncryptionOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3007,7 +3063,7 @@ var xxx_messageInfo_S3EncryptionOptions proto.InternalMessageInfo func (m *ScriptTemplate) Reset() { *m = ScriptTemplate{} } func (*ScriptTemplate) ProtoMessage() {} func (*ScriptTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{106} + return fileDescriptor_724696e352c3df5f, []int{108} } func (m *ScriptTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3035,7 +3091,7 @@ var xxx_messageInfo_ScriptTemplate proto.InternalMessageInfo func (m *SemaphoreHolding) Reset() { *m = SemaphoreHolding{} } func (*SemaphoreHolding) ProtoMessage() {} func (*SemaphoreHolding) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{107} + return fileDescriptor_724696e352c3df5f, []int{109} } func (m *SemaphoreHolding) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3063,7 +3119,7 @@ var xxx_messageInfo_SemaphoreHolding proto.InternalMessageInfo func (m *SemaphoreRef) Reset() { *m = SemaphoreRef{} } func (*SemaphoreRef) ProtoMessage() {} func (*SemaphoreRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{108} + return fileDescriptor_724696e352c3df5f, []int{110} } func (m *SemaphoreRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3091,7 +3147,7 @@ var xxx_messageInfo_SemaphoreRef proto.InternalMessageInfo func (m *SemaphoreStatus) Reset() { *m = SemaphoreStatus{} } func (*SemaphoreStatus) ProtoMessage() {} func (*SemaphoreStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{109} + return fileDescriptor_724696e352c3df5f, []int{111} } func (m *SemaphoreStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3119,7 +3175,7 @@ var xxx_messageInfo_SemaphoreStatus proto.InternalMessageInfo func (m *Sequence) Reset() { *m = Sequence{} } func (*Sequence) ProtoMessage() {} func (*Sequence) Descriptor() ([]byte, []int) { - return 
fileDescriptor_724696e352c3df5f, []int{110} + return fileDescriptor_724696e352c3df5f, []int{112} } func (m *Sequence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3147,7 +3203,7 @@ var xxx_messageInfo_Sequence proto.InternalMessageInfo func (m *Submit) Reset() { *m = Submit{} } func (*Submit) ProtoMessage() {} func (*Submit) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{111} + return fileDescriptor_724696e352c3df5f, []int{113} } func (m *Submit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3175,7 +3231,7 @@ var xxx_messageInfo_Submit proto.InternalMessageInfo func (m *SubmitOpts) Reset() { *m = SubmitOpts{} } func (*SubmitOpts) ProtoMessage() {} func (*SubmitOpts) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{112} + return fileDescriptor_724696e352c3df5f, []int{114} } func (m *SubmitOpts) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3203,7 +3259,7 @@ var xxx_messageInfo_SubmitOpts proto.InternalMessageInfo func (m *SuppliedValueFrom) Reset() { *m = SuppliedValueFrom{} } func (*SuppliedValueFrom) ProtoMessage() {} func (*SuppliedValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{113} + return fileDescriptor_724696e352c3df5f, []int{115} } func (m *SuppliedValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3231,7 +3287,7 @@ var xxx_messageInfo_SuppliedValueFrom proto.InternalMessageInfo func (m *SuspendTemplate) Reset() { *m = SuspendTemplate{} } func (*SuspendTemplate) ProtoMessage() {} func (*SuspendTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{114} + return fileDescriptor_724696e352c3df5f, []int{116} } func (m *SuspendTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3259,7 +3315,7 @@ var xxx_messageInfo_SuspendTemplate proto.InternalMessageInfo func (m *Synchronization) Reset() { *m = Synchronization{} } func (*Synchronization) ProtoMessage() {} func (*Synchronization) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{115} + return fileDescriptor_724696e352c3df5f, []int{117} } func (m *Synchronization) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3287,7 +3343,7 @@ var xxx_messageInfo_Synchronization proto.InternalMessageInfo func (m *SynchronizationStatus) Reset() { *m = SynchronizationStatus{} } func (*SynchronizationStatus) ProtoMessage() {} func (*SynchronizationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{116} + return fileDescriptor_724696e352c3df5f, []int{118} } func (m *SynchronizationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3315,7 +3371,7 @@ var xxx_messageInfo_SynchronizationStatus proto.InternalMessageInfo func (m *TTLStrategy) Reset() { *m = TTLStrategy{} } func (*TTLStrategy) ProtoMessage() {} func (*TTLStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{117} + return fileDescriptor_724696e352c3df5f, []int{119} } func (m *TTLStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3343,7 +3399,7 @@ var xxx_messageInfo_TTLStrategy proto.InternalMessageInfo func (m *TarStrategy) Reset() { *m = TarStrategy{} } func (*TarStrategy) ProtoMessage() {} func (*TarStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{118} + return fileDescriptor_724696e352c3df5f, []int{120} } func (m *TarStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3371,7 +3427,7 @@ var 
xxx_messageInfo_TarStrategy proto.InternalMessageInfo func (m *Template) Reset() { *m = Template{} } func (*Template) ProtoMessage() {} func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{119} + return fileDescriptor_724696e352c3df5f, []int{121} } func (m *Template) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3399,7 +3455,7 @@ var xxx_messageInfo_Template proto.InternalMessageInfo func (m *TemplateRef) Reset() { *m = TemplateRef{} } func (*TemplateRef) ProtoMessage() {} func (*TemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{120} + return fileDescriptor_724696e352c3df5f, []int{122} } func (m *TemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3427,7 +3483,7 @@ var xxx_messageInfo_TemplateRef proto.InternalMessageInfo func (m *TransformationStep) Reset() { *m = TransformationStep{} } func (*TransformationStep) ProtoMessage() {} func (*TransformationStep) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{121} + return fileDescriptor_724696e352c3df5f, []int{123} } func (m *TransformationStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3455,7 +3511,7 @@ var xxx_messageInfo_TransformationStep proto.InternalMessageInfo func (m *UserContainer) Reset() { *m = UserContainer{} } func (*UserContainer) ProtoMessage() {} func (*UserContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{122} + return fileDescriptor_724696e352c3df5f, []int{124} } func (m *UserContainer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3483,7 +3539,7 @@ var xxx_messageInfo_UserContainer proto.InternalMessageInfo func (m *ValueFrom) Reset() { *m = ValueFrom{} } func (*ValueFrom) ProtoMessage() {} func (*ValueFrom) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{123} + return fileDescriptor_724696e352c3df5f, []int{125} } func (m *ValueFrom) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3511,7 +3567,7 @@ var xxx_messageInfo_ValueFrom proto.InternalMessageInfo func (m *Version) Reset() { *m = Version{} } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{124} + return fileDescriptor_724696e352c3df5f, []int{126} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3539,7 +3595,7 @@ var xxx_messageInfo_Version proto.InternalMessageInfo func (m *VolumeClaimGC) Reset() { *m = VolumeClaimGC{} } func (*VolumeClaimGC) ProtoMessage() {} func (*VolumeClaimGC) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{125} + return fileDescriptor_724696e352c3df5f, []int{127} } func (m *VolumeClaimGC) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3567,7 +3623,7 @@ var xxx_messageInfo_VolumeClaimGC proto.InternalMessageInfo func (m *Workflow) Reset() { *m = Workflow{} } func (*Workflow) ProtoMessage() {} func (*Workflow) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{126} + return fileDescriptor_724696e352c3df5f, []int{128} } func (m *Workflow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3595,7 +3651,7 @@ var xxx_messageInfo_Workflow proto.InternalMessageInfo func (m *WorkflowArtifactGCTask) Reset() { *m = WorkflowArtifactGCTask{} } func (*WorkflowArtifactGCTask) ProtoMessage() {} func (*WorkflowArtifactGCTask) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{127} + return 
fileDescriptor_724696e352c3df5f, []int{129}
 }
 func (m *WorkflowArtifactGCTask) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3623,7 +3679,7 @@ var xxx_messageInfo_WorkflowArtifactGCTask proto.InternalMessageInfo
 func (m *WorkflowArtifactGCTaskList) Reset() { *m = WorkflowArtifactGCTaskList{} }
 func (*WorkflowArtifactGCTaskList) ProtoMessage() {}
 func (*WorkflowArtifactGCTaskList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{128}
+	return fileDescriptor_724696e352c3df5f, []int{130}
 }
 func (m *WorkflowArtifactGCTaskList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3651,7 +3707,7 @@ var xxx_messageInfo_WorkflowArtifactGCTaskList proto.InternalMessageInfo
 func (m *WorkflowEventBinding) Reset() { *m = WorkflowEventBinding{} }
 func (*WorkflowEventBinding) ProtoMessage() {}
 func (*WorkflowEventBinding) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{129}
+	return fileDescriptor_724696e352c3df5f, []int{131}
 }
 func (m *WorkflowEventBinding) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3679,7 +3735,7 @@ var xxx_messageInfo_WorkflowEventBinding proto.InternalMessageInfo
 func (m *WorkflowEventBindingList) Reset() { *m = WorkflowEventBindingList{} }
 func (*WorkflowEventBindingList) ProtoMessage() {}
 func (*WorkflowEventBindingList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{130}
+	return fileDescriptor_724696e352c3df5f, []int{132}
 }
 func (m *WorkflowEventBindingList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3707,7 +3763,7 @@ var xxx_messageInfo_WorkflowEventBindingList proto.InternalMessageInfo
 func (m *WorkflowEventBindingSpec) Reset() { *m = WorkflowEventBindingSpec{} }
 func (*WorkflowEventBindingSpec) ProtoMessage() {}
 func (*WorkflowEventBindingSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{131}
+	return fileDescriptor_724696e352c3df5f, []int{133}
 }
 func (m *WorkflowEventBindingSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3732,10 +3788,38 @@ func (m *WorkflowEventBindingSpec) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_WorkflowEventBindingSpec proto.InternalMessageInfo
 
+func (m *WorkflowLevelArtifactGC) Reset() { *m = WorkflowLevelArtifactGC{} }
+func (*WorkflowLevelArtifactGC) ProtoMessage() {}
+func (*WorkflowLevelArtifactGC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_724696e352c3df5f, []int{134}
+}
+func (m *WorkflowLevelArtifactGC) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WorkflowLevelArtifactGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WorkflowLevelArtifactGC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WorkflowLevelArtifactGC.Merge(m, src)
+}
+func (m *WorkflowLevelArtifactGC) XXX_Size() int {
+	return m.Size()
+}
+func (m *WorkflowLevelArtifactGC) XXX_DiscardUnknown() {
+	xxx_messageInfo_WorkflowLevelArtifactGC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WorkflowLevelArtifactGC proto.InternalMessageInfo
+
 func (m *WorkflowList) Reset() { *m = WorkflowList{} }
 func (*WorkflowList) ProtoMessage() {}
 func (*WorkflowList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_724696e352c3df5f, []int{132}
+	return fileDescriptor_724696e352c3df5f, []int{135}
 }
 func (m *WorkflowList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -3763,7 +3847,7 @@ var xxx_messageInfo_WorkflowList
proto.InternalMessageInfo func (m *WorkflowMetadata) Reset() { *m = WorkflowMetadata{} } func (*WorkflowMetadata) ProtoMessage() {} func (*WorkflowMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{133} + return fileDescriptor_724696e352c3df5f, []int{136} } func (m *WorkflowMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3791,7 +3875,7 @@ var xxx_messageInfo_WorkflowMetadata proto.InternalMessageInfo func (m *WorkflowSpec) Reset() { *m = WorkflowSpec{} } func (*WorkflowSpec) ProtoMessage() {} func (*WorkflowSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{134} + return fileDescriptor_724696e352c3df5f, []int{137} } func (m *WorkflowSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3819,7 +3903,7 @@ var xxx_messageInfo_WorkflowSpec proto.InternalMessageInfo func (m *WorkflowStatus) Reset() { *m = WorkflowStatus{} } func (*WorkflowStatus) ProtoMessage() {} func (*WorkflowStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{135} + return fileDescriptor_724696e352c3df5f, []int{138} } func (m *WorkflowStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3847,7 +3931,7 @@ var xxx_messageInfo_WorkflowStatus proto.InternalMessageInfo func (m *WorkflowStep) Reset() { *m = WorkflowStep{} } func (*WorkflowStep) ProtoMessage() {} func (*WorkflowStep) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{136} + return fileDescriptor_724696e352c3df5f, []int{139} } func (m *WorkflowStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3875,7 +3959,7 @@ var xxx_messageInfo_WorkflowStep proto.InternalMessageInfo func (m *WorkflowTaskResult) Reset() { *m = WorkflowTaskResult{} } func (*WorkflowTaskResult) ProtoMessage() {} func (*WorkflowTaskResult) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{137} + return fileDescriptor_724696e352c3df5f, []int{140} } func (m *WorkflowTaskResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3903,7 +3987,7 @@ var xxx_messageInfo_WorkflowTaskResult proto.InternalMessageInfo func (m *WorkflowTaskResultList) Reset() { *m = WorkflowTaskResultList{} } func (*WorkflowTaskResultList) ProtoMessage() {} func (*WorkflowTaskResultList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{138} + return fileDescriptor_724696e352c3df5f, []int{141} } func (m *WorkflowTaskResultList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3931,7 +4015,7 @@ var xxx_messageInfo_WorkflowTaskResultList proto.InternalMessageInfo func (m *WorkflowTaskSet) Reset() { *m = WorkflowTaskSet{} } func (*WorkflowTaskSet) ProtoMessage() {} func (*WorkflowTaskSet) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{139} + return fileDescriptor_724696e352c3df5f, []int{142} } func (m *WorkflowTaskSet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3959,7 +4043,7 @@ var xxx_messageInfo_WorkflowTaskSet proto.InternalMessageInfo func (m *WorkflowTaskSetList) Reset() { *m = WorkflowTaskSetList{} } func (*WorkflowTaskSetList) ProtoMessage() {} func (*WorkflowTaskSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{140} + return fileDescriptor_724696e352c3df5f, []int{143} } func (m *WorkflowTaskSetList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3987,7 +4071,7 @@ var xxx_messageInfo_WorkflowTaskSetList proto.InternalMessageInfo func (m *WorkflowTaskSetSpec) Reset() { 
*m = WorkflowTaskSetSpec{} } func (*WorkflowTaskSetSpec) ProtoMessage() {} func (*WorkflowTaskSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{141} + return fileDescriptor_724696e352c3df5f, []int{144} } func (m *WorkflowTaskSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4015,7 +4099,7 @@ var xxx_messageInfo_WorkflowTaskSetSpec proto.InternalMessageInfo func (m *WorkflowTaskSetStatus) Reset() { *m = WorkflowTaskSetStatus{} } func (*WorkflowTaskSetStatus) ProtoMessage() {} func (*WorkflowTaskSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{142} + return fileDescriptor_724696e352c3df5f, []int{145} } func (m *WorkflowTaskSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4043,7 +4127,7 @@ var xxx_messageInfo_WorkflowTaskSetStatus proto.InternalMessageInfo func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } func (*WorkflowTemplate) ProtoMessage() {} func (*WorkflowTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{143} + return fileDescriptor_724696e352c3df5f, []int{146} } func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4071,7 +4155,7 @@ var xxx_messageInfo_WorkflowTemplate proto.InternalMessageInfo func (m *WorkflowTemplateList) Reset() { *m = WorkflowTemplateList{} } func (*WorkflowTemplateList) ProtoMessage() {} func (*WorkflowTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{144} + return fileDescriptor_724696e352c3df5f, []int{147} } func (m *WorkflowTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4099,7 +4183,7 @@ var xxx_messageInfo_WorkflowTemplateList proto.InternalMessageInfo func (m *WorkflowTemplateRef) Reset() { *m = WorkflowTemplateRef{} } func (*WorkflowTemplateRef) ProtoMessage() {} func (*WorkflowTemplateRef) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{145} + return fileDescriptor_724696e352c3df5f, []int{148} } func (m *WorkflowTemplateRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4127,7 +4211,7 @@ var xxx_messageInfo_WorkflowTemplateRef proto.InternalMessageInfo func (m *ZipStrategy) Reset() { *m = ZipStrategy{} } func (*ZipStrategy) ProtoMessage() {} func (*ZipStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_724696e352c3df5f, []int{146} + return fileDescriptor_724696e352c3df5f, []int{149} } func (m *ZipStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4191,6 +4275,7 @@ func init() { proto.RegisterType((*ClientCertAuth)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClientCertAuth") proto.RegisterType((*ClusterWorkflowTemplate)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplate") proto.RegisterType((*ClusterWorkflowTemplateList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ClusterWorkflowTemplateList") + proto.RegisterType((*Column)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Column") proto.RegisterType((*Condition)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Condition") proto.RegisterType((*ContainerNode)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerNode") proto.RegisterType((*ContainerSetRetryStrategy)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.ContainerSetRetryStrategy") @@ -4244,6 +4329,7 @@ func init() { 
proto.RegisterType((*Mutex)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.Mutex") proto.RegisterType((*MutexHolding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexHolding") proto.RegisterType((*MutexStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.MutexStatus") + proto.RegisterType((*NodeFlag)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeFlag") proto.RegisterType((*NodeResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeResult") proto.RegisterType((*NodeStatus)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus") proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.NodeStatus.ResourcesDurationEntry") @@ -4298,6 +4384,7 @@ func init() { proto.RegisterType((*WorkflowEventBinding)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBinding") proto.RegisterType((*WorkflowEventBindingList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingList") proto.RegisterType((*WorkflowEventBindingSpec)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowEventBindingSpec") + proto.RegisterType((*WorkflowLevelArtifactGC)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowLevelArtifactGC") proto.RegisterType((*WorkflowList)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowList") proto.RegisterType((*WorkflowMetadata)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowMetadata.AnnotationsEntry") @@ -4310,6 +4397,7 @@ func init() { proto.RegisterMapType((Nodes)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.NodesEntry") proto.RegisterMapType((ResourcesDuration)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.ResourcesDurationEntry") proto.RegisterMapType((map[string]Template)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.StoredTemplatesEntry") + proto.RegisterMapType((map[string]bool)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStatus.TaskResultsCompletionStatusEntry") proto.RegisterType((*WorkflowStep)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep") proto.RegisterMapType((LifecycleHooks)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowStep.HooksEntry") proto.RegisterType((*WorkflowTaskResult)(nil), "github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1.WorkflowTaskResult") @@ -4331,671 +4419,690 @@ func init() { } var fileDescriptor_724696e352c3df5f = []byte{ - // 10618 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0xbd, 0x7b, 0x70, 0x24, 0xc7, - 0x79, 0x18, 0xce, 0x59, 0x60, 0xf1, 0xf8, 0xf0, 0x38, 0x5c, 0xdf, 0x6b, 0x09, 0x92, 0x07, 0x7a, - 0x28, 0xf2, 0xc7, 0xb3, 0x29, 0xc0, 0x3c, 0x4a, 0xbf, 0x30, 0x52, 0x22, 0x09, 0x8f, 0x03, 0x0e, - 0x04, 0x70, 0x00, 0x7b, 0x71, 0x77, 0x26, 0xc5, 0x48, 0x1a, 0xec, 0x36, 0x76, 0x87, 0xd8, 0x9d, - 0x59, 0xcd, 0xcc, 0x02, 0x07, 0xf2, 0x28, 0x29, 0xb2, 0x9e, 0xb1, 0x62, 0x25, 0xb6, 0x24, 0x4b, - 0x4a, 0x52, 0xa5, 0x28, 
0x52, 0xa2, 0x52, 0x5c, 0x49, 0xc9, 0x95, 0x3f, 0x52, 0xf6, 0x7f, 0xa9, - 0x94, 0x4b, 0x29, 0xa7, 0x2a, 0x72, 0x99, 0x89, 0xf4, 0x47, 0x0c, 0x46, 0x70, 0xa2, 0xaa, 0x24, - 0xa5, 0xaa, 0x44, 0x15, 0x3b, 0xf6, 0xe5, 0x51, 0xa9, 0x7e, 0x4e, 0xf7, 0xec, 0x2c, 0x0e, 0xb8, - 0x6b, 0xe0, 0x58, 0xf6, 0x5f, 0xc0, 0x7e, 0xfd, 0xf5, 0xf7, 0x75, 0xf7, 0xf4, 0x7c, 0xfd, 0xbd, - 0xfa, 0x1b, 0x58, 0xab, 0xf9, 0x49, 0xbd, 0xbd, 0x31, 0x59, 0x09, 0x9b, 0x53, 0x5e, 0x54, 0x0b, - 0x5b, 0x51, 0xf8, 0x2a, 0xfb, 0xe7, 0x9d, 0x3b, 0x61, 0xb4, 0xb5, 0xd9, 0x08, 0x77, 0xe2, 0xa9, - 0xed, 0xe7, 0xa6, 0x5a, 0x5b, 0xb5, 0x29, 0xaf, 0xe5, 0xc7, 0x53, 0x12, 0x3a, 0xb5, 0xfd, 0xac, - 0xd7, 0x68, 0xd5, 0xbd, 0x67, 0xa7, 0x6a, 0x24, 0x20, 0x91, 0x97, 0x90, 0xea, 0x64, 0x2b, 0x0a, - 0x93, 0x10, 0x7d, 0x20, 0xa5, 0x38, 0x29, 0x29, 0xb2, 0x7f, 0x3e, 0xac, 0x28, 0x4e, 0x6e, 0x3f, - 0x37, 0xd9, 0xda, 0xaa, 0x4d, 0x52, 0x8a, 0x93, 0x12, 0x3a, 0x29, 0x29, 0x8e, 0xbf, 0x53, 0x1b, - 0x53, 0x2d, 0xac, 0x85, 0x53, 0x8c, 0xf0, 0x46, 0x7b, 0x93, 0xfd, 0x62, 0x3f, 0xd8, 0x7f, 0x9c, - 0xe1, 0xb8, 0xbb, 0xf5, 0x7c, 0x3c, 0xe9, 0x87, 0x74, 0x7c, 0x53, 0x95, 0x30, 0x22, 0x53, 0xdb, - 0x1d, 0x83, 0x1a, 0xbf, 0xa4, 0xe1, 0xb4, 0xc2, 0x86, 0x5f, 0xd9, 0x9d, 0xda, 0x7e, 0x76, 0x83, - 0x24, 0x9d, 0xe3, 0x1f, 0x7f, 0x57, 0x8a, 0xda, 0xf4, 0x2a, 0x75, 0x3f, 0x20, 0xd1, 0x6e, 0x3a, - 0xff, 0x26, 0x49, 0xbc, 0x3c, 0x06, 0x53, 0xdd, 0x7a, 0x45, 0xed, 0x20, 0xf1, 0x9b, 0xa4, 0xa3, - 0xc3, 0xff, 0x7f, 0xb7, 0x0e, 0x71, 0xa5, 0x4e, 0x9a, 0x5e, 0x47, 0xbf, 0xe7, 0xba, 0xf5, 0x6b, - 0x27, 0x7e, 0x63, 0xca, 0x0f, 0x92, 0x38, 0x89, 0xb2, 0x9d, 0xdc, 0x2b, 0xd0, 0x37, 0xdd, 0x0c, - 0xdb, 0x41, 0x82, 0xde, 0x0b, 0xc5, 0x6d, 0xaf, 0xd1, 0x26, 0x25, 0xe7, 0x71, 0xe7, 0xe9, 0xc1, - 0x99, 0x27, 0xbf, 0xbf, 0x37, 0xf1, 0xd0, 0xfe, 0xde, 0x44, 0xf1, 0x06, 0x05, 0xde, 0xd9, 0x9b, - 0x38, 0x4b, 0x82, 0x4a, 0x58, 0xf5, 0x83, 0xda, 0xd4, 0xab, 0x71, 0x18, 0x4c, 0x5e, 0x6b, 0x37, - 0x37, 0x48, 0x84, 0x79, 0x1f, 0xf7, 0x0f, 0x0a, 0x70, 0x6a, 0x3a, 0xaa, 0xd4, 0xfd, 0x6d, 0x52, - 0x4e, 0x28, 0xfd, 0xda, 0x2e, 0xaa, 0x43, 0x4f, 0xe2, 0x45, 0x8c, 0xdc, 0xd0, 0xe5, 0x95, 0xc9, - 0xfb, 0x7d, 0xf8, 0x93, 0xeb, 0x5e, 0x24, 0x69, 0xcf, 0xf4, 0xef, 0xef, 0x4d, 0xf4, 0xac, 0x7b, - 0x11, 0xa6, 0x2c, 0x50, 0x03, 0x7a, 0x83, 0x30, 0x20, 0xa5, 0x02, 0x63, 0x75, 0xed, 0xfe, 0x59, - 0x5d, 0x0b, 0x03, 0x35, 0x8f, 0x99, 0x81, 0xfd, 0xbd, 0x89, 0x5e, 0x0a, 0xc1, 0x8c, 0x0b, 0x9d, - 0xd7, 0x6b, 0x7e, 0xab, 0xd4, 0x63, 0x6b, 0x5e, 0x2f, 0xfb, 0x2d, 0x73, 0x5e, 0x2f, 0xfb, 0x2d, - 0x4c, 0x59, 0xb8, 0x9f, 0x2f, 0xc0, 0xe0, 0x74, 0x54, 0x6b, 0x37, 0x49, 0x90, 0xc4, 0xe8, 0xe3, - 0x00, 0x2d, 0x2f, 0xf2, 0x9a, 0x24, 0x21, 0x51, 0x5c, 0x72, 0x1e, 0xef, 0x79, 0x7a, 0xe8, 0xf2, - 0xd2, 0xfd, 0xb3, 0x5f, 0x93, 0x34, 0x67, 0x90, 0x78, 0xe4, 0xa0, 0x40, 0x31, 0xd6, 0x58, 0xa2, - 0xd7, 0x61, 0xd0, 0x8b, 0x12, 0x7f, 0xd3, 0xab, 0x24, 0x71, 0xa9, 0xc0, 0xf8, 0xbf, 0x70, 0xff, - 0xfc, 0xa7, 0x05, 0xc9, 0x99, 0xd3, 0x82, 0xfd, 0xa0, 0x84, 0xc4, 0x38, 0xe5, 0xe7, 0xfe, 0x76, - 0x2f, 0x0c, 0x4d, 0x47, 0xc9, 0xc2, 0x6c, 0x39, 0xf1, 0x92, 0x76, 0x8c, 0x7e, 0xcf, 0x81, 0x33, - 0x31, 0x5f, 0x36, 0x9f, 0xc4, 0x6b, 0x51, 0x58, 0x21, 0x71, 0x4c, 0xaa, 0x62, 0x5d, 0x36, 0xad, - 0x8c, 0x4b, 0x32, 0x9b, 0x2c, 0x77, 0x32, 0xba, 0x12, 0x24, 0xd1, 0xee, 0xcc, 0xb3, 0x62, 0xcc, - 0x67, 0x72, 0x30, 0x3e, 0xf9, 0xd6, 0x04, 0x92, 0x53, 0xa1, 0x94, 0xf8, 0x23, 0xc6, 0x79, 0xa3, - 0x46, 0x5f, 0x73, 0x60, 0xb8, 0x15, 0x56, 0x63, 0x4c, 0x2a, 0x61, 0xbb, 0x45, 0xaa, 0x62, 0x79, - 0x3f, 0x6c, 0x77, 0x1a, 0x6b, 0x1a, 0x07, 0x3e, 
0xfe, 0xb3, 0x62, 0xfc, 0xc3, 0x7a, 0x13, 0x36, - 0x86, 0x82, 0x9e, 0x87, 0xe1, 0x20, 0x4c, 0xca, 0x2d, 0x52, 0xf1, 0x37, 0x7d, 0x52, 0x65, 0x1b, - 0x7f, 0x20, 0xed, 0x79, 0x4d, 0x6b, 0xc3, 0x06, 0xe6, 0xf8, 0x3c, 0x94, 0xba, 0xad, 0x1c, 0x1a, - 0x83, 0x9e, 0x2d, 0xb2, 0xcb, 0x85, 0x0d, 0xa6, 0xff, 0xa2, 0xb3, 0x52, 0x00, 0xd1, 0xd7, 0x78, - 0x40, 0x48, 0x96, 0xf7, 0x14, 0x9e, 0x77, 0xc6, 0xdf, 0x0f, 0xa7, 0x3b, 0x86, 0x7e, 0x14, 0x02, - 0xee, 0x0f, 0xfa, 0x60, 0x40, 0x3e, 0x0a, 0xf4, 0x38, 0xf4, 0x06, 0x5e, 0x53, 0xca, 0xb9, 0x61, - 0x31, 0x8f, 0xde, 0x6b, 0x5e, 0x93, 0xbe, 0xe1, 0x5e, 0x93, 0x50, 0x8c, 0x96, 0x97, 0xd4, 0x19, - 0x1d, 0x0d, 0x63, 0xcd, 0x4b, 0xea, 0x98, 0xb5, 0xa0, 0x47, 0xa1, 0xb7, 0x19, 0x56, 0x09, 0x5b, - 0x8b, 0x22, 0x97, 0x10, 0x2b, 0x61, 0x95, 0x60, 0x06, 0xa5, 0xfd, 0x37, 0xa3, 0xb0, 0x59, 0xea, - 0x35, 0xfb, 0xcf, 0x47, 0x61, 0x13, 0xb3, 0x16, 0xf4, 0x55, 0x07, 0xc6, 0xe4, 0xde, 0x5e, 0x0e, - 0x2b, 0x5e, 0xe2, 0x87, 0x41, 0xa9, 0xc8, 0x24, 0x0a, 0xb6, 0xf7, 0x4a, 0x49, 0xca, 0x33, 0x25, - 0x31, 0x84, 0xb1, 0x6c, 0x0b, 0xee, 0x18, 0x05, 0xba, 0x0c, 0x50, 0x6b, 0x84, 0x1b, 0x5e, 0x83, - 0x2e, 0x48, 0xa9, 0x8f, 0x4d, 0x41, 0x49, 0x86, 0x05, 0xd5, 0x82, 0x35, 0x2c, 0x74, 0x0b, 0xfa, - 0x3d, 0x2e, 0xfd, 0x4b, 0xfd, 0x6c, 0x12, 0x2f, 0xda, 0x98, 0x84, 0x71, 0x9c, 0xcc, 0x0c, 0xed, - 0xef, 0x4d, 0xf4, 0x0b, 0x20, 0x96, 0xec, 0xd0, 0x33, 0x30, 0x10, 0xb6, 0xe8, 0xb8, 0xbd, 0x46, - 0x69, 0x80, 0x6d, 0xcc, 0x31, 0x31, 0xd6, 0x81, 0x55, 0x01, 0xc7, 0x0a, 0x03, 0x5d, 0x82, 0xfe, - 0xb8, 0xbd, 0x41, 0x9f, 0x63, 0x69, 0x90, 0x4d, 0xec, 0x94, 0x40, 0xee, 0x2f, 0x73, 0x30, 0x96, - 0xed, 0xe8, 0xdd, 0x30, 0x14, 0x91, 0x4a, 0x3b, 0x8a, 0x09, 0x7d, 0xb0, 0x25, 0x60, 0xb4, 0xcf, - 0x08, 0xf4, 0x21, 0x9c, 0x36, 0x61, 0x1d, 0x0f, 0xbd, 0x0f, 0x46, 0xe9, 0x03, 0xbe, 0x72, 0xab, - 0x15, 0x91, 0x38, 0xa6, 0x4f, 0x75, 0x88, 0x31, 0x3a, 0x2f, 0x7a, 0x8e, 0xce, 0x1b, 0xad, 0x38, - 0x83, 0x8d, 0x6e, 0x03, 0x78, 0x4a, 0x66, 0x94, 0x86, 0xd9, 0x62, 0x2e, 0xdb, 0xdb, 0x11, 0x0b, - 0xb3, 0x33, 0xa3, 0xf4, 0x39, 0xa6, 0xbf, 0xb1, 0xc6, 0x8f, 0xae, 0x4f, 0x95, 0x34, 0x48, 0x42, - 0xaa, 0xa5, 0x11, 0x36, 0x61, 0xb5, 0x3e, 0x73, 0x1c, 0x8c, 0x65, 0xbb, 0xfb, 0x77, 0x0a, 0xa0, - 0x51, 0x41, 0x33, 0x30, 0x20, 0xe4, 0x9a, 0x78, 0x25, 0x67, 0x9e, 0x92, 0xcf, 0x41, 0x3e, 0xc1, - 0x3b, 0x7b, 0xb9, 0xf2, 0x50, 0xf5, 0x43, 0x6f, 0xc0, 0x50, 0x2b, 0xac, 0xae, 0x90, 0xc4, 0xab, - 0x7a, 0x89, 0x27, 0x4e, 0x73, 0x0b, 0x27, 0x8c, 0xa4, 0x38, 0x73, 0x8a, 0x3e, 0xba, 0xb5, 0x94, - 0x05, 0xd6, 0xf9, 0xa1, 0x17, 0x00, 0xc5, 0x24, 0xda, 0xf6, 0x2b, 0x64, 0xba, 0x52, 0xa1, 0x2a, - 0x11, 0x7b, 0x01, 0x7a, 0xd8, 0x64, 0xc6, 0xc5, 0x64, 0x50, 0xb9, 0x03, 0x03, 0xe7, 0xf4, 0x72, - 0xdf, 0x2c, 0xc0, 0xa8, 0x36, 0xd7, 0x16, 0xa9, 0xa0, 0xef, 0x38, 0x70, 0x4a, 0x1d, 0x67, 0x33, - 0xbb, 0xd7, 0xe8, 0xae, 0xe2, 0x87, 0x15, 0xb1, 0xf9, 0x7c, 0x29, 0x2f, 0xf5, 0x53, 0xf0, 0xe1, - 0xb2, 0xfe, 0x82, 0x98, 0xc3, 0xa9, 0x4c, 0x2b, 0xce, 0x0e, 0x6b, 0xfc, 0x2b, 0x0e, 0x9c, 0xcd, - 0x23, 0x91, 0x23, 0x73, 0xeb, 0xba, 0xcc, 0xb5, 0x2a, 0xbc, 0x28, 0x57, 0x3a, 0x19, 0x5d, 0x8e, - 0xff, 0xdf, 0x02, 0x8c, 0xe9, 0x5b, 0x88, 0x69, 0x02, 0xff, 0xc2, 0x81, 0x73, 0x72, 0x06, 0x98, - 0xc4, 0xed, 0x46, 0x66, 0x79, 0x9b, 0x56, 0x97, 0x97, 0x9f, 0xa4, 0xd3, 0x79, 0xfc, 0xf8, 0x32, - 0x3f, 0x26, 0x96, 0xf9, 0x5c, 0x2e, 0x0e, 0xce, 0x1f, 0xea, 0xf8, 0xb7, 0x1c, 0x18, 0xef, 0x4e, - 0x34, 0x67, 0xe1, 0x5b, 0xe6, 0xc2, 0xbf, 0x6c, 0x6f, 0x92, 0x9c, 0x3d, 0x5b, 0x7e, 0x36, 0x59, - 0xfd, 0x01, 0xfc, 0xe6, 0x00, 0x74, 0x9c, 0x21, 0xe8, 0x59, 0x18, 0x12, 
0xe2, 0x78, 0x39, 0xac, - 0xc5, 0x6c, 0x90, 0x03, 0xfc, 0x5d, 0x9b, 0x4e, 0xc1, 0x58, 0xc7, 0x41, 0x55, 0x28, 0xc4, 0xcf, - 0x89, 0xa1, 0x5b, 0x10, 0x6f, 0xe5, 0xe7, 0x94, 0x16, 0xd9, 0xb7, 0xbf, 0x37, 0x51, 0x28, 0x3f, - 0x87, 0x0b, 0xf1, 0x73, 0x54, 0x53, 0xaf, 0xf9, 0x89, 0x3d, 0x4d, 0x7d, 0xc1, 0x4f, 0x14, 0x1f, - 0xa6, 0xa9, 0x2f, 0xf8, 0x09, 0xa6, 0x2c, 0xa8, 0x05, 0x52, 0x4f, 0x92, 0x16, 0x3b, 0xf1, 0xad, - 0x58, 0x20, 0x57, 0xd7, 0xd7, 0xd7, 0x14, 0x2f, 0xa6, 0x5f, 0x50, 0x08, 0x66, 0x5c, 0xd0, 0xe7, - 0x1c, 0xba, 0xe2, 0xbc, 0x31, 0x8c, 0x76, 0x85, 0xe2, 0x70, 0xdd, 0xde, 0x16, 0x08, 0xa3, 0x5d, - 0xc5, 0x5c, 0x3c, 0x48, 0xd5, 0x80, 0x75, 0xd6, 0x6c, 0xe2, 0xd5, 0xcd, 0x98, 0xe9, 0x09, 0x76, - 0x26, 0x3e, 0x37, 0x5f, 0xce, 0x4c, 0x7c, 0x6e, 0xbe, 0x8c, 0x19, 0x17, 0xfa, 0x40, 0x23, 0x6f, - 0x47, 0xe8, 0x18, 0x16, 0x1e, 0x28, 0xf6, 0x76, 0xcc, 0x07, 0x8a, 0xbd, 0x1d, 0x4c, 0x59, 0x50, - 0x4e, 0x61, 0x1c, 0x33, 0x95, 0xc2, 0x0a, 0xa7, 0xd5, 0x72, 0xd9, 0xe4, 0xb4, 0x5a, 0x2e, 0x63, - 0xca, 0x82, 0x6d, 0xd2, 0x4a, 0xcc, 0xf4, 0x11, 0x3b, 0x9b, 0x74, 0x36, 0xc3, 0x69, 0x61, 0xb6, - 0x8c, 0x29, 0x0b, 0x2a, 0x32, 0xbc, 0xd7, 0xda, 0x11, 0x57, 0x66, 0x86, 0x2e, 0xaf, 0x5a, 0xd8, - 0x2f, 0x94, 0x9c, 0xe2, 0x36, 0xb8, 0xbf, 0x37, 0x51, 0x64, 0x20, 0xcc, 0x19, 0xb9, 0xbf, 0xdb, - 0x93, 0x8a, 0x0b, 0x29, 0xcf, 0xd1, 0xdf, 0x66, 0x07, 0xa1, 0x90, 0x05, 0x42, 0xf5, 0x75, 0x8e, - 0x4d, 0xf5, 0x3d, 0xc3, 0x4f, 0x3c, 0x83, 0x1d, 0xce, 0xf2, 0x47, 0xbf, 0xe6, 0x74, 0xda, 0xb6, - 0x9e, 0xfd, 0xb3, 0x2c, 0x3d, 0x98, 0xf9, 0x59, 0x71, 0xa0, 0xc9, 0x3b, 0xfe, 0x39, 0x27, 0x55, - 0x22, 0xe2, 0x6e, 0xe7, 0xc0, 0x47, 0xcc, 0x73, 0xc0, 0xa2, 0x41, 0xae, 0xcb, 0xfd, 0xcf, 0x3b, - 0x30, 0x22, 0xe1, 0x54, 0x3d, 0x8e, 0xd1, 0x2d, 0x18, 0x90, 0x23, 0x15, 0x4f, 0xcf, 0xa6, 0x2f, - 0x40, 0x29, 0xf1, 0x6a, 0x30, 0x8a, 0x9b, 0xfb, 0x9d, 0x3e, 0x40, 0xe9, 0x59, 0xd5, 0x0a, 0x63, - 0x9f, 0x49, 0xa2, 0x7b, 0x38, 0x85, 0x02, 0xed, 0x14, 0xba, 0x61, 0xf3, 0x14, 0x4a, 0x87, 0x65, - 0x9c, 0x47, 0xbf, 0x96, 0x91, 0xdb, 0xfc, 0x60, 0xfa, 0xf0, 0xb1, 0xc8, 0x6d, 0x6d, 0x08, 0x07, - 0x4b, 0xf0, 0x6d, 0x21, 0xc1, 0xf9, 0xd1, 0xf5, 0x4b, 0x76, 0x25, 0xb8, 0x36, 0x8a, 0xac, 0x2c, - 0x8f, 0xb8, 0x84, 0xe5, 0x67, 0xd7, 0x4d, 0xab, 0x12, 0x56, 0xe3, 0x6a, 0xca, 0xda, 0x88, 0xcb, - 0xda, 0x3e, 0x5b, 0x3c, 0x35, 0x59, 0x9b, 0xe5, 0xa9, 0xa4, 0xee, 0x6b, 0x52, 0xea, 0xf2, 0x53, - 0xeb, 0x25, 0xcb, 0x52, 0x57, 0xe3, 0xdb, 0x29, 0x7f, 0x3f, 0x0a, 0xe7, 0x3a, 0xf1, 0x30, 0xd9, - 0x44, 0x53, 0x30, 0x58, 0x09, 0x83, 0x4d, 0xbf, 0xb6, 0xe2, 0xb5, 0x84, 0xbd, 0xa6, 0x64, 0xd1, - 0xac, 0x6c, 0xc0, 0x29, 0x0e, 0x7a, 0x8c, 0x0b, 0x1e, 0xee, 0x11, 0x19, 0x12, 0xa8, 0x3d, 0x4b, - 0x64, 0x97, 0x49, 0xa1, 0xf7, 0x0c, 0x7c, 0xf5, 0x1b, 0x13, 0x0f, 0x7d, 0xe2, 0xdf, 0x3f, 0xfe, - 0x90, 0xfb, 0xfb, 0x3d, 0xf0, 0x48, 0x2e, 0x4f, 0xa1, 0xad, 0xff, 0xa6, 0xa1, 0xad, 0x6b, 0xed, - 0x42, 0x8a, 0xdc, 0xb4, 0xa9, 0xc8, 0x6a, 0xe4, 0xf3, 0xf4, 0x72, 0xad, 0x19, 0xe7, 0x0f, 0x8a, - 0x2e, 0x54, 0xe0, 0x35, 0x49, 0xdc, 0xf2, 0x2a, 0x44, 0xcc, 0x5e, 0x2d, 0xd4, 0x35, 0xd9, 0x80, - 0x53, 0x1c, 0x6e, 0x42, 0x6f, 0x7a, 0xed, 0x46, 0x22, 0x1c, 0x65, 0x9a, 0x09, 0xcd, 0xc0, 0x58, - 0xb6, 0xa3, 0xbf, 0xeb, 0x00, 0xea, 0xe4, 0x2a, 0x5e, 0xc4, 0xf5, 0xe3, 0x58, 0x87, 0x99, 0xf3, - 0xfb, 0x9a, 0x11, 0xae, 0xcd, 0x34, 0x67, 0x1c, 0xda, 0x33, 0xfd, 0x58, 0x7a, 0x0e, 0x71, 0xe3, - 0xe0, 0x10, 0x3e, 0x34, 0xe6, 0x6a, 0xa9, 0x54, 0x48, 0x1c, 0x73, 0x77, 0x9c, 0xee, 0x6a, 0x61, - 0x60, 0x2c, 0xdb, 0xd1, 0x04, 0x14, 0x49, 0x14, 0x85, 0x91, 0xb0, 0xb5, 0xd9, 0x36, 0xbe, 0x42, - 
- [… remainder of the previous gzipped FileDescriptorProto byte array elided: machine-generated hex with no reviewable content …]
+ // 10913 bytes of a gzipped FileDescriptorProto
+ [… regenerated gzipped FileDescriptorProto byte array elided: machine-generated hex beginning with the gzip magic 0x1f, 0x8b; the byte listing continues below …]
0xd3, 0x0d, 0x2f, 0x4e, 0xe4, 0x5c, 0xad, 0xd2, 0x2e, 0x0b, 0xc1, 0xfa, 0xb3, 0xbd, + 0x75, 0x8a, 0xd6, 0x98, 0x3d, 0x47, 0x67, 0xee, 0x72, 0x96, 0x10, 0xee, 0xa4, 0x8d, 0x3e, 0xc6, + 0xf4, 0x10, 0xae, 0x24, 0x4a, 0x05, 0x60, 0xc9, 0xca, 0x1e, 0xcd, 0x69, 0x1a, 0x3a, 0x88, 0x60, + 0x83, 0x35, 0x96, 0xee, 0xbf, 0x03, 0x18, 0x9c, 0x9f, 0x59, 0x5c, 0xf7, 0xe2, 0xad, 0x1e, 0x54, + 0x73, 0x3a, 0x3b, 0x84, 0x0e, 0x95, 0x5d, 0xdf, 0x52, 0xb7, 0xc2, 0x0a, 0x03, 0x05, 0x30, 0xe0, + 0x07, 0x74, 0x41, 0x94, 0xc6, 0x6d, 0x99, 0xbf, 0xd5, 0x31, 0x83, 0xd9, 0x27, 0xae, 0x31, 0xea, + 0x58, 0x70, 0x41, 0xaf, 0xc3, 0xb0, 0x27, 0x6f, 0xb6, 0x88, 0x6d, 0x69, 0xc9, 0x86, 0x5d, 0x57, + 0x90, 0xd4, 0x23, 0x6b, 0x04, 0x08, 0xa7, 0x0c, 0xd1, 0xc7, 0x1d, 0x18, 0x91, 0x5d, 0xc7, 0x64, + 0x53, 0xb8, 0x5c, 0x57, 0xec, 0xf5, 0x19, 0x93, 0x4d, 0x1e, 0x76, 0xa1, 0x01, 0xb0, 0xce, 0xb2, + 0x43, 0x95, 0x2f, 0xf6, 0xa2, 0xca, 0xa3, 0x1d, 0x18, 0xde, 0xf1, 0x93, 0x3a, 0xdb, 0x78, 0x84, + 0xab, 0x67, 0xe1, 0xee, 0x5b, 0x4d, 0xc9, 0xa5, 0x23, 0x76, 0x53, 0x32, 0xc0, 0x29, 0x2f, 0x74, + 0x89, 0x33, 0x66, 0x37, 0x83, 0x98, 0xc8, 0x1a, 0x36, 0x2b, 0xb0, 0x02, 0x9c, 0xe2, 0xd0, 0x21, + 0x1e, 0xa5, 0xbf, 0xca, 0xe4, 0x95, 0x36, 0x5d, 0xc7, 0x22, 0x94, 0xce, 0xc2, 0xbc, 0x92, 0x14, + 0xf9, 0x60, 0xdd, 0xd4, 0x78, 0x60, 0x83, 0x23, 0x5d, 0x23, 0x3b, 0x75, 0x12, 0x88, 0x50, 0x7f, + 0xb5, 0x46, 0x6e, 0xd6, 0x49, 0x80, 0x59, 0x09, 0x7a, 0x9d, 0x1f, 0x2d, 0xb8, 0x8e, 0x2b, 0xc2, + 0xe2, 0x96, 0xed, 0xa8, 0xdd, 0x9c, 0x26, 0x8f, 0xb6, 0x4f, 0x7f, 0x63, 0x8d, 0x1f, 0x55, 0x97, + 0xc3, 0xe0, 0xca, 0x2d, 0x3f, 0x11, 0x77, 0x04, 0x94, 0xa4, 0x5b, 0x65, 0x50, 0x2c, 0x4a, 0x79, + 0x48, 0x01, 0x9d, 0x04, 0x31, 0xbb, 0x10, 0x30, 0xac, 0x87, 0x14, 0x30, 0x30, 0x96, 0xe5, 0xe8, + 0x9f, 0x38, 0x50, 0xac, 0x87, 0xe1, 0x56, 0x5c, 0x1a, 0x63, 0x93, 0xc3, 0x82, 0xaa, 0x27, 0x24, + 0xce, 0xf4, 0x55, 0x4a, 0xd6, 0xbc, 0xf5, 0x54, 0x64, 0xb0, 0xdb, 0x7b, 0x53, 0xe3, 0xcb, 0xfe, + 0x26, 0xa9, 0xec, 0x56, 0x1a, 0x84, 0x41, 0xde, 0x7c, 0x5b, 0x83, 0x5c, 0xd9, 0x26, 0x41, 0x82, + 0x79, 0xab, 0x26, 0x3f, 0xeb, 0x00, 0xa4, 0x84, 0x72, 0x7c, 0x77, 0xc4, 0xf4, 0x76, 0x5b, 0x38, + 0xe7, 0x19, 0x4d, 0xd3, 0x9d, 0x81, 0xff, 0xde, 0x81, 0x11, 0xda, 0x39, 0x29, 0x02, 0x1f, 0x83, + 0x81, 0xc4, 0x8b, 0x6a, 0x44, 0xda, 0xaf, 0xd5, 0xe7, 0x58, 0x67, 0x50, 0x2c, 0x4a, 0x51, 0x00, + 0xc5, 0xc4, 0x8b, 0xb7, 0xa4, 0x76, 0x79, 0xcd, 0xda, 0x10, 0xa7, 0x8a, 0x25, 0xfd, 0x15, 0x63, + 0xce, 0x06, 0x3d, 0x0e, 0x43, 0x54, 0x01, 0x58, 0xf0, 0x62, 0x19, 0x52, 0x32, 0x4a, 0x85, 0xf8, + 0x82, 0x80, 0x61, 0x55, 0xea, 0xfe, 0x5a, 0x01, 0xfa, 0xe7, 0xf9, 0x39, 0x63, 0x20, 0x0e, 0xdb, + 0x51, 0x85, 0x08, 0x7d, 0xd3, 0xc2, 0x9c, 0xa6, 0x74, 0xcb, 0x8c, 0xa6, 0xa6, 0xe9, 0xb3, 0xdf, + 0x58, 0xf0, 0xa2, 0x07, 0xd9, 0xf1, 0x24, 0xf2, 0x82, 0x78, 0x93, 0x79, 0x0a, 0xfc, 0x30, 0x10, + 0x43, 0x64, 0x61, 0x16, 0xae, 0x1b, 0x74, 0xcb, 0x09, 0x69, 0xa5, 0x0e, 0x0b, 0xb3, 0x0c, 0x67, + 0xda, 0xe0, 0xfe, 0xba, 0x03, 0x90, 0xb6, 0x1e, 0x7d, 0xc6, 0x81, 0x31, 0x4f, 0x0f, 0x65, 0x14, + 0x63, 0xb4, 0x6a, 0xcf, 0xad, 0xc8, 0xc8, 0xf2, 0x23, 0xb6, 0x01, 0xc2, 0x26, 0x63, 0xf7, 0xdd, + 0x50, 0x64, 0xab, 0x83, 0xe9, 0xe2, 0xc2, 0x24, 0x9b, 0xb5, 0xc1, 0x48, 0x53, 0x2d, 0x56, 0x18, + 0xee, 0x4b, 0x30, 0x7e, 0xe5, 0x16, 0xa9, 0xb4, 0x93, 0x30, 0xe2, 0x06, 0xe9, 0x2e, 0x57, 0x57, + 0x9c, 0x23, 0x5d, 0x5d, 0xf9, 0xb6, 0x03, 0x23, 0x5a, 0x5c, 0x1b, 0xdd, 0xa9, 0x6b, 0x73, 0x65, + 0x7e, 0xee, 0x16, 0x43, 0xb5, 0x64, 0x25, 0x72, 0x8e, 0x93, 0x4c, 0xb7, 0x11, 0x05, 0xc2, 0x29, + 0xc3, 0x3b, 0xc4, 0x9d, 0xb9, 0xbf, 
0xef, 0xc0, 0xb9, 0xdc, 0x20, 0xbc, 0x7b, 0xdc, 0x6c, 0xc3, + 0xf7, 0x5b, 0xe8, 0xc1, 0xf7, 0xfb, 0xdb, 0x0e, 0xa4, 0x94, 0xa8, 0x28, 0xda, 0x48, 0x5b, 0xae, + 0x89, 0x22, 0xc1, 0x49, 0x94, 0xa2, 0xd7, 0xe1, 0x82, 0xf9, 0x05, 0x8f, 0xe8, 0x06, 0xe0, 0x67, + 0xa6, 0x7c, 0x4a, 0xb8, 0x1b, 0x0b, 0xf7, 0xab, 0x0e, 0x14, 0x17, 0xbd, 0x76, 0x8d, 0xf4, 0x64, + 0xc5, 0xa1, 0x72, 0x2c, 0x22, 0x5e, 0x23, 0x91, 0x7a, 0xba, 0x90, 0x63, 0x58, 0xc0, 0xb0, 0x2a, + 0x45, 0x33, 0x30, 0x1c, 0xb6, 0x88, 0xe1, 0xba, 0x7a, 0x44, 0x8e, 0xde, 0xaa, 0x2c, 0xa0, 0xdb, + 0x0e, 0xe3, 0xae, 0x20, 0x38, 0xad, 0xe5, 0x7e, 0xbf, 0x08, 0x23, 0xda, 0x75, 0x0d, 0xaa, 0x0b, + 0x44, 0xa4, 0x15, 0x66, 0xf5, 0x65, 0x3a, 0x61, 0x30, 0x2b, 0xa1, 0x6b, 0x30, 0x22, 0xdb, 0x7e, + 0xcc, 0xc5, 0x96, 0xb1, 0x06, 0xb1, 0x80, 0x63, 0x85, 0x81, 0xa6, 0xa0, 0x58, 0x25, 0xad, 0xa4, + 0xce, 0x9a, 0xd7, 0xcf, 0x63, 0xd6, 0xe6, 0x29, 0x00, 0x73, 0x38, 0x45, 0xd8, 0x24, 0x49, 0xa5, + 0xce, 0x0c, 0x96, 0x22, 0xa8, 0x6d, 0x81, 0x02, 0x30, 0x87, 0xe7, 0x38, 0xd7, 0x8a, 0xc7, 0xef, + 0x5c, 0x1b, 0xb0, 0xec, 0x5c, 0x43, 0x2d, 0x38, 0x13, 0xc7, 0xf5, 0xb5, 0xc8, 0xdf, 0xf6, 0x12, + 0x92, 0xce, 0xbe, 0xc1, 0xc3, 0xf0, 0xb9, 0xc0, 0x2e, 0x50, 0x97, 0xaf, 0x66, 0xa9, 0xe0, 0x3c, + 0xd2, 0xa8, 0x0c, 0xe7, 0xfc, 0x20, 0x26, 0x95, 0x76, 0x44, 0xae, 0xd5, 0x82, 0x30, 0x22, 0x57, + 0xc3, 0x98, 0x92, 0x13, 0xd7, 0x3f, 0x55, 0x98, 0xe7, 0xb5, 0x3c, 0x24, 0x9c, 0x5f, 0x17, 0x2d, + 0xc2, 0xe9, 0xaa, 0x1f, 0x7b, 0x1b, 0x0d, 0x52, 0x6e, 0x6f, 0x34, 0x43, 0x7a, 0xe8, 0xe3, 0x57, + 0x32, 0x86, 0x66, 0xef, 0x97, 0xe6, 0x8d, 0xf9, 0x2c, 0x02, 0xee, 0xac, 0x83, 0x9e, 0x81, 0xd1, + 0xd8, 0x0f, 0x6a, 0x0d, 0x32, 0x1b, 0x79, 0x41, 0xa5, 0x2e, 0xee, 0x8d, 0x2a, 0x33, 0x70, 0x59, + 0x2b, 0xc3, 0x06, 0x26, 0x5b, 0xf3, 0xbc, 0x4e, 0x46, 0x1b, 0x14, 0xd8, 0xa2, 0xd4, 0xfd, 0x81, + 0x03, 0xa3, 0x7a, 0x88, 0x35, 0xd5, 0xb4, 0xa1, 0x3e, 0xbf, 0x50, 0xe6, 0x7b, 0x81, 0xbd, 0x1d, + 0xff, 0xaa, 0xa2, 0x99, 0x9e, 0x4c, 0x53, 0x18, 0xd6, 0x78, 0xf6, 0x70, 0x61, 0xfa, 0x11, 0x28, + 0x6e, 0x86, 0x54, 0x21, 0xe9, 0x33, 0xed, 0xc7, 0x0b, 0x14, 0x88, 0x79, 0x99, 0xfb, 0xbf, 0x1c, + 0x38, 0x9f, 0x1f, 0x3d, 0xfe, 0xd3, 0xd0, 0xc9, 0xcb, 0x00, 0xb4, 0x2b, 0x86, 0x50, 0xd7, 0x52, + 0x26, 0xc8, 0x12, 0xac, 0x61, 0xf5, 0xd6, 0xed, 0x3f, 0xa3, 0x4a, 0x71, 0xca, 0xe7, 0x73, 0x0e, + 0x8c, 0x51, 0xb6, 0x4b, 0xd1, 0x86, 0xd1, 0xdb, 0x55, 0x3b, 0xbd, 0x55, 0x64, 0x53, 0x33, 0xb9, + 0x01, 0xc6, 0x26, 0x73, 0xf4, 0x73, 0x30, 0xec, 0x55, 0xab, 0x11, 0x89, 0x63, 0xe5, 0x70, 0x62, + 0xbe, 0xf0, 0x19, 0x09, 0xc4, 0x69, 0x39, 0x15, 0xa2, 0xf5, 0xea, 0x66, 0x4c, 0xe5, 0x92, 0x10, + 0xdc, 0x4a, 0x88, 0x52, 0x26, 0x14, 0x8e, 0x15, 0x86, 0xfb, 0x2b, 0xfd, 0x60, 0xf2, 0x46, 0x55, + 0x38, 0xb5, 0x15, 0x6d, 0xcc, 0x31, 0x7f, 0xfd, 0x51, 0xfc, 0xe6, 0xcc, 0x9f, 0xbd, 0x64, 0x52, + 0xc0, 0x59, 0x92, 0x82, 0xcb, 0x12, 0xd9, 0x4d, 0xbc, 0x8d, 0x23, 0x7b, 0xcd, 0x97, 0x4c, 0x0a, + 0x38, 0x4b, 0x12, 0xbd, 0x1b, 0x46, 0xb6, 0xa2, 0x0d, 0x29, 0xa2, 0xb3, 0x21, 0x18, 0x4b, 0x69, + 0x11, 0xd6, 0xf1, 0xe8, 0x10, 0x6e, 0x45, 0x1b, 0x74, 0x57, 0x94, 0x09, 0x04, 0xd4, 0x10, 0x2e, + 0x09, 0x38, 0x56, 0x18, 0xa8, 0x05, 0x68, 0x4b, 0x8e, 0x9e, 0x8a, 0x4e, 0x10, 0x3b, 0x49, 0xef, + 0xc1, 0x0d, 0x2c, 0x2c, 0x7c, 0xa9, 0x83, 0x0e, 0xce, 0xa1, 0x8d, 0x5e, 0x80, 0x0b, 0x5b, 0xd1, + 0x86, 0x50, 0x16, 0xd6, 0x22, 0x3f, 0xa8, 0xf8, 0x2d, 0x23, 0x59, 0xc0, 0x94, 0x68, 0xee, 0x85, + 0xa5, 0x7c, 0x34, 0xdc, 0xad, 0xbe, 0xfb, 0x3b, 0xfd, 0xc0, 0xae, 0x39, 0x52, 0x59, 0xd8, 0x24, + 0x49, 0x3d, 0xac, 0x66, 0xf5, 0x9f, 0x15, 0x06, 0xc5, 0xa2, 
0x54, 0x06, 0x3f, 0x16, 0xba, 0x04, + 0x3f, 0xee, 0xc0, 0x60, 0x9d, 0x78, 0x55, 0x12, 0x49, 0x73, 0xdd, 0xb2, 0x9d, 0x8b, 0x99, 0x57, + 0x19, 0xd1, 0xf4, 0x18, 0xce, 0x7f, 0xc7, 0x58, 0x72, 0x43, 0xef, 0x81, 0x71, 0xaa, 0xc8, 0x84, + 0xed, 0x44, 0xda, 0xa6, 0xfb, 0x99, 0x6d, 0x9a, 0xed, 0xa8, 0xeb, 0x46, 0x09, 0xce, 0x60, 0xa2, + 0x79, 0x98, 0x10, 0x76, 0x64, 0x65, 0x06, 0x14, 0x03, 0xab, 0xb2, 0x38, 0x94, 0x33, 0xe5, 0xb8, + 0xa3, 0x06, 0x0b, 0x5e, 0x0b, 0xab, 0xdc, 0x95, 0xa8, 0x07, 0xaf, 0x85, 0xd5, 0x5d, 0xcc, 0x4a, + 0xd0, 0xab, 0x30, 0x44, 0xff, 0x2e, 0x44, 0x61, 0x53, 0xd8, 0x66, 0xd6, 0xec, 0x8c, 0x0e, 0xe5, + 0x21, 0x4e, 0x8a, 0x4c, 0xc1, 0x9b, 0x15, 0x5c, 0xb0, 0xe2, 0x47, 0xcf, 0x2b, 0x72, 0x1f, 0x2e, + 0x6f, 0xf9, 0xad, 0xe7, 0x49, 0xe4, 0x6f, 0xee, 0x32, 0xa5, 0x61, 0x28, 0x3d, 0xaf, 0x5c, 0xeb, + 0xc0, 0xc0, 0x39, 0xb5, 0xdc, 0xcf, 0x15, 0x60, 0x54, 0xbf, 0x2d, 0x7b, 0xa7, 0x88, 0xd8, 0x38, + 0x9d, 0x14, 0xfc, 0x74, 0x7a, 0xd5, 0x42, 0xb7, 0xef, 0x34, 0x21, 0xea, 0xd0, 0xef, 0xb5, 0x85, + 0xb6, 0x68, 0xc5, 0x08, 0xc6, 0x7a, 0xdc, 0x4e, 0xea, 0xfc, 0x5a, 0x15, 0x8b, 0x55, 0x65, 0x1c, + 0xdc, 0x4f, 0xf6, 0xc1, 0x90, 0x2c, 0x44, 0x9f, 0x70, 0x00, 0xd2, 0x98, 0x21, 0x21, 0x4a, 0xd7, + 0x6c, 0x04, 0x94, 0xe8, 0xe1, 0x4e, 0x9a, 0xe1, 0x5a, 0xc1, 0xb1, 0xc6, 0x17, 0x25, 0x30, 0x10, + 0xd2, 0xc6, 0x5d, 0xb6, 0x77, 0xe3, 0x7b, 0x95, 0x32, 0xbe, 0xcc, 0xb8, 0xa7, 0x66, 0x33, 0x06, + 0xc3, 0x82, 0x17, 0x3d, 0x01, 0x6e, 0xc8, 0x50, 0x36, 0x7b, 0x26, 0x66, 0x15, 0x1d, 0x97, 0x1e, + 0xe8, 0x14, 0x08, 0xa7, 0x0c, 0xdd, 0xa7, 0x60, 0xdc, 0x5c, 0x0c, 0xf4, 0x44, 0xb0, 0xb1, 0x9b, + 0x10, 0x6e, 0x6f, 0x18, 0xe5, 0x27, 0x82, 0x59, 0x0a, 0xc0, 0x1c, 0xee, 0x7e, 0x8f, 0xea, 0x01, + 0x4a, 0xbc, 0xf4, 0x60, 0xe2, 0x7f, 0x44, 0x37, 0x96, 0x75, 0x3b, 0x76, 0x7d, 0x0c, 0x86, 0xd9, + 0x3f, 0x6c, 0xa1, 0xf7, 0xd9, 0x72, 0x3c, 0xa7, 0xed, 0x14, 0x4b, 0x9d, 0xe9, 0x04, 0xcf, 0x4b, + 0x46, 0x38, 0xe5, 0xe9, 0x86, 0x30, 0x91, 0xc5, 0x46, 0x1f, 0x84, 0xd1, 0x58, 0x6e, 0xab, 0xe9, + 0xdd, 0xaf, 0x1e, 0xb7, 0x5f, 0x66, 0xf7, 0x2d, 0x6b, 0xd5, 0xb1, 0x41, 0xcc, 0x5d, 0x85, 0x01, + 0xab, 0x43, 0xe8, 0x7e, 0xcb, 0x81, 0x61, 0xe6, 0x79, 0xab, 0x45, 0x5e, 0x33, 0xad, 0xd2, 0x77, + 0xc0, 0xa8, 0xc7, 0x30, 0xc8, 0xcf, 0xe8, 0x32, 0x62, 0xc5, 0x82, 0x94, 0xe1, 0x89, 0xda, 0x52, + 0x29, 0xc3, 0x8d, 0x01, 0x31, 0x96, 0x9c, 0xdc, 0x4f, 0x15, 0x60, 0xe0, 0x5a, 0xd0, 0x6a, 0xff, + 0xb5, 0x4f, 0x16, 0xb6, 0x02, 0xfd, 0xd7, 0x12, 0xd2, 0x34, 0x73, 0xda, 0x8d, 0xce, 0x3e, 0xaa, + 0xe7, 0xb3, 0x2b, 0x99, 0xf9, 0xec, 0xb0, 0xb7, 0x23, 0x03, 0xba, 0x84, 0x8d, 0x38, 0xbd, 0xff, + 0xf6, 0x24, 0x0c, 0x2f, 0x7b, 0x1b, 0xa4, 0xb1, 0x44, 0x76, 0xd9, 0x6d, 0x35, 0x1e, 0x5c, 0xe0, + 0xa4, 0x07, 0x7b, 0x23, 0x10, 0x60, 0x1e, 0xc6, 0x19, 0xb6, 0x5a, 0x0c, 0xf4, 0xe4, 0x40, 0xd2, + 0x84, 0x40, 0x8e, 0x79, 0x72, 0xd0, 0x92, 0x01, 0x69, 0x58, 0xee, 0x34, 0x8c, 0xa4, 0x54, 0x7a, + 0xe0, 0xfa, 0x93, 0x02, 0x8c, 0x19, 0xa6, 0x6e, 0xc3, 0x01, 0xe8, 0xdc, 0xd1, 0x01, 0x68, 0x38, + 0xe4, 0x0a, 0xf7, 0xda, 0x21, 0xd7, 0x77, 0xf2, 0x0e, 0x39, 0xf3, 0x23, 0xf5, 0xf7, 0xf4, 0x91, + 0x1a, 0xd0, 0xbf, 0xec, 0x07, 0x5b, 0xbd, 0xc9, 0x99, 0xb8, 0x12, 0xb6, 0x3a, 0xe4, 0x4c, 0x99, + 0x02, 0x31, 0x2f, 0x93, 0x9a, 0x4b, 0x5f, 0xbe, 0xe6, 0xe2, 0x7e, 0xc2, 0x81, 0xd1, 0x15, 0x2f, + 0xf0, 0x37, 0x49, 0x9c, 0xb0, 0x79, 0x95, 0x1c, 0xeb, 0xad, 0xa5, 0xd1, 0x2e, 0xf7, 0xef, 0xdf, + 0x74, 0xe0, 0xf4, 0x0a, 0x69, 0x86, 0xfe, 0xab, 0x5e, 0x1a, 0x2f, 0x49, 0xdb, 0x5e, 0xf7, 0x13, + 0x11, 0x1e, 0xa6, 0xda, 0x7e, 0xd5, 0x4f, 0x30, 0x85, 0xdf, 0xc1, 0x8e, 0xcb, 0xee, 
0x03, 0xd0, + 0x03, 0x9a, 0x76, 0x93, 0x2e, 0x8d, 0x84, 0x94, 0x05, 0x38, 0xc5, 0x71, 0x7f, 0xd7, 0x81, 0x41, + 0xde, 0x08, 0x15, 0x62, 0xea, 0x74, 0xa1, 0x5d, 0x87, 0x22, 0xab, 0x27, 0x66, 0xf5, 0xa2, 0x05, + 0xf5, 0x87, 0x92, 0xe3, 0x6b, 0x90, 0xfd, 0x8b, 0x39, 0x03, 0x76, 0x6c, 0xf1, 0x6e, 0xcd, 0xa8, + 0x50, 0xd1, 0xf4, 0xd8, 0xc2, 0xa0, 0x58, 0x94, 0xba, 0x5f, 0xeb, 0x83, 0x21, 0x95, 0x76, 0x8a, + 0x25, 0x05, 0x08, 0x82, 0x30, 0xf1, 0x78, 0x60, 0x01, 0x97, 0xd5, 0x1f, 0xb4, 0x97, 0xf6, 0x6a, + 0x7a, 0x26, 0xa5, 0xce, 0xfd, 0x77, 0xea, 0x10, 0xaa, 0x95, 0x60, 0xbd, 0x11, 0xe8, 0xa3, 0x30, + 0xd0, 0xa0, 0xd2, 0x47, 0x8a, 0xee, 0xe7, 0x2d, 0x36, 0x87, 0x89, 0x35, 0xd1, 0x12, 0x35, 0x42, + 0x1c, 0x88, 0x05, 0xd7, 0xc9, 0xf7, 0xc1, 0x44, 0xb6, 0xd5, 0x77, 0xba, 0xe8, 0x37, 0xac, 0x5f, + 0x13, 0xfc, 0xdb, 0x42, 0x7a, 0x1e, 0xbe, 0xaa, 0xfb, 0x1c, 0x8c, 0xac, 0x90, 0x24, 0xf2, 0x2b, + 0x8c, 0xc0, 0x9d, 0x26, 0x57, 0x4f, 0xfa, 0xc3, 0xa7, 0xd9, 0x64, 0xa5, 0x34, 0x63, 0xf4, 0x3a, + 0x40, 0x2b, 0x0a, 0xe9, 0xf9, 0x95, 0xb4, 0xe5, 0xc7, 0xb6, 0xa0, 0x0f, 0xaf, 0x29, 0x9a, 0xdc, + 0xe5, 0x9c, 0xfe, 0xc6, 0x1a, 0x3f, 0xf7, 0x45, 0x28, 0xae, 0xb4, 0x13, 0x72, 0xab, 0x07, 0x89, + 0x75, 0xd8, 0x9b, 0xef, 0xee, 0x07, 0x61, 0x94, 0xd1, 0xbe, 0x1a, 0x36, 0xe8, 0xb6, 0x4a, 0x87, + 0xa6, 0x49, 0x7f, 0x67, 0x9d, 0x02, 0x0c, 0x09, 0xf3, 0x32, 0xba, 0x64, 0xea, 0x61, 0xa3, 0xaa, + 0x6e, 0x01, 0xa9, 0x09, 0x71, 0x95, 0x41, 0xb1, 0x28, 0x75, 0x7f, 0xa9, 0x00, 0x23, 0xac, 0xa2, + 0x10, 0x37, 0xbb, 0x30, 0x58, 0xe7, 0x7c, 0xc4, 0x18, 0x5a, 0x08, 0xa9, 0xd3, 0x5b, 0xaf, 0x9d, + 0xe5, 0x38, 0x00, 0x4b, 0x7e, 0x94, 0xf5, 0x8e, 0xe7, 0x27, 0x94, 0x75, 0xe1, 0x78, 0x59, 0xdf, + 0xe4, 0x6c, 0xb0, 0xe4, 0xe7, 0xfe, 0x22, 0xb0, 0xdb, 0xb5, 0x0b, 0x0d, 0xaf, 0xc6, 0x47, 0x2e, + 0xdc, 0x22, 0x55, 0x21, 0x73, 0xb5, 0x91, 0xa3, 0x50, 0x2c, 0x4a, 0xf9, 0x8d, 0xc5, 0x24, 0xf2, + 0x55, 0x54, 0xae, 0x76, 0x63, 0x91, 0x81, 0x65, 0x0c, 0x76, 0xd5, 0xfd, 0x52, 0x01, 0x80, 0x25, + 0x29, 0xe3, 0x97, 0x62, 0x7f, 0x1e, 0x8a, 0xad, 0xba, 0x17, 0x67, 0x1d, 0x89, 0xc5, 0x35, 0x0a, + 0xbc, 0x2d, 0xae, 0xfd, 0xb2, 0x1f, 0x98, 0x23, 0xea, 0xc1, 0xf2, 0x85, 0x83, 0x83, 0xe5, 0x51, + 0x0b, 0x06, 0xc3, 0x76, 0x42, 0x75, 0x55, 0xb1, 0xd9, 0x5b, 0xf0, 0xa3, 0xaf, 0x72, 0x82, 0x3c, + 0xc2, 0x5c, 0xfc, 0xc0, 0x92, 0x0d, 0x7a, 0x06, 0x86, 0x5a, 0x51, 0x58, 0xa3, 0x7b, 0xb7, 0xd8, + 0xde, 0x1f, 0x94, 0xfa, 0xd0, 0x9a, 0x80, 0xdf, 0xd6, 0xfe, 0xc7, 0x0a, 0xdb, 0xfd, 0xe3, 0x09, + 0x3e, 0x2e, 0x62, 0xee, 0x4d, 0x42, 0xc1, 0x97, 0x96, 0x29, 0x10, 0x24, 0x0a, 0xd7, 0xe6, 0x71, + 0xc1, 0xaf, 0xaa, 0x75, 0x55, 0xe8, 0xba, 0xae, 0xde, 0x0d, 0x23, 0x55, 0x3f, 0x6e, 0x35, 0xbc, + 0xdd, 0xeb, 0x39, 0x66, 0xc1, 0xf9, 0xb4, 0x08, 0xeb, 0x78, 0xe8, 0x49, 0x71, 0x35, 0xa2, 0xdf, + 0x30, 0x05, 0xc9, 0xab, 0x11, 0xe9, 0xa5, 0x6b, 0x7e, 0x2b, 0x22, 0x7b, 0x39, 0xbd, 0xd8, 0xf3, + 0xe5, 0xf4, 0xac, 0x26, 0x36, 0x70, 0xf2, 0x9a, 0xd8, 0x7b, 0x61, 0x4c, 0xfe, 0x64, 0xea, 0x51, + 0xe9, 0x2c, 0x6b, 0xbd, 0x32, 0x57, 0xaf, 0xeb, 0x85, 0xd8, 0xc4, 0x4d, 0x27, 0xed, 0x60, 0xaf, + 0x93, 0xf6, 0x32, 0xc0, 0x46, 0xd8, 0x0e, 0xaa, 0x5e, 0xb4, 0x7b, 0x6d, 0x5e, 0x04, 0x52, 0x2a, + 0xc5, 0x6f, 0x56, 0x95, 0x60, 0x0d, 0x4b, 0x9f, 0xe8, 0xc3, 0x77, 0x98, 0xe8, 0x1f, 0x84, 0x61, + 0x16, 0x74, 0x4a, 0xaa, 0x33, 0x89, 0x08, 0x31, 0x3a, 0x4c, 0x7c, 0xa2, 0x92, 0xb9, 0x65, 0x49, + 0x04, 0xa7, 0xf4, 0xd0, 0x87, 0x00, 0x36, 0xfd, 0xc0, 0x8f, 0xeb, 0x8c, 0xfa, 0xc8, 0xa1, 0xa9, + 0xab, 0x7e, 0x2e, 0x28, 0x2a, 0x58, 0xa3, 0x88, 0x5e, 0x82, 0xd3, 0x24, 0x4e, 0xfc, 0xa6, 0x97, + 0x90, 0xaa, 
0xba, 0x4c, 0x58, 0x62, 0xb6, 0x4c, 0x15, 0xf6, 0x7b, 0x25, 0x8b, 0x70, 0x3b, 0x0f, + 0x88, 0x3b, 0x09, 0x19, 0x2b, 0x72, 0xf2, 0x30, 0x2b, 0x12, 0xfd, 0xb9, 0x03, 0xa7, 0x23, 0xc2, + 0xe3, 0x4e, 0x62, 0xd5, 0xb0, 0x73, 0x4c, 0x1c, 0x57, 0x6c, 0xe4, 0xff, 0x56, 0x89, 0x3e, 0x70, + 0x96, 0x0b, 0x57, 0x5c, 0x88, 0xec, 0x7d, 0x47, 0xf9, 0xed, 0x3c, 0xe0, 0x9b, 0x6f, 0x4f, 0x4d, + 0x75, 0xe6, 0xa1, 0x57, 0xc4, 0xe9, 0xca, 0xfb, 0xfb, 0x6f, 0x4f, 0x4d, 0xc8, 0xdf, 0xe9, 0xa0, + 0x75, 0x74, 0x92, 0x6e, 0xab, 0xad, 0xb0, 0x7a, 0x6d, 0x4d, 0xc4, 0x82, 0xa9, 0x6d, 0x75, 0x8d, + 0x02, 0x31, 0x2f, 0x43, 0x8f, 0xc3, 0x50, 0xd5, 0x23, 0xcd, 0x30, 0x50, 0x99, 0x5c, 0x99, 0x36, + 0x3f, 0x2f, 0x60, 0x58, 0x95, 0xd2, 0x33, 0x44, 0x20, 0xb6, 0x94, 0xd2, 0x03, 0xb6, 0xce, 0x10, + 0x72, 0x93, 0xe2, 0x5c, 0xe5, 0x2f, 0xac, 0x38, 0xa1, 0x06, 0x0c, 0xf8, 0xcc, 0x50, 0x21, 0xc2, + 0x4d, 0x2d, 0x58, 0x47, 0xb8, 0xe1, 0x43, 0x06, 0x9b, 0x32, 0xd1, 0x2f, 0x78, 0xe8, 0x7b, 0xcd, + 0xa9, 0x93, 0xd9, 0x6b, 0x1e, 0x87, 0xa1, 0x4a, 0xdd, 0x6f, 0x54, 0x23, 0x12, 0x94, 0x26, 0xd8, + 0x89, 0x9d, 0x8d, 0xc4, 0x9c, 0x80, 0x61, 0x55, 0x8a, 0xfe, 0x16, 0x8c, 0x85, 0xed, 0x84, 0x89, + 0x16, 0x3a, 0x4e, 0x71, 0xe9, 0x34, 0x43, 0x67, 0xc1, 0x43, 0xab, 0x7a, 0x01, 0x36, 0xf1, 0xa8, + 0x88, 0xaf, 0x87, 0x31, 0xcb, 0x49, 0xc3, 0x44, 0xfc, 0x79, 0x53, 0xc4, 0x5f, 0xd5, 0xca, 0xb0, + 0x81, 0x89, 0xbe, 0xe2, 0xc0, 0xe9, 0x66, 0xf6, 0x00, 0x57, 0xba, 0xc0, 0x46, 0xa6, 0x6c, 0x43, + 0xd1, 0xcf, 0x90, 0xe6, 0x31, 0xd6, 0x1d, 0x60, 0xdc, 0xd9, 0x08, 0x96, 0x1d, 0x2a, 0xde, 0x0d, + 0x2a, 0xf5, 0x28, 0x0c, 0xcc, 0xe6, 0xdd, 0x6f, 0xeb, 0x4e, 0x14, 0x5b, 0xdb, 0x79, 0x2c, 0x66, + 0xef, 0xdf, 0xdf, 0x9b, 0x3a, 0x97, 0x5b, 0x84, 0xf3, 0x1b, 0x35, 0x39, 0x0f, 0xe7, 0xf3, 0xe5, + 0xc3, 0x9d, 0x4e, 0x1c, 0x7d, 0xfa, 0x89, 0x63, 0x01, 0xee, 0xef, 0xda, 0x28, 0xba, 0xd3, 0x48, + 0x6d, 0xd3, 0x31, 0x77, 0x9a, 0x0e, 0xed, 0x70, 0x1c, 0x46, 0xf5, 0x87, 0x0b, 0xdc, 0xff, 0xd7, + 0x07, 0x90, 0xda, 0xc9, 0x91, 0x07, 0xe3, 0xdc, 0x26, 0x7f, 0x6d, 0xfe, 0xc8, 0xb7, 0xb9, 0xe7, + 0x0c, 0x02, 0x38, 0x43, 0x10, 0x35, 0x01, 0x71, 0x08, 0xff, 0x7d, 0x14, 0xdf, 0x2a, 0x73, 0x45, + 0xce, 0x75, 0x10, 0xc1, 0x39, 0x84, 0x69, 0x8f, 0x92, 0x70, 0x8b, 0x04, 0x37, 0xf0, 0xf2, 0x51, + 0x52, 0x02, 0x70, 0x6f, 0x9c, 0x41, 0x00, 0x67, 0x08, 0x22, 0x17, 0x06, 0x98, 0x6d, 0x46, 0x06, + 0x68, 0x33, 0xf1, 0xc2, 0x34, 0x8d, 0x18, 0x8b, 0x12, 0xf4, 0x25, 0x07, 0xc6, 0x65, 0x66, 0x03, + 0x66, 0x0d, 0x95, 0xa1, 0xd9, 0x37, 0x6c, 0xf9, 0x39, 0xae, 0xe8, 0xd4, 0xd3, 0xc0, 0x47, 0x03, + 0x1c, 0xe3, 0x4c, 0x23, 0xdc, 0x17, 0xe0, 0x4c, 0x4e, 0x75, 0x2b, 0x27, 0xda, 0x6f, 0x3b, 0x30, + 0xa2, 0x25, 0xdc, 0x43, 0xaf, 0xc3, 0x70, 0x58, 0xb6, 0x1e, 0x6d, 0xb7, 0x5a, 0xee, 0x88, 0xb6, + 0x53, 0x20, 0x9c, 0x32, 0xec, 0x25, 0x48, 0x30, 0x37, 0x3b, 0xe0, 0x3d, 0x6e, 0xf6, 0xa1, 0x83, + 0x04, 0x7f, 0xa5, 0x08, 0x29, 0xa5, 0x43, 0x66, 0xdc, 0x48, 0x43, 0x0a, 0x0b, 0x07, 0x86, 0x14, + 0x56, 0xe1, 0x94, 0xc7, 0x7c, 0xc9, 0x47, 0xcc, 0xb3, 0xc1, 0xf3, 0xad, 0x9a, 0x14, 0x70, 0x96, + 0x24, 0xe5, 0x12, 0xa7, 0x55, 0x19, 0x97, 0xfe, 0x43, 0x73, 0x29, 0x9b, 0x14, 0x70, 0x96, 0x24, + 0x7a, 0x09, 0x4a, 0x15, 0x76, 0x6f, 0x94, 0xf7, 0xf1, 0xda, 0xe6, 0xf5, 0x30, 0x59, 0x8b, 0x48, + 0x4c, 0x82, 0x44, 0x64, 0xd4, 0x7a, 0x58, 0x8c, 0x42, 0x69, 0xae, 0x0b, 0x1e, 0xee, 0x4a, 0x81, + 0x1e, 0x53, 0x98, 0x33, 0xda, 0x4f, 0x76, 0x99, 0x10, 0x11, 0x5e, 0x7a, 0x75, 0x4c, 0x29, 0xeb, + 0x85, 0xd8, 0xc4, 0x45, 0xbf, 0xec, 0xc0, 0x58, 0x43, 0x9a, 0xeb, 0x71, 0xbb, 0x21, 0xd3, 0x43, + 0x62, 0x2b, 0xd3, 0x6f, 0x59, 0xa7, 
0xcc, 0x75, 0x09, 0x03, 0x84, 0x4d, 0xde, 0xd9, 0xa4, 0x27, + 0x43, 0x3d, 0x26, 0x3d, 0xf9, 0x9e, 0x03, 0x13, 0x59, 0x6e, 0x68, 0x0b, 0x1e, 0x6a, 0x7a, 0xd1, + 0xd6, 0xb5, 0x60, 0x33, 0x62, 0x17, 0x31, 0x12, 0x3e, 0x19, 0x66, 0x36, 0x13, 0x12, 0xcd, 0x7b, + 0xbb, 0xdc, 0xfd, 0x59, 0x54, 0xef, 0x0b, 0x3d, 0xb4, 0x72, 0x10, 0x32, 0x3e, 0x98, 0x16, 0x2a, + 0xc3, 0x39, 0x8a, 0xc0, 0x72, 0xa2, 0xf9, 0x61, 0x90, 0x32, 0x29, 0x30, 0x26, 0x2a, 0x18, 0x70, + 0x25, 0x0f, 0x09, 0xe7, 0xd7, 0x75, 0xaf, 0xc0, 0x00, 0xbf, 0x84, 0x76, 0x57, 0xfe, 0x23, 0xf7, + 0x3f, 0x14, 0x40, 0x2a, 0x86, 0x7f, 0xbd, 0xdd, 0x71, 0x74, 0x13, 0x8d, 0x98, 0x49, 0x49, 0x58, + 0x3b, 0xd8, 0x26, 0x2a, 0xb2, 0x0f, 0x8a, 0x12, 0xaa, 0x31, 0x93, 0x5b, 0x7e, 0x32, 0x17, 0x56, + 0xa5, 0x8d, 0x83, 0x69, 0xcc, 0x57, 0x04, 0x0c, 0xab, 0x52, 0xf7, 0x13, 0x0e, 0x8c, 0xd1, 0x5e, + 0x36, 0x1a, 0xa4, 0x51, 0x4e, 0x48, 0x2b, 0x46, 0x31, 0x14, 0x63, 0xfa, 0x8f, 0x3d, 0x53, 0x60, + 0x7a, 0x71, 0x91, 0xb4, 0x34, 0x67, 0x0d, 0x65, 0x82, 0x39, 0x2f, 0xf7, 0xad, 0x3e, 0x18, 0x56, + 0x83, 0xdd, 0x83, 0x3d, 0xf5, 0x72, 0x9a, 0x18, 0x94, 0x4b, 0xe0, 0x92, 0x96, 0x14, 0xf4, 0x36, + 0x1d, 0xba, 0x60, 0x97, 0x67, 0x48, 0x48, 0x33, 0x84, 0x3e, 0x69, 0xba, 0x9a, 0xcf, 0xeb, 0xf3, + 0x4f, 0xc3, 0x17, 0x3e, 0xe7, 0x5b, 0xba, 0xa7, 0xbf, 0xdf, 0xd6, 0x6e, 0xa6, 0xdc, 0x98, 0xdd, + 0x5d, 0xfc, 0x99, 0x37, 0x63, 0x8a, 0x3d, 0xbd, 0x19, 0xf3, 0x04, 0xf4, 0x93, 0xa0, 0xdd, 0x64, + 0xaa, 0xd2, 0x30, 0x3b, 0x22, 0xf4, 0x5f, 0x09, 0xda, 0x4d, 0xb3, 0x67, 0x0c, 0x05, 0xbd, 0x0f, + 0x46, 0xaa, 0x24, 0xae, 0x44, 0x3e, 0xbb, 0xf6, 0x2f, 0x2c, 0x3b, 0x0f, 0x32, 0x73, 0x59, 0x0a, + 0x36, 0x2b, 0xea, 0x15, 0xdc, 0x57, 0x61, 0x60, 0xad, 0xd1, 0xae, 0xf9, 0x01, 0x6a, 0xc1, 0x00, + 0x4f, 0x02, 0x20, 0x76, 0x7b, 0x0b, 0xe7, 0x4e, 0x2e, 0x2a, 0xb4, 0x28, 0x14, 0x7e, 0x7f, 0x55, + 0xf0, 0x71, 0x7f, 0xab, 0x00, 0xf4, 0x68, 0xbe, 0x38, 0x87, 0xfe, 0x6e, 0xc7, 0x13, 0x29, 0x3f, + 0x93, 0xf3, 0x44, 0xca, 0x18, 0x43, 0xce, 0x79, 0x1d, 0xa5, 0x01, 0x63, 0xcc, 0x39, 0x22, 0xf7, + 0x40, 0xa1, 0x56, 0x3f, 0xdd, 0xe3, 0xbd, 0x79, 0xbd, 0xaa, 0xd8, 0x11, 0x74, 0x10, 0x36, 0x89, + 0xa3, 0x5d, 0x38, 0xc3, 0xf3, 0x4b, 0xce, 0x93, 0x86, 0xb7, 0x6b, 0xe4, 0x91, 0xea, 0xf9, 0xae, + 0xbe, 0xac, 0xc5, 0x03, 0xbc, 0xe7, 0x3b, 0xc9, 0xe1, 0x3c, 0x1e, 0xee, 0xef, 0xf5, 0x83, 0xe6, + 0xbe, 0xe8, 0x61, 0x65, 0xbd, 0x92, 0x71, 0x56, 0xad, 0x58, 0x71, 0x56, 0x49, 0x0f, 0x10, 0x97, + 0x56, 0xa6, 0x7f, 0x8a, 0x36, 0xaa, 0x4e, 0x1a, 0x2d, 0xb1, 0x2e, 0x55, 0xa3, 0xae, 0x92, 0x46, + 0x0b, 0xb3, 0x12, 0x75, 0xf9, 0xb0, 0xbf, 0xeb, 0xe5, 0xc3, 0x3a, 0x14, 0x6b, 0x5e, 0xbb, 0x46, + 0x44, 0xb4, 0xa6, 0x05, 0xbf, 0x24, 0xbb, 0x0e, 0xc1, 0xfd, 0x92, 0xec, 0x5f, 0xcc, 0x19, 0x50, + 0xc1, 0x50, 0x97, 0xe1, 0x2b, 0xc2, 0xa0, 0x6b, 0x41, 0x30, 0xa8, 0x88, 0x18, 0x2e, 0x18, 0xd4, + 0x4f, 0x9c, 0x32, 0x43, 0x2d, 0x18, 0xac, 0xf0, 0x4c, 0x1f, 0x42, 0xbf, 0xb9, 0x66, 0xe3, 0x76, + 0x25, 0x23, 0xc8, 0x2d, 0x2f, 0xe2, 0x07, 0x96, 0x6c, 0xdc, 0x4b, 0x30, 0xa2, 0xbd, 0xea, 0x40, + 0x3f, 0x83, 0x4a, 0x32, 0xa1, 0x7d, 0x86, 0x79, 0x2f, 0xf1, 0x30, 0x2b, 0x71, 0xbf, 0xd1, 0x0f, + 0xca, 0xee, 0xa6, 0xdf, 0x05, 0xf4, 0x2a, 0x5a, 0x4a, 0x1c, 0xe3, 0x12, 0x7a, 0x18, 0x60, 0x51, + 0x4a, 0x75, 0xc0, 0x26, 0x89, 0x6a, 0xea, 0xcc, 0x2d, 0x44, 0xbb, 0xd2, 0x01, 0x57, 0xf4, 0x42, + 0x6c, 0xe2, 0x52, 0x05, 0xbe, 0x29, 0xdc, 0xf9, 0xd9, 0x60, 0x69, 0xe9, 0xe6, 0xc7, 0x0a, 0x03, + 0x7d, 0xc2, 0x81, 0xd1, 0xa6, 0xe6, 0xfd, 0x17, 0x41, 0x9b, 0x36, 0x9c, 0x4f, 0x1a, 0x55, 0x1e, + 0x5c, 0xa5, 0x43, 0xb0, 0xc1, 0x15, 0x2d, 0xc2, 0xe9, 0x98, 
0x24, 0xab, 0x3b, 0x01, 0x89, 0xd4, + 0x1d, 0x7d, 0x91, 0xb4, 0x41, 0xdd, 0x94, 0x28, 0x67, 0x11, 0x70, 0x67, 0x9d, 0xdc, 0x38, 0xd7, + 0xe2, 0xa1, 0xe3, 0x5c, 0xe7, 0x61, 0x62, 0xd3, 0xf3, 0x1b, 0xed, 0x88, 0x74, 0x8d, 0x96, 0x5d, + 0xc8, 0x94, 0xe3, 0x8e, 0x1a, 0xec, 0xb2, 0x4e, 0xc3, 0xab, 0xc5, 0xa5, 0x41, 0xed, 0xb2, 0x0e, + 0x05, 0x60, 0x0e, 0x77, 0x7f, 0xc3, 0x01, 0x9e, 0x2d, 0x67, 0x66, 0x73, 0xd3, 0x0f, 0xfc, 0x64, + 0x17, 0x7d, 0xd5, 0x81, 0x89, 0x20, 0xac, 0x92, 0x99, 0x20, 0xf1, 0x25, 0xd0, 0x5e, 0x0a, 0x73, + 0xc6, 0xeb, 0x7a, 0x86, 0x3c, 0x4f, 0xbd, 0x90, 0x85, 0xe2, 0x8e, 0x66, 0xb8, 0x17, 0xe0, 0x5c, + 0x2e, 0x01, 0xf7, 0x7b, 0x7d, 0x60, 0x26, 0xfd, 0x41, 0xcf, 0x41, 0xb1, 0xc1, 0xd2, 0x50, 0x38, + 0x47, 0xcc, 0xe6, 0xc4, 0xc6, 0x8a, 0xe7, 0xa9, 0xe0, 0x94, 0xd0, 0x3c, 0x8c, 0xb0, 0x4c, 0x42, + 0x22, 0x49, 0x08, 0x5f, 0x11, 0x6e, 0xfa, 0x72, 0x9a, 0x2a, 0xba, 0x6d, 0xfe, 0xc4, 0x7a, 0x35, + 0xf4, 0x1a, 0x0c, 0x6e, 0xf0, 0x7c, 0x8a, 0xf6, 0xfc, 0x83, 0x22, 0x41, 0x23, 0xd3, 0xa3, 0x64, + 0xb6, 0xc6, 0xdb, 0xe9, 0xbf, 0x58, 0x72, 0x44, 0xbb, 0x30, 0xe4, 0xc9, 0x6f, 0xda, 0x6f, 0xeb, + 0xf2, 0x85, 0x31, 0x7f, 0x44, 0x74, 0x8d, 0xfc, 0x86, 0x8a, 0x5d, 0x26, 0x0c, 0xa9, 0xd8, 0x53, + 0x18, 0xd2, 0xb7, 0x1c, 0x80, 0xf4, 0xf1, 0x09, 0x74, 0x0b, 0x86, 0xe2, 0xa7, 0x0d, 0xa3, 0x86, + 0x8d, 0x5b, 0xf7, 0x82, 0xa2, 0x76, 0x33, 0x55, 0x40, 0xb0, 0xe2, 0x76, 0x27, 0x43, 0xcc, 0x4f, + 0x1c, 0x38, 0x9b, 0xf7, 0x48, 0xc6, 0x3d, 0x6c, 0xf1, 0x61, 0x6d, 0x30, 0xa2, 0xc2, 0x5a, 0x44, + 0x36, 0xfd, 0x5b, 0x39, 0x59, 0x7d, 0x79, 0x01, 0x4e, 0x71, 0xdc, 0x37, 0x07, 0x41, 0x31, 0x3e, + 0x26, 0x9b, 0xcd, 0x63, 0xf4, 0x7c, 0x55, 0x4b, 0x2f, 0x4b, 0x2a, 0x3c, 0xcc, 0xa0, 0x58, 0x94, + 0xd2, 0x33, 0x96, 0x0c, 0xa0, 0x17, 0x22, 0x9b, 0xcd, 0x42, 0x19, 0x68, 0x8f, 0x55, 0x69, 0x9e, + 0x15, 0xa8, 0x78, 0x22, 0x56, 0xa0, 0x01, 0xfb, 0x56, 0xa0, 0x27, 0x60, 0x30, 0x0a, 0x1b, 0x64, + 0x06, 0x5f, 0x17, 0x27, 0x87, 0x34, 0x00, 0x82, 0x83, 0xb1, 0x2c, 0x3f, 0xa2, 0x1d, 0x04, 0xfd, + 0xb6, 0x73, 0x80, 0xa1, 0x69, 0xd8, 0xd6, 0x9e, 0x90, 0x9b, 0x02, 0x8d, 0x1d, 0x83, 0x8e, 0x62, + 0xbd, 0xfa, 0x9a, 0x03, 0xa7, 0x49, 0x50, 0x89, 0x76, 0x19, 0x1d, 0x41, 0x4d, 0xf8, 0xa7, 0x6f, + 0xd8, 0x58, 0x7c, 0x57, 0xb2, 0xc4, 0xb9, 0x1b, 0xa8, 0x03, 0x8c, 0x3b, 0x9b, 0x81, 0x56, 0x61, + 0xa8, 0xe2, 0x89, 0x19, 0x31, 0x72, 0x98, 0x19, 0xc1, 0xbd, 0x6c, 0x33, 0x62, 0x2a, 0x28, 0x22, + 0xee, 0x8f, 0x0a, 0x70, 0x26, 0xa7, 0x49, 0xec, 0xb2, 0x55, 0x93, 0xce, 0xc8, 0x6b, 0xd5, 0xec, + 0x7a, 0x5c, 0x12, 0x70, 0xac, 0x30, 0xd0, 0x1a, 0x9c, 0xdd, 0x6a, 0xc6, 0x29, 0x95, 0xb9, 0x30, + 0x48, 0xc8, 0x2d, 0xb9, 0x3a, 0xa5, 0xef, 0xfa, 0xec, 0x52, 0x0e, 0x0e, 0xce, 0xad, 0x49, 0xd5, + 0x17, 0x12, 0x78, 0x1b, 0x0d, 0x92, 0x16, 0x89, 0xab, 0x82, 0x4a, 0x7d, 0xb9, 0x92, 0x29, 0xc7, + 0x1d, 0x35, 0xd0, 0x67, 0x1c, 0x78, 0x20, 0x26, 0xd1, 0x36, 0x89, 0xca, 0x7e, 0x95, 0xcc, 0xb5, + 0xe3, 0x24, 0x6c, 0x92, 0xe8, 0x88, 0xa6, 0xd5, 0xa9, 0xfd, 0xbd, 0xa9, 0x07, 0xca, 0xdd, 0xa9, + 0xe1, 0x83, 0x58, 0xb9, 0x9f, 0x71, 0x60, 0xbc, 0xcc, 0x0e, 0xde, 0x4a, 0x97, 0xb6, 0x9d, 0x04, + 0xf3, 0x31, 0x95, 0xdc, 0x22, 0x23, 0x15, 0xcd, 0x74, 0x14, 0xee, 0xcb, 0x30, 0x51, 0x26, 0x4d, + 0xaf, 0x55, 0x67, 0xf7, 0x7c, 0x79, 0xec, 0xd6, 0x25, 0x18, 0x8e, 0x25, 0x2c, 0xfb, 0xee, 0x8d, + 0x42, 0xc6, 0x29, 0x0e, 0x7a, 0x94, 0xc7, 0x99, 0xc9, 0xdb, 0x42, 0xc3, 0xfc, 0xd4, 0xc1, 0x83, + 0xd3, 0x62, 0x2c, 0xcb, 0xdc, 0xb7, 0x1c, 0x18, 0x4d, 0xeb, 0x93, 0x4d, 0x54, 0x83, 0x53, 0x15, + 0xed, 0xa6, 0x5d, 0x7a, 0xc7, 0xa1, 0xf7, 0x4b, 0x79, 0x3c, 0x37, 0xaf, 0x49, 0x04, 
0x67, 0xa9, + 0x1e, 0x3e, 0x4c, 0xef, 0xf3, 0x05, 0x38, 0xa5, 0x9a, 0x2a, 0x9c, 0x8c, 0x6f, 0x64, 0xa3, 0xe9, + 0xb0, 0x8d, 0x34, 0x3d, 0xe6, 0xd8, 0x1f, 0x10, 0x51, 0xf7, 0x46, 0x36, 0xa2, 0xee, 0x58, 0xd9, + 0x77, 0xf8, 0x4d, 0xbf, 0x55, 0x80, 0x21, 0x95, 0x34, 0xe8, 0x39, 0x28, 0xb2, 0xa3, 0xe4, 0xdd, + 0x29, 0xc4, 0xec, 0x58, 0x8a, 0x39, 0x25, 0x4a, 0x92, 0x45, 0xec, 0x1c, 0x39, 0x63, 0xea, 0x30, + 0x37, 0x3e, 0x7a, 0x51, 0x82, 0x39, 0x25, 0xb4, 0x04, 0x7d, 0x24, 0xa8, 0x0a, 0xcd, 0xf8, 0xf0, + 0x04, 0xd9, 0x0b, 0x55, 0x57, 0x82, 0x2a, 0xa6, 0x54, 0x58, 0xda, 0x4e, 0xae, 0x00, 0x65, 0xde, + 0x23, 0x11, 0xda, 0x8f, 0x28, 0x75, 0x7f, 0xb9, 0x0f, 0x06, 0xca, 0xed, 0x0d, 0xaa, 0xe3, 0x7f, + 0xd3, 0x81, 0x33, 0x3b, 0x99, 0x74, 0xc2, 0xe9, 0x1c, 0xbf, 0x61, 0xcf, 0x00, 0xab, 0x47, 0x8d, + 0x3d, 0x20, 0x1f, 0x5b, 0xcf, 0x29, 0xc4, 0x79, 0xcd, 0x31, 0x32, 0x7a, 0xf6, 0x1d, 0x4b, 0x46, + 0xcf, 0x5b, 0xc7, 0x7c, 0x6d, 0x62, 0xac, 0xdb, 0x95, 0x09, 0xf7, 0xf7, 0x8a, 0x00, 0xfc, 0x6b, + 0xac, 0xb6, 0x92, 0x5e, 0xcc, 0x64, 0xcf, 0xc0, 0x68, 0x8d, 0x04, 0x24, 0x92, 0x31, 0x81, 0x99, + 0xa7, 0x6e, 0x16, 0xb5, 0x32, 0x6c, 0x60, 0xb2, 0x33, 0x49, 0x90, 0x44, 0xbb, 0x5c, 0x6f, 0xcd, + 0x5e, 0x8d, 0x50, 0x25, 0x58, 0xc3, 0x42, 0xd3, 0x86, 0xc7, 0x83, 0x3b, 0xcf, 0xc7, 0x0f, 0x70, + 0x50, 0xbc, 0x0f, 0xc6, 0xcd, 0x3c, 0x23, 0x42, 0x59, 0x53, 0xce, 0x6e, 0x33, 0x3d, 0x09, 0xce, + 0x60, 0xd3, 0x49, 0x5c, 0x8d, 0x76, 0x71, 0x3b, 0x10, 0x5a, 0x9b, 0x9a, 0xc4, 0xf3, 0x0c, 0x8a, + 0x45, 0x29, 0x4b, 0xd0, 0xc0, 0xf6, 0x2f, 0x0e, 0x17, 0x49, 0x1e, 0xd2, 0x04, 0x0d, 0x5a, 0x19, + 0x36, 0x30, 0x29, 0x07, 0x61, 0x66, 0x04, 0x73, 0x99, 0x64, 0x6c, 0x83, 0x2d, 0x18, 0x0f, 0x4d, + 0xf3, 0x08, 0x57, 0x61, 0xde, 0xd5, 0xe3, 0xd4, 0x33, 0xea, 0xf2, 0x20, 0x85, 0x8c, 0x35, 0x25, + 0x43, 0x9f, 0xaa, 0xad, 0xfa, 0x0d, 0x82, 0x51, 0x33, 0xa4, 0xb4, 0x6b, 0x90, 0xff, 0x1a, 0x9c, + 0x6d, 0x85, 0xd5, 0xb5, 0xc8, 0x0f, 0x23, 0x3f, 0xd9, 0x9d, 0x6b, 0x78, 0x71, 0xcc, 0x26, 0xc6, + 0x98, 0xa9, 0xce, 0xac, 0xe5, 0xe0, 0xe0, 0xdc, 0x9a, 0xf4, 0x80, 0xd1, 0x12, 0x40, 0x16, 0xd8, + 0x55, 0xe4, 0x0a, 0x99, 0x44, 0xc4, 0xaa, 0xd4, 0x3d, 0x03, 0xa7, 0xcb, 0xed, 0x56, 0xab, 0xe1, + 0x93, 0xaa, 0xf2, 0x28, 0xb8, 0xef, 0x87, 0x53, 0x22, 0xdf, 0xa7, 0x52, 0x1e, 0x0e, 0x95, 0x9d, + 0xda, 0xfd, 0x73, 0x07, 0x4e, 0x65, 0xc2, 0x68, 0xd0, 0x6b, 0xd9, 0x2d, 0xdf, 0x8a, 0xcd, 0x4c, + 0xdf, 0xec, 0xf9, 0x22, 0xcd, 0x55, 0x1f, 0xea, 0x32, 0x06, 0xde, 0xda, 0xdd, 0x13, 0x16, 0x29, + 0xce, 0x77, 0x04, 0x3d, 0x90, 0xde, 0xfd, 0x74, 0x01, 0xf2, 0x63, 0x97, 0xd0, 0x47, 0x3b, 0x07, + 0xe0, 0x39, 0x8b, 0x03, 0x20, 0x82, 0xa7, 0xba, 0x8f, 0x41, 0x60, 0x8e, 0xc1, 0x8a, 0xa5, 0x31, + 0x10, 0x7c, 0x3b, 0x47, 0xe2, 0x7f, 0x3b, 0x30, 0xb2, 0xbe, 0xbe, 0xac, 0x4c, 0x5c, 0x18, 0xce, + 0xc7, 0xfc, 0x6e, 0x3d, 0x73, 0xf1, 0xce, 0x85, 0xcd, 0x16, 0xf7, 0xf8, 0x0a, 0x4f, 0x34, 0x4b, + 0xbd, 0x5a, 0xce, 0xc5, 0xc0, 0x5d, 0x6a, 0xa2, 0x6b, 0x70, 0x46, 0x2f, 0x29, 0x6b, 0x2f, 0xdd, + 0x15, 0x45, 0x3e, 0x9b, 0xce, 0x62, 0x9c, 0x57, 0x27, 0x4b, 0x4a, 0x58, 0x2b, 0xd9, 0x76, 0x95, + 0x43, 0x4a, 0x14, 0xe3, 0xbc, 0x3a, 0xee, 0x2a, 0x8c, 0xac, 0x7b, 0x91, 0xea, 0xf8, 0x07, 0x60, + 0xa2, 0x12, 0x36, 0xa5, 0x95, 0x68, 0x99, 0x6c, 0x93, 0x86, 0xe8, 0x32, 0x7f, 0x5e, 0x22, 0x53, + 0x86, 0x3b, 0xb0, 0xdd, 0xff, 0x71, 0x11, 0xd4, 0x5d, 0xc1, 0x1e, 0x76, 0x98, 0x96, 0x8a, 0xea, + 0x2c, 0x5a, 0x8e, 0xea, 0x54, 0xb2, 0x36, 0x13, 0xd9, 0x99, 0xa4, 0x91, 0x9d, 0x03, 0xb6, 0x23, + 0x3b, 0x95, 0xc2, 0xd8, 0x11, 0xdd, 0xf9, 0x65, 0x07, 0x46, 0x83, 0xb0, 0x4a, 0x94, 0x2b, 0x6e, + 0x90, 0x69, 
0xad, 0x2f, 0xd9, 0x0b, 0x92, 0xe7, 0x51, 0x8a, 0x82, 0x3c, 0x8f, 0x38, 0x56, 0x5b, + 0x94, 0x5e, 0x84, 0x8d, 0x76, 0xa0, 0x05, 0xcd, 0x6e, 0xc9, 0xdd, 0x03, 0x0f, 0xe6, 0x1d, 0x37, + 0xee, 0x68, 0x84, 0xbc, 0xa5, 0xe9, 0x4d, 0xc3, 0xb6, 0xec, 0x71, 0xf2, 0x02, 0x98, 0xe6, 0xe5, + 0x90, 0xd9, 0x83, 0x53, 0x7d, 0xca, 0x85, 0x01, 0x1e, 0x9a, 0x2c, 0x32, 0x27, 0x31, 0xe7, 0x1b, + 0x0f, 0x5b, 0xc6, 0xa2, 0x04, 0x25, 0xd2, 0xdd, 0x3f, 0x62, 0xeb, 0x2d, 0x00, 0x23, 0x9c, 0x20, + 0xdf, 0xdf, 0x8f, 0x9e, 0xd5, 0x8f, 0xb1, 0xa3, 0xbd, 0x1c, 0x63, 0xc7, 0xba, 0x1e, 0x61, 0x3f, + 0xe7, 0xc0, 0x68, 0x45, 0xcb, 0xcd, 0x5f, 0x7a, 0xdc, 0xd6, 0x1b, 0xc4, 0x79, 0x4f, 0x28, 0x70, + 0x9f, 0x8e, 0xf1, 0x16, 0x80, 0xc1, 0x9d, 0xa5, 0x8b, 0x64, 0x67, 0x76, 0xb6, 0xf5, 0x5b, 0xc9, + 0x10, 0x61, 0xda, 0x00, 0x64, 0xd8, 0x24, 0x85, 0x61, 0xc1, 0x0b, 0xbd, 0x0e, 0x43, 0x32, 0xba, + 0x5d, 0x44, 0x81, 0x63, 0x1b, 0x46, 0x76, 0xd3, 0x93, 0x27, 0x73, 0xcc, 0x71, 0x28, 0x56, 0x1c, + 0x51, 0x1d, 0xfa, 0xaa, 0x5e, 0x4d, 0xc4, 0x83, 0xaf, 0xd8, 0xc9, 0xe1, 0x29, 0x79, 0xb2, 0xe3, + 0xd5, 0xfc, 0xcc, 0x22, 0xa6, 0x2c, 0xd0, 0xad, 0x34, 0xb9, 0xf9, 0x84, 0xb5, 0xdd, 0xd7, 0x54, + 0x93, 0xb8, 0x55, 0xa2, 0x23, 0x57, 0x7a, 0x55, 0x38, 0x3f, 0xff, 0x06, 0x63, 0xbb, 0x60, 0x27, + 0x09, 0x28, 0xcf, 0x38, 0x92, 0x3a, 0x50, 0x29, 0x97, 0x7a, 0x92, 0xb4, 0x4a, 0x3f, 0x6b, 0x8b, + 0x0b, 0xcb, 0x9b, 0xc1, 0x9f, 0x8b, 0x5e, 0x5f, 0x5f, 0xc3, 0x8c, 0x3a, 0x6a, 0xc0, 0x40, 0x8b, + 0xc5, 0x70, 0x94, 0x7e, 0xce, 0xd6, 0xde, 0xc2, 0x63, 0x42, 0xf8, 0xdc, 0xe4, 0xff, 0x63, 0xc1, + 0x03, 0x5d, 0x81, 0x41, 0xfe, 0x46, 0x07, 0x8f, 0xc7, 0x1f, 0xb9, 0x3c, 0xd9, 0xfd, 0xa5, 0x8f, + 0x74, 0xa3, 0xe0, 0xbf, 0x63, 0x2c, 0xeb, 0xa2, 0xcf, 0x3b, 0x30, 0x4e, 0x25, 0x6a, 0xfa, 0xa8, + 0x48, 0x09, 0xd9, 0x92, 0x59, 0x37, 0x62, 0xaa, 0x91, 0x48, 0x59, 0xa3, 0x8e, 0x49, 0xd7, 0x0c, + 0x76, 0x38, 0xc3, 0x1e, 0xbd, 0x01, 0x43, 0xb1, 0x5f, 0x25, 0x15, 0x2f, 0x8a, 0x4b, 0x67, 0x8e, + 0xa7, 0x29, 0xa9, 0xbb, 0x45, 0x30, 0xc2, 0x8a, 0x65, 0xee, 0xdb, 0xff, 0x67, 0xef, 0xf1, 0xdb, + 0xff, 0x7f, 0xcf, 0x81, 0x73, 0x3c, 0xa7, 0x7c, 0xf6, 0x41, 0x81, 0x73, 0x47, 0x34, 0xaf, 0xb0, + 0x8b, 0x04, 0x33, 0x79, 0x24, 0x71, 0x3e, 0x27, 0x96, 0x94, 0xd6, 0x7c, 0x03, 0xe6, 0xbc, 0x55, + 0xb7, 0x63, 0xef, 0xef, 0xbe, 0xa0, 0xa7, 0x60, 0xa4, 0x25, 0xb6, 0x43, 0x3f, 0x6e, 0xb2, 0x6b, + 0x21, 0x7d, 0xfc, 0xc2, 0xde, 0x5a, 0x0a, 0xc6, 0x3a, 0x8e, 0x91, 0xa1, 0xf8, 0x89, 0x83, 0x32, + 0x14, 0xa3, 0x1b, 0x30, 0x92, 0x84, 0x0d, 0x91, 0xa4, 0x33, 0x2e, 0x95, 0xd8, 0x0c, 0xbc, 0x98, + 0xb7, 0xb6, 0xd6, 0x15, 0x5a, 0x7a, 0x92, 0x4d, 0x61, 0x31, 0xd6, 0xe9, 0xb0, 0x50, 0x5c, 0x91, + 0xab, 0x3f, 0x62, 0x47, 0xd8, 0xfb, 0x33, 0xa1, 0xb8, 0x7a, 0x21, 0x36, 0x71, 0xd1, 0x22, 0x9c, + 0x6e, 0x75, 0x9c, 0x81, 0xf9, 0x75, 0x34, 0x15, 0xd1, 0xd0, 0x79, 0x00, 0xee, 0xac, 0x63, 0x9c, + 0x7e, 0x1f, 0x38, 0xe8, 0xf4, 0xdb, 0x25, 0x5f, 0xef, 0x83, 0x47, 0xc9, 0xd7, 0x8b, 0xaa, 0xf0, + 0xa0, 0xd7, 0x4e, 0x42, 0x96, 0x1b, 0xc6, 0xac, 0xc2, 0xa3, 0x92, 0x1f, 0xe6, 0x81, 0xce, 0xfb, + 0x7b, 0x53, 0x0f, 0xce, 0x1c, 0x80, 0x87, 0x0f, 0xa4, 0x82, 0x5e, 0x85, 0x21, 0x22, 0x72, 0x0e, + 0x97, 0x7e, 0xc6, 0x96, 0x92, 0x60, 0x66, 0x31, 0x96, 0x01, 0x9f, 0x1c, 0x86, 0x15, 0x3f, 0xb4, + 0x0e, 0x23, 0xf5, 0x30, 0x4e, 0x66, 0x1a, 0xbe, 0x17, 0x93, 0xb8, 0xf4, 0x10, 0x9b, 0x34, 0xb9, + 0xba, 0xd7, 0x55, 0x89, 0x96, 0xce, 0x99, 0xab, 0x69, 0x4d, 0xac, 0x93, 0x41, 0x84, 0x39, 0x1f, + 0x59, 0x48, 0xb6, 0xf4, 0xe3, 0x5c, 0x64, 0x1d, 0x7b, 0x2c, 0x8f, 0xf2, 0x5a, 0x58, 0x2d, 0x9b, + 0xd8, 0xca, 0xfb, 0xa8, 0x03, 0x71, 
0x96, 0x26, 0x7a, 0x06, 0x46, 0x5b, 0x61, 0xb5, 0xdc, 0x22, + 0x95, 0x35, 0x2f, 0xa9, 0xd4, 0x4b, 0x53, 0xa6, 0xd5, 0x6d, 0x4d, 0x2b, 0xc3, 0x06, 0x26, 0x6a, + 0xc1, 0x60, 0x93, 0x27, 0x0d, 0x28, 0x3d, 0x62, 0xeb, 0x6c, 0x23, 0xb2, 0x10, 0x70, 0x7d, 0x41, + 0xfc, 0xc0, 0x92, 0x0d, 0xfa, 0x67, 0x0e, 0x9c, 0xca, 0x5c, 0x74, 0x2a, 0xbd, 0xc3, 0x9a, 0xca, + 0x62, 0x12, 0x9e, 0x7d, 0x8c, 0x0d, 0x9f, 0x09, 0xbc, 0xdd, 0x09, 0xc2, 0xd9, 0x16, 0xf1, 0x71, + 0x61, 0x99, 0x3f, 0x4a, 0x8f, 0xda, 0x1b, 0x17, 0x46, 0x50, 0x8e, 0x0b, 0xfb, 0x81, 0x25, 0x1b, + 0xf4, 0x04, 0x0c, 0x8a, 0x24, 0x7d, 0xa5, 0xc7, 0x4c, 0x0f, 0xb2, 0xc8, 0xe5, 0x87, 0x65, 0xf9, + 0xe4, 0xfb, 0xe1, 0x74, 0xc7, 0xd1, 0xed, 0x50, 0xe9, 0x27, 0x7e, 0xdd, 0x01, 0xfd, 0x66, 0xb4, + 0xf5, 0x87, 0x3e, 0x9e, 0x81, 0xd1, 0x0a, 0x7f, 0x0e, 0x90, 0xdf, 0xad, 0xee, 0x37, 0xed, 0x9f, + 0x73, 0x5a, 0x19, 0x36, 0x30, 0xdd, 0xab, 0x80, 0x3a, 0xb3, 0xb0, 0x1f, 0x29, 0xb7, 0xd1, 0xbf, + 0x70, 0x60, 0xcc, 0xd0, 0x19, 0xac, 0xfb, 0x08, 0x17, 0x00, 0x35, 0xfd, 0x28, 0x0a, 0x23, 0xfd, + 0xdd, 0x35, 0x91, 0xff, 0x80, 0xdd, 0x30, 0x5b, 0xe9, 0x28, 0xc5, 0x39, 0x35, 0xdc, 0xdf, 0xea, + 0x87, 0x34, 0xe2, 0x59, 0xa5, 0xb9, 0x75, 0xba, 0xa6, 0xb9, 0x7d, 0x12, 0x86, 0x5e, 0x8e, 0xc3, + 0x60, 0x2d, 0x4d, 0x86, 0xab, 0xbe, 0xc5, 0xb3, 0xe5, 0xd5, 0xeb, 0x0c, 0x53, 0x61, 0x30, 0xec, + 0x57, 0x16, 0xfc, 0x46, 0xd2, 0x99, 0x2d, 0xf5, 0xd9, 0xe7, 0x38, 0x1c, 0x2b, 0x0c, 0xf6, 0x04, + 0xdb, 0x36, 0x51, 0x86, 0xf1, 0xf4, 0x09, 0x36, 0xfe, 0xc0, 0x02, 0x2b, 0x43, 0x97, 0x60, 0x58, + 0x19, 0xd5, 0x85, 0xa5, 0x5e, 0x8d, 0x94, 0xb2, 0xbc, 0xe3, 0x14, 0x87, 0x29, 0x84, 0xc2, 0x10, + 0x2b, 0x4c, 0x28, 0x65, 0x1b, 0xc7, 0x93, 0x8c, 0x69, 0x97, 0xcb, 0x76, 0x09, 0xc6, 0x8a, 0x65, + 0x9e, 0x9f, 0x74, 0xf8, 0x58, 0xfc, 0xa4, 0x5a, 0xf8, 0x7d, 0xb1, 0xd7, 0xf0, 0x7b, 0x73, 0x6e, + 0x0f, 0xf5, 0x34, 0xb7, 0x3f, 0xd9, 0x07, 0x83, 0xcf, 0x93, 0x88, 0x25, 0x09, 0x7f, 0x02, 0x06, + 0xb7, 0xf9, 0xbf, 0xd9, 0xbb, 0x9b, 0x02, 0x03, 0xcb, 0x72, 0xfa, 0xdd, 0x36, 0xda, 0x7e, 0xa3, + 0x3a, 0x9f, 0xae, 0xe2, 0x34, 0xbf, 0xa0, 0x2c, 0xc0, 0x29, 0x0e, 0xad, 0x50, 0xa3, 0x9a, 0x7d, + 0xb3, 0xe9, 0x77, 0xbc, 0x2e, 0xbe, 0x28, 0x0b, 0x70, 0x8a, 0x83, 0x1e, 0x83, 0x81, 0x9a, 0x9f, + 0xac, 0x7b, 0xb5, 0xac, 0x97, 0x6f, 0x91, 0x41, 0xb1, 0x28, 0x65, 0x6e, 0x22, 0x3f, 0x59, 0x8f, + 0x08, 0xb3, 0xec, 0x76, 0xa4, 0x8e, 0x58, 0xd4, 0xca, 0xb0, 0x81, 0xc9, 0x9a, 0x14, 0x8a, 0x9e, + 0x89, 0x20, 0xcc, 0xb4, 0x49, 0xb2, 0x00, 0xa7, 0x38, 0x74, 0xfe, 0x57, 0xc2, 0x66, 0xcb, 0x6f, + 0x88, 0xf0, 0x60, 0x6d, 0xfe, 0xcf, 0x09, 0x38, 0x56, 0x18, 0x14, 0x9b, 0x8a, 0x30, 0x2a, 0x7e, + 0xb2, 0xcf, 0x5d, 0xad, 0x09, 0x38, 0x56, 0x18, 0xee, 0xf3, 0x30, 0xc6, 0x57, 0xf2, 0x5c, 0xc3, + 0xf3, 0x9b, 0x8b, 0x73, 0xe8, 0x4a, 0x47, 0xf8, 0xfd, 0x13, 0x39, 0xe1, 0xf7, 0xe7, 0x8c, 0x4a, + 0x9d, 0x61, 0xf8, 0xee, 0x0f, 0x0a, 0x30, 0x74, 0x82, 0x2f, 0x06, 0x9e, 0xf8, 0xe3, 0xb7, 0xe8, + 0x56, 0xe6, 0xb5, 0xc0, 0x35, 0x9b, 0xb7, 0x69, 0x0e, 0x7c, 0x29, 0xf0, 0xbf, 0x16, 0xe0, 0xbc, + 0x44, 0x95, 0x67, 0xb9, 0xc5, 0x39, 0xf6, 0xdc, 0xd5, 0xf1, 0x0f, 0x74, 0x64, 0x0c, 0xf4, 0x9a, + 0xbd, 0xd3, 0xe8, 0xe2, 0x5c, 0xd7, 0xa1, 0x7e, 0x35, 0x33, 0xd4, 0xd8, 0x2a, 0xd7, 0x83, 0x07, + 0xfb, 0x2f, 0x1c, 0x98, 0xcc, 0x1f, 0xec, 0x13, 0x78, 0xa0, 0xf1, 0x0d, 0xf3, 0x81, 0xc6, 0x5f, + 0xb0, 0x37, 0xc5, 0xcc, 0xae, 0x74, 0x79, 0xaa, 0xf1, 0xcf, 0x1c, 0x38, 0x2b, 0x2b, 0xb0, 0xdd, + 0x73, 0xd6, 0x0f, 0x58, 0x20, 0xca, 0xf1, 0x4f, 0xb3, 0xd7, 0x8d, 0x69, 0xf6, 0xa2, 0xbd, 0x8e, + 0xeb, 0xfd, 0xe8, 0xfa, 0xb0, 0xf5, 0x9f, 0x3a, 0x50, 0xca, 
0xab, 0x70, 0x02, 0x9f, 0xfc, 0x35, + 0xf3, 0x93, 0x3f, 0x7f, 0x3c, 0x3d, 0xef, 0xfe, 0xc1, 0x4b, 0xdd, 0x06, 0x0a, 0x35, 0xa4, 0x5e, + 0xe5, 0xd8, 0xf2, 0xd1, 0x72, 0x16, 0xf9, 0x0a, 0x5a, 0x03, 0x06, 0x62, 0x16, 0xb5, 0x21, 0xa6, + 0xc0, 0x55, 0x1b, 0xda, 0x16, 0xa5, 0x27, 0x6c, 0xec, 0xec, 0x7f, 0x2c, 0x78, 0xb8, 0xbf, 0x51, + 0x80, 0x0b, 0xea, 0xe1, 0x55, 0xb2, 0x4d, 0x1a, 0xe9, 0xfa, 0x60, 0x4f, 0x2a, 0x78, 0xea, 0xa7, + 0xbd, 0x27, 0x15, 0x52, 0x16, 0xe9, 0x5a, 0x48, 0x61, 0x58, 0xe3, 0x89, 0xca, 0x70, 0x8e, 0x3d, + 0x81, 0xb0, 0xe0, 0x07, 0x5e, 0xc3, 0x7f, 0x95, 0x44, 0x98, 0x34, 0xc3, 0x6d, 0xaf, 0x21, 0x34, + 0x75, 0x75, 0x7d, 0x77, 0x21, 0x0f, 0x09, 0xe7, 0xd7, 0xed, 0x38, 0x71, 0xf7, 0xf5, 0x7a, 0xe2, + 0x76, 0xff, 0xc4, 0x81, 0xd1, 0x13, 0x7c, 0xa6, 0x36, 0x34, 0x97, 0xc4, 0xb3, 0xf6, 0x96, 0x44, + 0x97, 0x65, 0xb0, 0x57, 0x84, 0x8e, 0x97, 0x3b, 0xd1, 0xa7, 0x1c, 0x15, 0xd7, 0xc2, 0x63, 0xff, + 0x3e, 0x64, 0xaf, 0x1d, 0x87, 0xc9, 0xf9, 0x88, 0xbe, 0x96, 0x49, 0x84, 0x59, 0xb0, 0x95, 0xcd, + 0xa9, 0xa3, 0x35, 0x47, 0x48, 0x88, 0xf9, 0x65, 0x07, 0x80, 0xb7, 0x53, 0xe4, 0xd1, 0xa6, 0x6d, + 0xdb, 0x38, 0xb6, 0x91, 0xa2, 0x4c, 0x78, 0xd3, 0xd4, 0x12, 0x4a, 0x0b, 0xb0, 0xd6, 0x92, 0xbb, + 0xc8, 0x74, 0x79, 0xd7, 0x49, 0x36, 0x3f, 0xef, 0xc0, 0xa9, 0x4c, 0x73, 0x73, 0xea, 0x6f, 0x9a, + 0x2f, 0xfa, 0x59, 0xd0, 0xac, 0xcc, 0xec, 0xca, 0xba, 0xf1, 0xe4, 0xbf, 0xbb, 0x60, 0x3c, 0x79, + 0x8c, 0x5e, 0x83, 0x61, 0x69, 0xf9, 0x90, 0xd3, 0xdb, 0xe6, 0xcb, 0xa6, 0xea, 0x78, 0x23, 0x21, + 0x31, 0x4e, 0xf9, 0x65, 0xc2, 0xe6, 0x0a, 0x3d, 0x85, 0xcd, 0xdd, 0xdb, 0x77, 0x51, 0xf3, 0xed, + 0xd2, 0xfd, 0xc7, 0x62, 0x97, 0x7e, 0xd0, 0xba, 0x5d, 0xfa, 0xa1, 0x13, 0xb6, 0x4b, 0x6b, 0x4e, + 0xc2, 0xe2, 0x5d, 0x38, 0x09, 0x5f, 0x83, 0xb3, 0xdb, 0xe9, 0xa1, 0x53, 0xcd, 0x24, 0x91, 0x43, + 0xe8, 0x89, 0x5c, 0x6b, 0x34, 0x3d, 0x40, 0xc7, 0x09, 0x09, 0x12, 0xed, 0xb8, 0x9a, 0x46, 0xec, + 0x3d, 0x9f, 0x43, 0x0e, 0xe7, 0x32, 0xc9, 0x7a, 0x7b, 0x06, 0x7b, 0xf0, 0xf6, 0xbc, 0xe5, 0xc0, + 0x39, 0xaf, 0xe3, 0x0e, 0x17, 0x26, 0x9b, 0x22, 0xe4, 0xe4, 0xa6, 0x3d, 0x15, 0xc2, 0x20, 0x2f, + 0xdc, 0x6a, 0x79, 0x45, 0x38, 0xbf, 0x41, 0xe8, 0xd1, 0xd4, 0xf5, 0xce, 0xe3, 0x3c, 0xf3, 0xfd, + 0xe4, 0x5f, 0xcb, 0xc6, 0xf3, 0x00, 0x1b, 0xfa, 0x8f, 0xd8, 0x3d, 0x6d, 0x5b, 0x88, 0xe9, 0x19, + 0xb9, 0x8b, 0x98, 0x9e, 0x8c, 0xeb, 0x6d, 0xd4, 0x92, 0xeb, 0x2d, 0x80, 0x09, 0xbf, 0xe9, 0xd5, + 0xc8, 0x5a, 0xbb, 0xd1, 0xe0, 0x77, 0x40, 0xe4, 0xdb, 0xb3, 0xb9, 0x16, 0xbc, 0xe5, 0xb0, 0xe2, + 0x35, 0xb2, 0x4f, 0x7c, 0xab, 0xbb, 0x2e, 0xd7, 0x32, 0x94, 0x70, 0x07, 0x6d, 0x3a, 0x61, 0x59, + 0x32, 0x3b, 0x92, 0xd0, 0xd1, 0x66, 0x81, 0x23, 0x43, 0x7c, 0xc2, 0x5e, 0x4d, 0xc1, 0x58, 0xc7, + 0x41, 0x4b, 0x30, 0x5c, 0x0d, 0x62, 0x71, 0x1d, 0xf5, 0x14, 0x13, 0x66, 0xef, 0xa4, 0x22, 0x70, + 0xfe, 0x7a, 0x59, 0x5d, 0x44, 0x7d, 0x30, 0x27, 0x3b, 0xa3, 0x2a, 0xc7, 0x69, 0x7d, 0xb4, 0xc2, + 0x88, 0x89, 0x87, 0xb9, 0x78, 0x3c, 0xc7, 0xc3, 0x5d, 0x1c, 0x46, 0xf3, 0xd7, 0xe5, 0xd3, 0x62, + 0x63, 0x82, 0x9d, 0x78, 0x61, 0x2b, 0xa5, 0xa0, 0xbd, 0x01, 0x7c, 0xfa, 0xc0, 0x37, 0x80, 0x59, + 0x5a, 0xd6, 0xa4, 0xa1, 0xdc, 0xc3, 0x17, 0xad, 0xa5, 0x65, 0x4d, 0x23, 0x25, 0x45, 0x5a, 0xd6, + 0x14, 0x80, 0x75, 0x96, 0x68, 0xb5, 0x9b, 0x9b, 0xfc, 0x0c, 0x13, 0x1a, 0x87, 0x77, 0x7a, 0xeb, + 0xfe, 0xd2, 0xb3, 0x07, 0xfa, 0x4b, 0x3b, 0xfc, 0xbb, 0xe7, 0x0e, 0xe1, 0xdf, 0xad, 0xb3, 0x84, + 0x99, 0x8b, 0x73, 0xc2, 0xa5, 0x6e, 0xe1, 0x7c, 0xc7, 0x52, 0x74, 0xf0, 0xc8, 0x53, 0xf6, 0x2f, + 0xe6, 0x0c, 0xba, 0x06, 0x54, 0x5f, 0x38, 0x72, 0x40, 0x35, 0x15, 0xcf, 0x29, 0x9c, 
0x65, 0x5e, + 0x2d, 0x0a, 0xf1, 0x9c, 0x82, 0xb1, 0x8e, 0x93, 0xf5, 0x96, 0xde, 0x7f, 0x6c, 0xde, 0xd2, 0xc9, + 0x13, 0xf0, 0x96, 0x3e, 0xd0, 0xb3, 0xb7, 0xf4, 0x16, 0x9c, 0x69, 0x85, 0xd5, 0x79, 0x3f, 0x8e, + 0xda, 0xec, 0x52, 0xdc, 0x6c, 0xbb, 0x5a, 0x23, 0x09, 0x73, 0xb7, 0x8e, 0x5c, 0x7e, 0xa7, 0xde, + 0xc8, 0x16, 0x5b, 0xc8, 0x72, 0x8d, 0x66, 0x2a, 0x30, 0xd3, 0x09, 0x8b, 0xba, 0xcd, 0x29, 0xc4, + 0x79, 0x2c, 0x74, 0x3f, 0xed, 0xc3, 0x27, 0xe3, 0xa7, 0xfd, 0x00, 0x0c, 0xc5, 0xf5, 0x76, 0x52, + 0x0d, 0x77, 0x02, 0xe6, 0x8c, 0x1f, 0x9e, 0x7d, 0x87, 0x32, 0x65, 0x0b, 0xf8, 0xed, 0xbd, 0xa9, + 0x09, 0xf9, 0xbf, 0x66, 0xc5, 0x16, 0x10, 0xf4, 0xf5, 0x2e, 0xf7, 0x77, 0xdc, 0xe3, 0xbc, 0xbf, + 0x73, 0xe1, 0x50, 0x77, 0x77, 0xf2, 0x9c, 0xd1, 0x8f, 0xfc, 0xd4, 0x39, 0xa3, 0xbf, 0xea, 0xc0, + 0xd8, 0xb6, 0xee, 0x32, 0x10, 0x0e, 0x73, 0x0b, 0x81, 0x3b, 0x86, 0x27, 0x62, 0xd6, 0xa5, 0x72, + 0xce, 0x00, 0xdd, 0xce, 0x02, 0xb0, 0xd9, 0x92, 0x9c, 0xa0, 0xa2, 0x47, 0xef, 0x55, 0x50, 0xd1, + 0x1b, 0x4c, 0x8e, 0xc9, 0x43, 0x2e, 0xf3, 0xa2, 0xdb, 0x8d, 0x29, 0x96, 0x32, 0x51, 0x85, 0x14, + 0xeb, 0xfc, 0xd0, 0xe7, 0x1c, 0x98, 0x90, 0xe7, 0x32, 0xe1, 0xf2, 0x8b, 0x45, 0x54, 0xa4, 0xcd, + 0xe3, 0x20, 0x0b, 0xab, 0x5f, 0xcf, 0xf0, 0xc1, 0x1d, 0x9c, 0xa9, 0x54, 0x57, 0x41, 0x68, 0xb5, + 0x98, 0x05, 0xff, 0x0a, 0x1d, 0x66, 0x26, 0x05, 0x63, 0x1d, 0x07, 0x7d, 0x43, 0x3d, 0xec, 0xff, + 0x04, 0x13, 0xe8, 0x2f, 0x58, 0xd6, 0x4d, 0x6d, 0xbc, 0xee, 0x8f, 0xbe, 0xe8, 0xc0, 0xc4, 0x4e, + 0xc6, 0xa0, 0x21, 0xc2, 0x42, 0xb1, 0x7d, 0x53, 0x09, 0x1f, 0xee, 0x2c, 0x14, 0x77, 0xb4, 0x00, + 0x7d, 0xd6, 0x34, 0x74, 0xf2, 0xf8, 0x51, 0x8b, 0x03, 0x98, 0x31, 0xac, 0xf2, 0x6b, 0x6e, 0xf9, + 0x16, 0xcf, 0xbb, 0x8e, 0x0f, 0x99, 0xa4, 0x9d, 0x49, 0x3f, 0x56, 0x4e, 0x55, 0x62, 0xda, 0x5b, + 0x2c, 0x2c, 0x76, 0xe3, 0xf3, 0xeb, 0xe6, 0x96, 0x2f, 0x9e, 0x87, 0x71, 0xd3, 0xb7, 0x87, 0xde, + 0x65, 0xbe, 0x19, 0x71, 0x31, 0x9b, 0x7e, 0x7f, 0x4c, 0xe2, 0x1b, 0x29, 0xf8, 0x8d, 0x1c, 0xf9, + 0x85, 0x63, 0xcd, 0x91, 0xdf, 0x77, 0x32, 0x39, 0xf2, 0x27, 0x8e, 0x23, 0x47, 0xfe, 0xe9, 0x43, + 0xe5, 0xc8, 0xd7, 0xde, 0x28, 0xe8, 0xbf, 0xc3, 0x1b, 0x05, 0x33, 0x70, 0x4a, 0xde, 0xfd, 0x21, + 0x22, 0x0d, 0x39, 0x77, 0xfb, 0x5f, 0x10, 0x55, 0x4e, 0xcd, 0x99, 0xc5, 0x38, 0x8b, 0x4f, 0x17, + 0x59, 0x31, 0x60, 0x35, 0x07, 0x6c, 0x3d, 0x60, 0x64, 0x4e, 0x2d, 0x76, 0x7c, 0x16, 0x22, 0x4a, + 0x46, 0x3b, 0x17, 0x19, 0xec, 0xb6, 0xfc, 0x07, 0xf3, 0x16, 0xa0, 0x97, 0xa0, 0x14, 0x6e, 0x6e, + 0x36, 0x42, 0xaf, 0x9a, 0x26, 0xf2, 0x97, 0x71, 0x09, 0xfc, 0xee, 0xa6, 0xca, 0xfb, 0xba, 0xda, + 0x05, 0x0f, 0x77, 0xa5, 0x80, 0xde, 0xa2, 0x8a, 0x49, 0x12, 0x46, 0xa4, 0x9a, 0xda, 0x6a, 0x86, + 0x59, 0x9f, 0x89, 0xf5, 0x3e, 0x97, 0x4d, 0x3e, 0xbc, 0xf7, 0xea, 0xa3, 0x64, 0x4a, 0x71, 0xb6, + 0x59, 0x28, 0x82, 0xf3, 0xad, 0x3c, 0x53, 0x51, 0x2c, 0x6e, 0x2c, 0x1d, 0x64, 0xb0, 0x92, 0x4b, + 0xf7, 0x7c, 0xae, 0xb1, 0x29, 0xc6, 0x5d, 0x28, 0xeb, 0xc9, 0xf6, 0x87, 0x4e, 0x26, 0xd9, 0xfe, + 0xc7, 0x00, 0x2a, 0x32, 0x95, 0x97, 0x34, 0x3e, 0x2c, 0x59, 0xb9, 0x4a, 0xc3, 0x69, 0x6a, 0xef, + 0x9b, 0x2a, 0x36, 0x58, 0x63, 0x89, 0xfe, 0x6f, 0xee, 0x6b, 0x14, 0xdc, 0xc2, 0x52, 0xb3, 0x3e, + 0x27, 0x7e, 0xea, 0x5e, 0xa4, 0xf8, 0xe7, 0x0e, 0x4c, 0xf2, 0x99, 0x97, 0x55, 0xee, 0xa9, 0x6a, + 0x21, 0xee, 0xf6, 0xd8, 0x0e, 0x5d, 0x61, 0x51, 0x7c, 0x65, 0x83, 0x2b, 0x73, 0x74, 0x1f, 0xd0, + 0x12, 0xf4, 0xe5, 0x9c, 0x23, 0xc5, 0x29, 0x5b, 0x36, 0xcb, 0xfc, 0x37, 0x05, 0xce, 0xec, 0xf7, + 0x72, 0x8a, 0xf8, 0x97, 0x5d, 0x4d, 0xaa, 0x88, 0x35, 0xef, 0x17, 0x8f, 0xc9, 0xa4, 0xaa, 0x3f, + 0x7c, 0x70, 
0x28, 0xc3, 0xea, 0xe7, 0x1d, 0x98, 0xf0, 0x32, 0xa1, 0x26, 0xcc, 0x0e, 0x64, 0xc5, + 0x26, 0x35, 0x13, 0xa5, 0xf1, 0x2b, 0x4c, 0xc9, 0xcb, 0x46, 0xb5, 0xe0, 0x0e, 0xe6, 0xe8, 0x07, + 0x0e, 0x3c, 0x90, 0x78, 0xf1, 0x16, 0x4f, 0x2b, 0x1c, 0xa7, 0x77, 0x75, 0x45, 0xe3, 0xce, 0xb2, + 0xd5, 0xf8, 0x8a, 0xf5, 0xd5, 0xb8, 0xde, 0x9d, 0x27, 0x5f, 0x97, 0x8f, 0x88, 0x75, 0xf9, 0xc0, + 0x01, 0x98, 0xf8, 0xa0, 0xa6, 0x4f, 0x7e, 0xca, 0xe1, 0xcf, 0x4f, 0x75, 0x55, 0xf9, 0x36, 0x4c, + 0x95, 0x6f, 0xd9, 0xe6, 0x03, 0x38, 0xba, 0xee, 0xf9, 0xab, 0x0e, 0x9c, 0xcd, 0xdb, 0x91, 0x72, + 0x9a, 0xf4, 0x11, 0xb3, 0x49, 0x16, 0x4f, 0x59, 0x7a, 0x83, 0xac, 0xbc, 0xbf, 0x31, 0x79, 0x1d, + 0x1e, 0xbe, 0xd3, 0x57, 0xbc, 0x13, 0xbd, 0x21, 0x5d, 0x2d, 0xfe, 0xd3, 0x61, 0xcd, 0x0b, 0x99, + 0x90, 0x96, 0xf5, 0x18, 0xee, 0x00, 0x06, 0xfc, 0xa0, 0xe1, 0x07, 0x44, 0xdc, 0xd7, 0xb4, 0x79, + 0x86, 0x15, 0xef, 0xe7, 0x50, 0xea, 0x58, 0x70, 0xb9, 0xc7, 0x4e, 0xc9, 0xec, 0x8b, 0x64, 0xfd, + 0x27, 0xff, 0x22, 0xd9, 0x0e, 0x0c, 0xef, 0xf8, 0x49, 0x9d, 0x05, 0x53, 0x08, 0x5f, 0x9f, 0x85, + 0x7b, 0x8e, 0x94, 0x5c, 0xda, 0xf7, 0x9b, 0x92, 0x01, 0x4e, 0x79, 0xa1, 0x4b, 0x9c, 0x31, 0x8b, + 0xdc, 0xce, 0x86, 0xd4, 0xde, 0x94, 0x05, 0x38, 0xc5, 0xa1, 0x83, 0x35, 0x4a, 0x7f, 0xc9, 0x7c, + 0x46, 0x22, 0xed, 0xae, 0x8d, 0x74, 0x8a, 0x82, 0x22, 0xbf, 0x4d, 0x7c, 0x53, 0xe3, 0x81, 0x0d, + 0x8e, 0x2a, 0xf3, 0xf1, 0x50, 0xd7, 0xcc, 0xc7, 0xaf, 0x33, 0x85, 0x2d, 0xf1, 0x83, 0x36, 0x59, + 0x0d, 0x44, 0xbc, 0xf7, 0xb2, 0x9d, 0xbb, 0xcf, 0x9c, 0x26, 0x3f, 0x82, 0xa7, 0xbf, 0xb1, 0xc6, + 0x4f, 0x73, 0xb9, 0x8c, 0x1c, 0xe8, 0x72, 0x49, 0x4d, 0x2e, 0xa3, 0xd6, 0x4d, 0x2e, 0x09, 0x69, + 0x59, 0x31, 0xb9, 0xfc, 0x54, 0x99, 0x03, 0xfe, 0xc2, 0x01, 0xa4, 0xf4, 0x2e, 0x25, 0x50, 0x4f, + 0x20, 0xa8, 0xf2, 0xe3, 0x0e, 0x40, 0xa0, 0xde, 0xad, 0xb4, 0xbb, 0x0b, 0x72, 0x9a, 0x69, 0x03, + 0x52, 0x18, 0xd6, 0x78, 0xba, 0xff, 0xd3, 0x49, 0x63, 0x97, 0xd3, 0xbe, 0x9f, 0x40, 0x10, 0xd9, + 0xae, 0x19, 0x44, 0xb6, 0x6e, 0xd1, 0x74, 0xaf, 0xba, 0xd1, 0x25, 0x9c, 0xec, 0xc7, 0x05, 0x38, + 0xa5, 0x23, 0x97, 0xc9, 0x49, 0x7c, 0xec, 0x1d, 0x23, 0x82, 0xf6, 0x86, 0xdd, 0xfe, 0x96, 0x85, + 0x07, 0x28, 0x2f, 0x5a, 0xfb, 0x63, 0x99, 0x68, 0xed, 0x9b, 0xf6, 0x59, 0x1f, 0x1c, 0xb2, 0xfd, + 0xdf, 0x1c, 0x38, 0x93, 0xa9, 0x71, 0x02, 0x13, 0x6c, 0xdb, 0x9c, 0x60, 0xcf, 0x59, 0xef, 0x75, + 0x97, 0xd9, 0xf5, 0xcd, 0x42, 0x47, 0x6f, 0xd9, 0x21, 0xee, 0x93, 0x0e, 0x14, 0xa9, 0xb6, 0x2c, + 0xe3, 0xb9, 0x3e, 0x72, 0x2c, 0x33, 0x80, 0xe9, 0xf5, 0x42, 0x3a, 0xab, 0xf6, 0x31, 0x18, 0xe6, + 0xdc, 0x27, 0x3f, 0xe1, 0x00, 0xa4, 0x48, 0xf7, 0x4a, 0x05, 0x76, 0xbf, 0x5d, 0x80, 0x73, 0xb9, + 0xd3, 0x08, 0x7d, 0x5a, 0x59, 0xe4, 0x1c, 0xdb, 0xd1, 0x8a, 0x06, 0x23, 0xdd, 0x30, 0x37, 0x66, + 0x18, 0xe6, 0x84, 0x3d, 0xee, 0x5e, 0x1d, 0x60, 0x84, 0x98, 0xd6, 0x06, 0xeb, 0x47, 0x4e, 0x1a, + 0x00, 0xab, 0xf2, 0x1a, 0xfd, 0x15, 0xbc, 0xc4, 0xe3, 0xfe, 0x58, 0xbb, 0xe1, 0x20, 0x3b, 0x7a, + 0x02, 0xb2, 0x62, 0xc7, 0x94, 0x15, 0xd8, 0xbe, 0x1f, 0xb9, 0x8b, 0xb0, 0x78, 0x05, 0xf2, 0x1c, + 0xcb, 0xbd, 0x25, 0x45, 0x34, 0xae, 0xc3, 0x16, 0x7a, 0xbe, 0x0e, 0x3b, 0x06, 0x23, 0x2f, 0xfa, + 0x2d, 0xe5, 0x03, 0x9d, 0xfe, 0xce, 0x0f, 0x2f, 0xde, 0xf7, 0xdd, 0x1f, 0x5e, 0xbc, 0xef, 0x07, + 0x3f, 0xbc, 0x78, 0xdf, 0xc7, 0xf7, 0x2f, 0x3a, 0xdf, 0xd9, 0xbf, 0xe8, 0x7c, 0x77, 0xff, 0xa2, + 0xf3, 0x83, 0xfd, 0x8b, 0xce, 0x7f, 0xdc, 0xbf, 0xe8, 0xfc, 0x83, 0xff, 0x74, 0xf1, 0xbe, 0x17, + 0x87, 0x64, 0xc7, 0xfe, 0x32, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xf7, 0x2e, 0xb8, 0x5a, 0xd5, 0x00, + 0x00, } func (m *Amount) Marshal() 
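Every generated MarshalToSizedBuffer in the hunks below follows the same gogo/protobuf scheme: the buffer is filled from the end backwards, each field's payload is written before its tag, and the hard-coded bytes (0x1a, 0x12, 0xa, the 0x1/0xda pair, ...) are the varint encoding of fieldNumber<<3 | wireType. A minimal standalone sketch, not part of the patch, that reproduces those constants; tagBytes is an illustrative helper, while the field numbers are read off the hunks themselves:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// tagBytes returns the varint encoding of a protobuf field tag,
	// i.e. fieldNumber<<3 | wireType (wire type 2 = length-delimited,
	// 0 = varint).
	func tagBytes(fieldNumber, wireType uint64) []byte {
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, fieldNumber<<3|wireType)
		return buf[:n]
	}

	func main() {
		fmt.Printf("% x\n", tagBytes(3, 2))  // 1a    -> e.g. ArtifactoryArtifactRepository.KeyFormat
		fmt.Printf("% x\n", tagBytes(2, 2))  // 12    -> e.g. Mutex.Namespace
		fmt.Printf("% x\n", tagBytes(1, 0))  // 08    -> e.g. NodeFlag.Hooked (bool)
		fmt.Printf("% x\n", tagBytes(27, 2)) // da 01 -> NodeStatus.NodeFlag; written back-to-front as 0x1, then 0xda
	}

This is also why a high-numbered field such as NodeStatus.NodeFlag costs a two-byte tag in the corresponding Size() hunk (the `n += 2 + l + sovGenerated(uint64(l))` lines).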
@@ -6213,6 +6320,11 @@ func (m *ArtifactoryArtifactRepository) MarshalToSizedBuffer(dAtA []byte) (int,
 	_ = i
 	var l int
 	_ = l
+	i -= len(m.KeyFormat)
+	copy(dAtA[i:], m.KeyFormat)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KeyFormat)))
+	i--
+	dAtA[i] = 0x1a
 	i -= len(m.RepoURL)
 	copy(dAtA[i:], m.RepoURL)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepoURL)))
@@ -6671,6 +6783,44 @@ func (m *ClusterWorkflowTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, er
 	return len(dAtA) - i, nil
 }
 
+func (m *Column) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Column) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Column) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *Condition) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -7687,6 +7837,11 @@ func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	i -= len(m.Operation)
+	copy(dAtA[i:], m.Operation)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+	i--
+	dAtA[i] = 0x1a
 	if m.Realtime != nil {
 		i--
 		if *m.Realtime {
@@ -8952,6 +9107,11 @@ func (m *Mutex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
 	i -= len(m.Name)
 	copy(dAtA[i:], m.Name)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
@@ -9044,6 +9204,45 @@ func (m *MutexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *NodeFlag) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeFlag) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeFlag) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.Retried {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Hooked {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
 func (m *NodeResult) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -9114,6 +9313,20 @@ func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.NodeFlag != nil {
+		{
+			size, err := m.NodeFlag.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xda
+	}
 	i -= len(m.Progress)
 	copy(dAtA[i:], m.Progress)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Progress)))
@@ -9590,6 +9803,14 @@ func (m *OSSBucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	i--
+	if m.UseSDKCreds {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
 	if m.LifecycleRule != nil {
 		{
 			size, err := m.LifecycleRule.MarshalToSizedBuffer(dAtA[:i])
@@ -9701,6 +9922,13 @@ func (m *Object) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Value != nil {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0xa
+	}
 	return len(dAtA) - i, nil
 }
 
@@ -9934,6 +10162,18 @@ func (m *PodGC) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.DeleteDelayDuration != nil {
+		{
+			size, err := m.DeleteDelayDuration.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
 	if m.LabelSelector != nil {
 		{
 			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
@@ -10375,6 +10615,18 @@ func (m *S3Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.CASecret != nil {
+		{
+			size, err := m.CASecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
 	if m.EncryptionOptions != nil {
 		{
 			size, err := m.EncryptionOptions.MarshalToSizedBuffer(dAtA[:i])
@@ -10612,6 +10864,11 @@ func (m *SemaphoreRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
 	if m.ConfigMapKeyRef != nil {
 		{
 			size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i])
@@ -12184,6 +12441,52 @@ func (m *WorkflowEventBindingSpec) MarshalToSizedBuffer(dAtA []byte) (int, error
 	return len(dAtA) - i, nil
 }
 
+func (m *WorkflowLevelArtifactGC) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WorkflowLevelArtifactGC) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WorkflowLevelArtifactGC) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.PodSpecPatch)
+	copy(dAtA[i:], m.PodSpecPatch)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodSpecPatch)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.ForceFinalizerRemoval {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.ArtifactGC.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *WorkflowList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -12881,6 +13184,35 @@ func (m *WorkflowStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if len(m.TaskResultsCompletionStatus) > 0 {
+		keysForTaskResultsCompletionStatus := make([]string, 0, len(m.TaskResultsCompletionStatus))
+		for k := range m.TaskResultsCompletionStatus {
+			keysForTaskResultsCompletionStatus = append(keysForTaskResultsCompletionStatus, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus)
+		for iNdEx := len(keysForTaskResultsCompletionStatus) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.TaskResultsCompletionStatus[string(keysForTaskResultsCompletionStatus[iNdEx])]
+			baseI := i
+			i--
+			if v {
+				dAtA[i] = 1
+			} else {
+				dAtA[i] = 0
+			}
+			i--
+			dAtA[i] = 0x10
+			i -= len(keysForTaskResultsCompletionStatus[iNdEx])
+			copy(dAtA[i:], keysForTaskResultsCompletionStatus[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTaskResultsCompletionStatus[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xa2
+		}
+	}
 	if m.ArtifactGCStatus != nil {
 		{
 			size, err := m.ArtifactGCStatus.MarshalToSizedBuffer(dAtA[:i])
@@ -14132,6 +14464,8 @@ func (m *ArtifactoryArtifactRepository) Size() (n int) {
 	n += 1 + l + sovGenerated(uint64(l))
 	l = len(m.RepoURL)
 	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KeyFormat)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
@@ -14290,6 +14624,21 @@ func (m *ClusterWorkflowTemplateList) Size() (n int) {
 	return n
 }
 
+func (m *Column) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *Condition) Size() (n int) {
 	if m == nil {
 		return 0
@@ -14666,6 +15015,8 @@ func (m *Gauge) Size() (n int) {
 	if m.Realtime != nil {
 		n += 2
 	}
+	l = len(m.Operation)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
@@ -15136,6 +15487,8 @@ func (m *Mutex) Size() (n int) {
 	_ = l
 	l = len(m.Name)
 	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
@@ -15173,6 +15526,17 @@ func (m *MutexStatus) Size() (n int) {
 	return n
 }
 
+func (m *NodeFlag) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	n += 2
+	return n
+}
+
 func (m *NodeResult) Size() (n int) {
 	if m == nil {
 		return 0
@@ -15270,6 +15634,10 @@ func (m *NodeStatus) Size() (n int) {
 	}
 	l = len(m.Progress)
 	n += 2 + l + sovGenerated(uint64(l))
+	if m.NodeFlag != nil {
+		l = m.NodeFlag.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -15390,6 +15758,7 @@ func (m *OSSBucket) Size() (n int) {
 		l = m.LifecycleRule.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	n += 2
 	return n
 }
 
@@ -15410,6 +15779,10 @@ func (m *Object) Size() (n int) {
 	}
 	var l int
 	_ = l
+	if m.Value != nil {
+		l = len(m.Value)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -15515,6 +15888,10 @@ func (m *PodGC) Size() (n int) {
 		l = m.LabelSelector.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.DeleteDelayDuration != nil {
+		l = m.DeleteDelayDuration.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -15701,6 +16078,10 @@ func (m *S3Bucket) Size() (n int) {
 		l = m.EncryptionOptions.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.CASecret != nil {
+		l = m.CASecret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -15762,6 +16143,8 @@ func (m *SemaphoreRef) Size() (n int) {
 		l = m.ConfigMapKeyRef.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
 	return n
 }
 
@@ -16306,6 +16689,20 @@ func (m *WorkflowEventBindingSpec) Size() (n int) {
 	return n
 }
 
+func (m *WorkflowLevelArtifactGC) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ArtifactGC.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.PodSpecPatch)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
 func (m *WorkflowList) Size() (n int) {
 	if m == nil {
 		return 0
@@ -16610,6 +17007,14 @@ func (m *WorkflowStatus) Size() (n int) {
 		l = m.ArtifactGCStatus.Size()
 		n += 2 + l + sovGenerated(uint64(l))
 	}
+	if len(m.TaskResultsCompletionStatus) > 0 {
+		for k, v := range m.TaskResultsCompletionStatus {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1
+			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
 	return n
 }
 
@@ -17131,6 +17536,7 @@ func (this *ArtifactoryArtifactRepository) String() string {
 	s := strings.Join([]string{`&ArtifactoryArtifactRepository{`,
 		`ArtifactoryAuth:` + strings.Replace(strings.Replace(this.ArtifactoryAuth.String(), "ArtifactoryAuth", "ArtifactoryAuth", 1), `&`, ``, 1) + `,`,
 		`RepoURL:` + fmt.Sprintf("%v", this.RepoURL) + `,`,
+		`KeyFormat:` + fmt.Sprintf("%v", this.KeyFormat) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -17252,6 +17658,18 @@ func (this *ClusterWorkflowTemplateList) String() string {
 	}, "")
 	return s
 }
+func (this *Column) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Column{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *Condition) String() string {
 	if this == nil {
 		return "nil"
@@ -17548,6 +17966,7 @@ func (this *Gauge) String() string {
 	s := strings.Join([]string{`&Gauge{`,
 		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
 		`Realtime:` + valueToStringGenerated(this.Realtime) + `,`,
+		`Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -17906,6 +18325,7 @@ func (this *Mutex) String() string {
 	}
 	s := strings.Join([]string{`&Mutex{`,
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -17942,6 +18362,17 @@ func (this *MutexStatus) String() string {
 	}, "")
 	return s
 }
+func (this *NodeFlag) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeFlag{`,
+		`Hooked:` + fmt.Sprintf("%v", this.Hooked) + `,`,
+		`Retried:` + fmt.Sprintf("%v", this.Retried) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *NodeResult) String() string {
 	if this == nil {
 		return "nil"
@@ -17994,6 +18425,7 @@ func (this *NodeStatus) String() string {
 		`EstimatedDuration:` + fmt.Sprintf("%v", this.EstimatedDuration) + `,`,
 		`SynchronizationStatus:` + strings.Replace(this.SynchronizationStatus.String(), "NodeSynchronizationStatus", "NodeSynchronizationStatus", 1) + `,`,
 		`Progress:` + fmt.Sprintf("%v", this.Progress) + `,`,
+		`NodeFlag:` + strings.Replace(this.NodeFlag.String(), "NodeFlag", "NodeFlag", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -18081,6 +18513,7 @@ func (this *OSSBucket) String() string {
 		`CreateBucketIfNotPresent:` + fmt.Sprintf("%v", this.CreateBucketIfNotPresent) + `,`,
 		`SecurityToken:` + fmt.Sprintf("%v", this.SecurityToken) + `,`,
 		`LifecycleRule:` + strings.Replace(this.LifecycleRule.String(), "OSSLifecycleRule", "OSSLifecycleRule", 1) + `,`,
+		`UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -18101,6 +18534,7 @@ func (this *Object) String() string {
 		return "nil"
 	}
 	s := strings.Join([]string{`&Object{`,
+		`Value:` + valueToStringGenerated(this.Value) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -18176,6 +18610,7 @@ func (this *PodGC) String() string {
 	s := strings.Join([]string{`&PodGC{`,
 		`Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`,
 		`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v11.LabelSelector", 1) + `,`,
+		`DeleteDelayDuration:` +
strings.Replace(fmt.Sprintf("%v", this.DeleteDelayDuration), "Duration", "v11.Duration", 1) + `,`, `}`, }, "") return s @@ -18299,6 +18734,7 @@ func (this *S3Bucket) String() string { `UseSDKCreds:` + fmt.Sprintf("%v", this.UseSDKCreds) + `,`, `CreateBucketIfNotPresent:` + strings.Replace(this.CreateBucketIfNotPresent.String(), "CreateS3BucketOptions", "CreateS3BucketOptions", 1) + `,`, `EncryptionOptions:` + strings.Replace(this.EncryptionOptions.String(), "S3EncryptionOptions", "S3EncryptionOptions", 1) + `,`, + `CASecret:` + strings.Replace(fmt.Sprintf("%v", this.CASecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `}`, }, "") return s @@ -18344,6 +18780,7 @@ func (this *SemaphoreRef) String() string { } s := strings.Join([]string{`&SemaphoreRef{`, `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `}`, }, "") return s @@ -18722,6 +19159,18 @@ func (this *WorkflowEventBindingSpec) String() string { }, "") return s } +func (this *WorkflowLevelArtifactGC) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkflowLevelArtifactGC{`, + `ArtifactGC:` + strings.Replace(strings.Replace(this.ArtifactGC.String(), "ArtifactGC", "ArtifactGC", 1), `&`, ``, 1) + `,`, + `ForceFinalizerRemoval:` + fmt.Sprintf("%v", this.ForceFinalizerRemoval) + `,`, + `PodSpecPatch:` + fmt.Sprintf("%v", this.PodSpecPatch) + `,`, + `}`, + }, "") + return s +} func (this *WorkflowList) String() string { if this == nil { return "nil" @@ -18864,7 +19313,7 @@ func (this *WorkflowSpec) String() string { `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, `Executor:` + strings.Replace(this.Executor.String(), "ExecutorConfig", "ExecutorConfig", 1) + `,`, `TTLStrategy:` + strings.Replace(this.TTLStrategy.String(), "TTLStrategy", "TTLStrategy", 1) + `,`, - `PodDisruptionBudget:` + strings.Replace(fmt.Sprintf("%v", this.PodDisruptionBudget), "PodDisruptionBudgetSpec", "v1beta1.PodDisruptionBudgetSpec", 1) + `,`, + `PodDisruptionBudget:` + strings.Replace(fmt.Sprintf("%v", this.PodDisruptionBudget), "PodDisruptionBudgetSpec", "v12.PodDisruptionBudgetSpec", 1) + `,`, `Metrics:` + strings.Replace(this.Metrics.String(), "Metrics", "Metrics", 1) + `,`, `Shutdown:` + fmt.Sprintf("%v", this.Shutdown) + `,`, `WorkflowTemplateRef:` + strings.Replace(this.WorkflowTemplateRef.String(), "WorkflowTemplateRef", "WorkflowTemplateRef", 1) + `,`, @@ -18876,7 +19325,7 @@ func (this *WorkflowSpec) String() string { `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, `Hooks:` + mapStringForHooks + `,`, `WorkflowMetadata:` + strings.Replace(this.WorkflowMetadata.String(), "WorkflowMetadata", "WorkflowMetadata", 1) + `,`, - `ArtifactGC:` + strings.Replace(this.ArtifactGC.String(), "ArtifactGC", "ArtifactGC", 1) + `,`, + `ArtifactGC:` + strings.Replace(this.ArtifactGC.String(), "WorkflowLevelArtifactGC", "WorkflowLevelArtifactGC", 1) + `,`, `}`, }, "") return s @@ -18925,6 +19374,16 @@ func (this *WorkflowStatus) String() string { mapStringForResourcesDuration += fmt.Sprintf("%v: %v,", k, this.ResourcesDuration[k8s_io_api_core_v1.ResourceName(k)]) } mapStringForResourcesDuration += "}" + keysForTaskResultsCompletionStatus := make([]string, 0, len(this.TaskResultsCompletionStatus)) + for k := range this.TaskResultsCompletionStatus { + keysForTaskResultsCompletionStatus = 
append(keysForTaskResultsCompletionStatus, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForTaskResultsCompletionStatus) + mapStringForTaskResultsCompletionStatus := "map[string]bool{" + for _, k := range keysForTaskResultsCompletionStatus { + mapStringForTaskResultsCompletionStatus += fmt.Sprintf("%v: %v,", k, this.TaskResultsCompletionStatus[k]) + } + mapStringForTaskResultsCompletionStatus += "}" s := strings.Join([]string{`&WorkflowStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, @@ -18944,6 +19403,7 @@ func (this *WorkflowStatus) String() string { `Progress:` + fmt.Sprintf("%v", this.Progress) + `,`, `ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRefStatus", "ArtifactRepositoryRefStatus", 1) + `,`, `ArtifactGCStatus:` + strings.Replace(this.ArtifactGCStatus.String(), "ArtGCStatus", "ArtGCStatus", 1) + `,`, + `TaskResultsCompletionStatus:` + mapStringForTaskResultsCompletionStatus + `,`, `}`, }, "") return s @@ -23027,214 +23487,246 @@ func (m *ArtifactoryArtifactRepository) Unmarshal(dAtA []byte) error { } m.RepoURL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ArtifactoryAuth) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ArtifactoryAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ArtifactoryAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UsernameSecret == nil { - m.UsernameSecret = &v1.SecretKeySelector{} - } - if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PasswordSecret == nil { - m.PasswordSecret = &v1.SecretKeySelector{} - } - if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyFormat", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyFormat = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactoryAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PasswordSecret == nil { + m.PasswordSecret = &v1.SecretKeySelector{} + } + if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureBlobContainer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AzureBlobContainer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -24022,10 +24514,126 @@ func (m *ClientCertAuth) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ClientKeySecret == nil { - m.ClientKeySecret = &v1.SecretKeySelector{} + if m.ClientKeySecret == nil { + m.ClientKeySecret = &v1.SecretKeySelector{} + } + if err := m.ClientKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterWorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := m.ClientKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24050,7 +24658,7 @@ func (m *ClientCertAuth) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { +func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24073,15 +24681,15 @@ func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ClusterWorkflowTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: ClusterWorkflowTemplateList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterWorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ClusterWorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24108,13 +24716,13 @@ func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -24141,7 +24749,8 @@ func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ClusterWorkflowTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -24166,7 +24775,7 @@ func (m *ClusterWorkflowTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { +func (m *Column) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -24189,17 +24798,17 @@ func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ClusterWorkflowTemplateList: wiretype end group for non-group") + return fmt.Errorf("proto: Column: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterWorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Column: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24209,30 +24818,29 @@ func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 
for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -24242,25 +24850,55 @@ func (m *ClusterWorkflowTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ClusterWorkflowTemplate{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -27321,6 +27959,38 @@ func (m *Gauge) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.Realtime = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = GaugeOperation(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31190,15 +31860,129 @@ func (m *Mutex) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Mutex: wiretype end group for non-group") + return fmt.Errorf("proto: Mutex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mutex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutexHolding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutexHolding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Mutex: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutexHolding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31226,7 +32010,39 @@ func (m *Mutex) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Mutex = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Holder", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holder = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -31249,7 +32065,7 @@ func (m *Mutex) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutexHolding) Unmarshal(dAtA []byte) error { +func (m *MutexStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31272,17 +32088,17 @@ func (m *MutexHolding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutexHolding: wiretype end group for non-group") + return fmt.Errorf("proto: MutexStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutexHolding: illegal tag %d (wire type %d)", 
fieldNum, wire) + return fmt.Errorf("proto: MutexStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mutex", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31292,29 +32108,31 @@ func (m *MutexHolding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Mutex = string(dAtA[iNdEx:postIndex]) + m.Holding = append(m.Holding, MutexHolding{}) + if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holder", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31324,23 +32142,25 @@ func (m *MutexHolding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Holder = string(dAtA[iNdEx:postIndex]) + m.Waiting = append(m.Waiting, MutexHolding{}) + if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -31363,7 +32183,7 @@ func (m *MutexHolding) Unmarshal(dAtA []byte) error { } return nil } -func (m *MutexStatus) Unmarshal(dAtA []byte) error { +func (m *NodeFlag) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31386,17 +32206,17 @@ func (m *MutexStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutexStatus: wiretype end group for non-group") + return fmt.Errorf("proto: NodeFlag: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutexStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NodeFlag: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holding", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hooked", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31406,31 +32226,17 @@ func (m *MutexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Holding = append(m.Holding, MutexHolding{}) - if err := m.Holding[len(m.Holding)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Hooked = bool(v != 0) case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retried", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31440,26 +32246,12 @@ func (m *MutexStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Waiting = append(m.Waiting, MutexHolding{}) - if err := m.Waiting[len(m.Waiting)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.Retried = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32539,6 +33331,42 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } m.Progress = Progress(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeFlag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeFlag == nil { + m.NodeFlag = &NodeFlag{} + } + if err := m.NodeFlag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33513,6 +34341,26 @@ func (m *OSSBucket) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseSDKCreds", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseSDKCreds = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33557,36 +34405,105 @@ func (m *OSSLifecycleRule) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OSSLifecycleRule: wiretype end group for non-group") + return fmt.Errorf("proto: OSSLifecycleRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OSSLifecycleRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MarkInfrequentAccessAfterDays", wireType) + } + m.MarkInfrequentAccessAfterDays = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MarkInfrequentAccessAfterDays |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MarkDeletionAfterDays", wireType) + } + m.MarkDeletionAfterDays = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MarkDeletionAfterDays |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OSSLifecycleRule: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MarkInfrequentAccessAfterDays", wireType) - } - m.MarkInfrequentAccessAfterDays = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MarkInfrequentAccessAfterDays |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MarkDeletionAfterDays", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - m.MarkDeletionAfterDays = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33596,61 +34513,26 @@ func (m *OSSLifecycleRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MarkDeletionAfterDays |= int32(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if byteLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Object) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Value = append(m.Value[:0], 
dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Object: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34401,6 +35283,42 @@ func (m *PodGC) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteDelayDuration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteDelayDuration == nil { + m.DeleteDelayDuration = &v11.Duration{} + } + if err := m.DeleteDelayDuration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36052,6 +36970,42 @@ func (m *S3Bucket) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CASecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CASecret == nil { + m.CASecret = &v1.SecretKeySelector{} + } + if err := m.CASecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -36381,17 +37335,131 @@ func (m *SemaphoreHolding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SemaphoreHolding: wiretype end group for non-group") + return fmt.Errorf("proto: SemaphoreHolding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SemaphoreHolding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Semaphore = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Holders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Holders = append(m.Holders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SemaphoreRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SemaphoreRef: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SemaphoreHolding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SemaphoreRef: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Semaphore", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36401,27 +37469,31 @@ func (m *SemaphoreHolding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Semaphore = string(dAtA[iNdEx:postIndex]) + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &v1.ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Holders", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36449,93 +37521,7 @@ func (m *SemaphoreHolding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Holders = append(m.Holders, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if 
iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SemaphoreRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SemaphoreRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SemaphoreRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &v1.ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -40454,7 +41440,156 @@ func (m *VolumeClaimGC) Unmarshal(dAtA []byte) error { } return nil } -func (m *Workflow) Unmarshal(dAtA []byte) error { +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40477,10 +41612,10 @@ func (m *Workflow) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowArtifactGCTask: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowArtifactGCTask: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -40603,7 +41738,7 @@ func (m *Workflow) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { +func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40626,15 +41761,15 @@ func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowArtifactGCTask: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowArtifactGCTaskList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowArtifactGCTask: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowArtifactGCTaskList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40661,46 +41796,13 @@ func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40727,7 +41829,8 @@ func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, WorkflowArtifactGCTask{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -40752,7 +41855,7 @@ func (m *WorkflowArtifactGCTask) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { +func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40775,15 +41878,15 @@ func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowArtifactGCTaskList: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowEventBinding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowArtifactGCTaskList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowEventBinding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40810,13 +41913,13 @@ func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40843,8 +41946,7 @@ func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, WorkflowArtifactGCTask{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -40869,7 +41971,7 @@ func (m *WorkflowArtifactGCTaskList) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { +func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -40892,15 +41994,15 @@ func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBinding: 
wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowEventBindingList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBinding: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowEventBindingList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40927,13 +42029,13 @@ func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -40960,7 +42062,8 @@ func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, WorkflowEventBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -40985,7 +42088,7 @@ func (m *WorkflowEventBinding) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { +func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -41008,15 +42111,15 @@ func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBindingList: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowEventBindingSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowEventBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -41043,13 +42146,13 @@ func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Submit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -41076,8 +42179,10 @@ func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, WorkflowEventBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Submit == nil { + m.Submit = &Submit{} + } + if err := m.Submit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
return err } iNdEx = postIndex @@ -41102,7 +42207,7 @@ func (m *WorkflowEventBindingList) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { +func (m *WorkflowLevelArtifactGC) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -41125,15 +42230,15 @@ func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowEventBindingSpec: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowLevelArtifactGC: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowEventBindingSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowLevelArtifactGC: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactGC", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -41160,15 +42265,35 @@ func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceFinalizerRemoval", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceFinalizerRemoval = bool(v != 0) + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Submit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodSpecPatch", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41178,27 +42303,23 @@ func (m *WorkflowEventBindingSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Submit == nil { - m.Submit = &Submit{} - } - if err := m.Submit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PodSpecPatch = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -42814,7 +43935,7 @@ func (m *WorkflowSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.PodDisruptionBudget == nil { - m.PodDisruptionBudget = &v1beta1.PodDisruptionBudgetSpec{} + m.PodDisruptionBudget = &v12.PodDisruptionBudgetSpec{} } if err := m.PodDisruptionBudget.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -43320,7 +44441,7 @@ func (m *WorkflowSpec) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ArtifactGC == nil { - m.ArtifactGC = &ArtifactGC{} + m.ArtifactGC = &WorkflowLevelArtifactGC{} } if err := m.ArtifactGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -44240,6 
+45361,121 @@ func (m *WorkflowStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskResultsCompletionStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TaskResultsCompletionStatus == nil { + m.TaskResultsCompletionStatus = make(map[string]bool) + } + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.TaskResultsCompletionStatus[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto index d4025f8d9dd..9ad301e981f 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/generated.proto @@ -6,7 +6,7 @@ syntax = "proto2"; package github.com.argoproj.argo_workflows.v3.pkg.apis.workflow.v1alpha1; import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/api/policy/v1beta1/generated.proto"; +import "k8s.io/api/policy/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -100,7 +100,7 @@ message Artifact { optional bool deleted 
= 13; } -// ArtifactGC describes how to delete artifacts from completed Workflows +// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed message ArtifactGC { // Strategy is the strategy to use. // +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never @@ -275,6 +275,9 @@ message ArtifactoryArtifactRepository { // RepoURL is the url for artifactory repo. optional string repoURL = 2; + + // KeyFormat defines the format of how to store keys and can reference workflow variables. + optional string keyFormat = 3; } // ArtifactoryAuth describes the secret selectors required for authenticating to artifactory @@ -325,7 +328,10 @@ message Backoff { // Factor is a factor to multiply the base duration after each failed retry optional k8s.io.apimachinery.pkg.util.intstr.IntOrString factor = 2; - // MaxDuration is the maximum amount of time allowed for the backoff strategy + // MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. + // It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. + // However, when the workflow fails, the pod's deadline is then overridden by maxDuration. + // This ensures that the workflow does not exceed the specified maximum duration when retries are involved. optional string maxDuration = 3; } @@ -371,6 +377,20 @@ message ClusterWorkflowTemplateList { repeated ClusterWorkflowTemplate items = 2; } +// Column is a custom column that will be exposed in the Workflow List View. +// +patchStrategy=merge +// +patchMergeKey=name +message Column { + // The name of this column, e.g., "Workflow Completed". + optional string name = 1; + + // The type of this column, "label" or "annotation". + optional string type = 2; + + // The key of the label or annotation, e.g., "workflows.argoproj.io/completed". + optional string key = 3; +} + message Condition { // Type is the type of condition optional string type = 1; @@ -388,12 +408,14 @@ message ContainerNode { repeated string dependencies = 2; } +// ContainerSetRetryStrategy provides controls on how to retry a container set message ContainerSetRetryStrategy { // Duration is the time between each retry, examples values are "300ms", "1s" or "5m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". optional string duration = 1; - // Nbr of retries + // Retries is the maximum number of retry attempts for each container. It does not include the + // first, original attempt; the maximum number of total attempts will be `retries + 1`. optional k8s.io.apimachinery.pkg.util.intstr.IntOrString retries = 2; } @@ -402,8 +424,8 @@ message ContainerSetTemplate { repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 3; - // RetryStrategy describes how to retry a container nodes in the container set if it fails. - // Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set. + // RetryStrategy describes how to retry container nodes if the container set fails. + // Note that this works differently from the template-level `retryStrategy` as it is a process-level retry that does not create new Pods or containers. optional ContainerSetRetryStrategy retryStrategy = 5; } @@ -579,7 +601,7 @@ message DataSource { } message Event { - // Selector (https://github.com/antonmedv/expr) that we must must match the event. E.g. 
`payload.message == "test"` + // Selector (https://github.com/expr-lang/expr) that we must must match the event. E.g. `payload.message == "test"` optional string selector = 1; } @@ -601,7 +623,7 @@ message GCSArtifact { message GCSArtifactRepository { optional GCSBucket gCSBucket = 1; - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. optional string keyFormat = 2; } @@ -616,11 +638,16 @@ message GCSBucket { // Gauge is a Gauge prometheus metric message Gauge { - // Value is the value of the metric + // Value is the value to be used in the operation with the metric's current value. If no operation is set, + // value is the value of the metric optional string value = 1; // Realtime emits this metric in real time if applicable optional bool realtime = 2; + + // Operation defines the operation to apply with value and the metrics' current value + // +optional + optional string operation = 3; } // GitArtifact is the location of an git artifact @@ -860,7 +887,7 @@ message Link { // The name of the link, E.g. "Workflow Logs" or "Pod Logs" optional string name = 1; - // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs" or "chat" + // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat" optional string scope = 2; // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}" @@ -921,6 +948,9 @@ message Metrics { message Mutex { // name of the mutex optional string name = 1; + + // Namespace is the namespace of the mutex, default: [namespace of workflow] + optional string namespace = 2; } // MutexHolding describes the mutex and the object which is holding it. @@ -950,6 +980,14 @@ message MutexStatus { repeated MutexHolding waiting = 2; } +message NodeFlag { + // Hooked tracks whether or not this node was triggered by hook or onExit + optional bool hooked = 1; + + // Retried tracks whether or not this node was retried by retryStrategy + optional bool retried = 2; +} + message NodeResult { optional string phase = 1; @@ -988,6 +1026,8 @@ message NodeStatus { // Phase a simple, high-level summary of where the node is in its lifecycle. // Can be used as a state machine. + // Will be one of these values "Pending", "Running" before the node is completed, or "Succeeded", + // "Skipped", "Failed", "Error", or "Omitted" as a final state. optional string phase = 7; // BoundaryID indicates the node ID of the associated template root node in which this node belongs to @@ -1017,6 +1057,9 @@ message NodeStatus { // Daemoned tracks whether or not this node was daemoned and need to be terminated optional bool daemoned = 13; + // NodeFlag tracks some history of node. e.g.) hooked, retried, etc. + optional NodeFlag nodeFlag = 27; + // Inputs captures input parameter values and artifact locations supplied to this template invocation optional Inputs inputs = 14; @@ -1096,7 +1139,7 @@ message OSSArtifact { message OSSArtifactRepository { optional OSSBucket oSSBucket = 1; - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. 
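Every hand-rolled Unmarshal function earlier in this patch follows the same wire-format discipline: read a varint key, split it into a field number (`key >> 3`) and a wire type (`key & 0x7`), then branch per field, with `ErrIntOverflowGenerated` guarding runaway shifts. A minimal, self-contained sketch of that varint step, illustrative only and not the generated code itself:

```go
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes one base-128 varint from data, returning the value and
// the number of bytes consumed. This mirrors the inner loop the generated
// Unmarshal functions repeat for every field key and length prefix: each
// byte contributes 7 bits, and a clear high bit ends the value.
func readVarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, b := range data {
		if i >= 10 { // more than 10 bytes cannot fit in a uint64
			return 0, 0, errors.New("varint overflow")
		}
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF")
}

func main() {
	// 0x0A encodes field number 1 with wire type 2 (length-delimited),
	// exactly the key the generated code expects for ObjectMeta fields.
	key, n, err := readVarint([]byte{0x0A})
	if err != nil {
		panic(err)
	}
	fmt.Println(key>>3, key&0x7, n) // prints: 1 2 1
}
```

Length-delimited fields (wire type 2) then reuse the same routine to read the msglen prefix before slicing dAtA[iNdEx:postIndex], which is why the shift loop appears twice per message field above.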
optional string keyFormat = 2; } @@ -1122,6 +1165,9 @@ message OSSBucket { // LifecycleRule specifies how to manage bucket's lifecycle optional OSSLifecycleRule lifecycleRule = 7; + + // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. + optional bool useSDKCreds = 8; } // OSSLifecycleRule specifies how to manage bucket's lifecycle @@ -1135,6 +1181,7 @@ message OSSLifecycleRule { // +kubebuilder:validation:Type=object message Object { + optional bytes value = 1; } // Outputs hold parameters, artifacts, and results from a step @@ -1194,11 +1241,14 @@ message Plugin { // PodGC describes how to delete completed pods as they complete message PodGC { - // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess" + // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods optional string strategy = 1; // LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 2; + + // DeleteDelayDuration specifies the duration before pods in the GC queue get deleted. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration deleteDelayDuration = 3; } // Prometheus is a prometheus metric to be emitted @@ -1308,7 +1358,7 @@ message S3Artifact { message S3ArtifactRepository { optional S3Bucket s3Bucket = 1; - // KeyFormat is defines the format of how to store keys. Can reference workflow variables + // KeyFormat defines the format of how to store keys and can reference workflow variables. optional string keyFormat = 2; // KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. @@ -1346,6 +1396,9 @@ message S3Bucket { optional CreateS3BucketOptions createBucketIfNotPresent = 9; optional S3EncryptionOptions encryptionOptions = 10; + + // CASecret specifies the secret that contains the CA, used to verify the TLS connection + optional k8s.io.api.core.v1.SecretKeySelector caSecret = 11; } // S3EncryptionOptions used to determine encryption options during s3 operations @@ -1384,6 +1437,9 @@ message SemaphoreHolding { message SemaphoreRef { // ConfigMapKeyRef is configmap selector for Semaphore configuration optional k8s.io.api.core.v1.ConfigMapKeySelector configMapKeyRef = 1; + + // Namespace is the namespace of the configmap, default: [namespace of workflow] + optional string namespace = 2; } message SemaphoreStatus { @@ -1466,7 +1522,8 @@ message SuppliedValueFrom { // SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time message SuspendTemplate { - // Duration is the seconds to wait before automatically resuming a template + // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. + // Could also be a Duration, e.g.: "2m", "6h" optional string duration = 1; } @@ -1529,7 +1586,7 @@ message Template { // Metdata sets the pods's metadata, i.e. 
annotations and labels optional Metadata metadata = 9; - // Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness + // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness optional bool daemon = 10; // Steps define a series of sequential/parallel workflow steps @@ -1695,7 +1752,7 @@ message ValueFrom { // JQFilter expression against the resource object in resource templates optional string jqFilter = 3; - // Selector (https://github.com/antonmedv/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` + // Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` optional string event = 7; // Parameter reference to a step or dag task in which to retrieve an output parameter value from @@ -1735,7 +1792,7 @@ message Version { // VolumeClaimGC describes how to delete volumes from completed Workflows message VolumeClaimGC { - // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess" + // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess" optional string strategy = 1; } @@ -1804,6 +1861,18 @@ message WorkflowEventBindingSpec { optional Submit submit = 2; } +// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level +message WorkflowLevelArtifactGC { + // ArtifactGC is an embedded struct + optional ArtifactGC artifactGC = 1; + + // ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails + optional bool forceFinalizerRemoval = 2; + + // PodSpecPatch holds strategic merge patch to apply against the artgc pod spec. + optional string podSpecPatch = 3; +} + // WorkflowList is list of Workflow resources // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message WorkflowList { @@ -1853,8 +1922,6 @@ message WorkflowSpec { // VolumeClaimTemplates is a list of claims that containers are allowed to reference. // The Workflow controller will create the claims at the beginning of the workflow // and delete the claims upon completion of the workflow - // +patchStrategy=merge - // +patchMergeKey=name repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 6; // Parallelism limits the max total parallel pods that can execute at the same time in a workflow @@ -1891,7 +1958,7 @@ message WorkflowSpec { // Host networking requested for this workflow pod. Default to false. optional bool hostNetwork = 14; - // Set DNS policy for the pod. + // Set DNS policy for workflow pods. // Defaults to "ClusterFirst". // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. @@ -1955,7 +2022,7 @@ message WorkflowSpec { // Controller will automatically add the selector with workflow name, if selector is empty. // Optional: Defaults to empty. 
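The WorkflowLevelArtifactGC message introduced earlier in this hunk wraps the pre-existing ArtifactGC rather than replacing it: artifactGC is embedded, while forceFinalizerRemoval and podSpecPatch exist only at the Workflow level. A rough Go sketch of that embedding, with approximate shapes only (the real vendored types carry full json/protobuf tags):

```go
package main

import "fmt"

// ArtifactGC approximates the embedded message: just the GC strategy here.
type ArtifactGC struct {
	Strategy string // "", "OnWorkflowCompletion", "OnWorkflowDeletion", "Never"
}

// WorkflowLevelArtifactGC adds workflow-scoped knobs around the embedded struct.
type WorkflowLevelArtifactGC struct {
	ArtifactGC                   // embedded, so Strategy is promoted
	ForceFinalizerRemoval bool   // remove the finalizer even if artifact GC fails
	PodSpecPatch          string // strategic merge patch for the artgc pod spec
}

func main() {
	gc := WorkflowLevelArtifactGC{
		ArtifactGC:            ArtifactGC{Strategy: "OnWorkflowDeletion"},
		ForceFinalizerRemoval: true,
	}
	// Field promotion keeps existing readers of .Strategy working unchanged,
	// which is what lets WorkflowSpec.artifactGC change type in place below.
	fmt.Println(gc.Strategy, gc.ForceFinalizerRemoval)
}
```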
// +optional - optional k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec podDisruptionBudget = 31; + optional k8s.io.api.policy.v1.PodDisruptionBudgetSpec podDisruptionBudget = 31; // Metrics are a list of metrics emitted from this Workflow optional Metrics metrics = 32; @@ -1993,12 +2060,14 @@ message WorkflowSpec { // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts // unless Artifact.ArtifactGC is specified, which overrides this) - optional ArtifactGC artifactGC = 43; + optional WorkflowLevelArtifactGC artifactGC = 43; } // WorkflowStatus contains overall status information about a workflow message WorkflowStatus { // Phase a simple, high-level summary of where the workflow is in its lifecycle. + // Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", + // "Failed" or "Error" once the workflow has completed. optional string phase = 1; // Time at which this workflow started @@ -2053,6 +2122,9 @@ message WorkflowStatus { // ArtifactGCStatus maintains the status of Artifact Garbage Collection optional ArtGCStatus artifactGCStatus = 19; + + // TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection. + map<string, bool> taskResultsCompletionStatus = 20; } // WorkflowStep is a reference to a template to execute in a series of step diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go index 1a17e5af04c..19390e43172 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/info.go @@ -6,8 +6,20 @@ package v1alpha1 type Link struct { // The name of the link, E.g. "Workflow Logs" or "Pod Logs" Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs" or "chat" + // "workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat" Scope string `json:"scope" protobuf:"bytes,2,opt,name=scope"` // The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${workflow.metadata.annotations.userDefinedKey}" URL string `json:"url" protobuf:"bytes,3,opt,name=url"` } + +// Column is a custom column that will be exposed in the Workflow List View. +// +patchStrategy=merge +// +patchMergeKey=name +type Column struct { + // The name of this column, e.g., "Workflow Completed". + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The type of this column, "label" or "annotation". + Type string `json:"type" protobuf:"bytes,2,opt,name=type"` + // The key of the label or annotation, e.g., "workflows.argoproj.io/completed". 
+ Key string `json:"key" protobuf:"bytes,3,opt,name=key"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go index f851806943f..34bf0d33089 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/marshall.go @@ -3,7 +3,7 @@ package v1alpha1 import ( "encoding/json" "fmt" - "io/ioutil" + "os" "path/filepath" "sigs.k8s.io/yaml" @@ -22,7 +22,7 @@ func MustUnmarshal(text, v interface{}) { } if x[0] == '@' { filename := string(x[1:]) - y, err := ioutil.ReadFile(filepath.Clean(filename)) + y, err := os.ReadFile(filepath.Clean(filename)) if err != nil { panic(fmt.Errorf("failed to read file %s: %w", filename, err)) } diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go index 1fa9498b582..a21ee90c114 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/object_types.go @@ -6,7 +6,7 @@ import ( // +kubebuilder:validation:Type=object type Object struct { - Value json.RawMessage `json:"-"` + Value json.RawMessage `json:"-" protobuf:"bytes,1,opt,name=value,casttype=encoding/json.RawMessage"` } func (i *Object) UnmarshalJSON(value []byte) error { diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go deleted file mode 100644 index 66ad4cd339a..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ /dev/null @@ -1,8151 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by openapi-gen. DO NOT EDIT. - -// This file was autogenerated by openapi-gen. Do not edit it manually! 
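The marshall.go hunk above is a mechanical Go 1.16+ cleanup: io/ioutil is deprecated and os.ReadFile is its drop-in replacement, so MustUnmarshal's file loading keeps identical behavior. A small sketch of the updated call, where the manifest path is hypothetical:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// os.ReadFile returns the file contents and any error, exactly as
	// ioutil.ReadFile did; filepath.Clean keeps the path normalization
	// the original call already performed.
	data, err := os.ReadFile(filepath.Clean("workflow.yaml")) // hypothetical path
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(data))
}
```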
- -package v1alpha1 - -import ( - common "k8s.io/kube-openapi/pkg/common" - spec "k8s.io/kube-openapi/pkg/validation/spec" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount": schema_pkg_apis_workflow_v1alpha1_Amount(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy": schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments": schema_pkg_apis_workflow_v1alpha1_Arguments(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact": schema_pkg_apis_workflow_v1alpha1_Artifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC": schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation": schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec": schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths": schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult": schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus": schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchQuery": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactSearchResult": schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository": schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryAuth": schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact": schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository": 
schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureBlobContainer": schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff": schema_pkg_apis_workflow_v1alpha1_Backoff(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth": schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache": schema_pkg_apis_workflow_v1alpha1_Cache(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth": schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition": schema_pkg_apis_workflow_v1alpha1_Condition(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode": schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy": schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate": schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn": schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter": schema_pkg_apis_workflow_v1alpha1_Counter(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions": schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow": schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowList": schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec": schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus": schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data": schema_pkg_apis_workflow_v1alpha1_Data(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource": schema_pkg_apis_workflow_v1alpha1_DataSource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event": schema_pkg_apis_workflow_v1alpha1_Event(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig": schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact": schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSBucket": schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge": schema_pkg_apis_workflow_v1alpha1_Gauge(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact": schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSConfig": schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSKrbConfig": schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP": schema_pkg_apis_workflow_v1alpha1_HTTP(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth": schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource": schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader": schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource": schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header": schema_pkg_apis_workflow_v1alpha1_Header(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram": schema_pkg_apis_workflow_v1alpha1_Histogram(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item": schema_pkg_apis_workflow_v1alpha1_Item(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelKeys": schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom": schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValues": schema_pkg_apis_workflow_v1alpha1_LabelValues(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook": schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Link": schema_pkg_apis_workflow_v1alpha1_Link(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom": schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus": schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize": schema_pkg_apis_workflow_v1alpha1_Memoize(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata": 
schema_pkg_apis_workflow_v1alpha1_Metadata(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel": schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics": schema_pkg_apis_workflow_v1alpha1_Metrics(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex": schema_pkg_apis_workflow_v1alpha1_Mutex(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding": schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus": schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult": schema_pkg_apis_workflow_v1alpha1_NodeResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus": schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy": schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth": schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam": schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact": schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository": schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSBucket": schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule": schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Object": schema_pkg_apis_workflow_v1alpha1_Object(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs": schema_pkg_apis_workflow_v1alpha1_Outputs(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps": schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter": schema_pkg_apis_workflow_v1alpha1_Parameter(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin": schema_pkg_apis_workflow_v1alpha1_Plugin(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC": schema_pkg_apis_workflow_v1alpha1_PodGC(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus": schema_pkg_apis_workflow_v1alpha1_Prometheus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact": schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate": schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity": schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity": 
schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy": schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact": schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository": schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Bucket": schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions": schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate": schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding": schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef": schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus": schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence": schema_pkg_apis_workflow_v1alpha1_Sequence(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit": schema_pkg_apis_workflow_v1alpha1_Submit(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SubmitOpts": schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom": schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate": schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization": schema_pkg_apis_workflow_v1alpha1_Synchronization(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus": schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy": schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy": schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template": schema_pkg_apis_workflow_v1alpha1_Template(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef": schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep": schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer": schema_pkg_apis_workflow_v1alpha1_UserContainer(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom": schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Version": schema_pkg_apis_workflow_v1alpha1_Version(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC": schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref), - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow": schema_pkg_apis_workflow_v1alpha1_Workflow(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTaskList": schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingList": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowList": schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata": schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStep": schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResultList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetList": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref), - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy": schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref), - } -} - -func schema_pkg_apis_workflow_v1alpha1_Amount(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Amount represent a numeric amount.", - Type: Amount{}.OpenAPISchemaType(), - Format: Amount{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArchiveStrategy(ref common.ReferenceCallback) 
common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArchiveStrategy describes how to archive files/directory when saving artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "tar": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy"), - }, - }, - "none": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy"), - }, - }, - "zip": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NoneStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TarStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ZipStrategy"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Arguments(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Arguments to a template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters is the list of parameters to pass to the template or workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifacts is the list of artifacts to pass to the template or workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtGCStatus maintains state related to ArtifactGC", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategiesProcessed": { - SchemaProps: spec.SchemaProps{ - Description: "have Pods been started to perform this strategy? (enables us not to re-process what we've already done)", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "podsRecouped": { - SchemaProps: spec.SchemaProps{ - Description: "have completed Pods been processed? 
(mapped by Pod name) used to prevent re-processing the Status of a Pod more than once", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "notSpecified": { - SchemaProps: spec.SchemaProps{ - Description: "if this is true, we already checked to see if we need to do it and we don't", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Artifact indicates an artifact to place at a specified path", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the artifact. must be unique within a template's inputs/outputs.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the container path to the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "from": { - SchemaProps: spec.SchemaProps{ - Description: "From allows an artifact to reference an artifact from a previous step", - Type: []string{"string"}, - Format: "", - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: "Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - 
SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", - Type: []string{"string"}, - Format: "", - }, - }, - "archive": { - SchemaProps: spec.SchemaProps{ - Description: "Archive controls how the artifact will be saved to the artifact repository.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "subPath": { - SchemaProps: spec.SchemaProps{ - Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", - Type: []string{"string"}, - Format: "", - }, - }, - "recurseMode": { - SchemaProps: spec.SchemaProps{ - Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", - Type: []string{"boolean"}, - Format: "", - }, - }, - "fromExpression": { - SchemaProps: spec.SchemaProps{ - Description: "FromExpression, if defined, is evaluated to specify the value for the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Description: "Has this been deleted?", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes how to delete artifacts from completed Workflows", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use.", - Type: []string{"string"}, - Format: "", - }, - }, - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "PodMetadata is an optional field for specifying the 
Labels and Annotations that should be assigned to the Pod doing the deletion", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is an optional field for specifying the Service Account that should be assigned to the Pod doing the deletion", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGCSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCSpec specifies the Artifacts that need to be deleted", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactsByNode": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactsByNode maps Node name to information pertaining to Artifacts on that Node", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactNodeSpec"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactGCStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCStatus describes the result of the deletion", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactResultsByNode": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResultsByNode maps Node name to result", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResultNodeStatus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactLocation(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactLocation describes a location for a single or multiple artifacts. It is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname). 
It is also used to describe the location of multiple artifacts such as the archive location of a single workflow step, which the executor will use as a default location to store its files.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: "Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactNodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactNodeSpec specifies the Artifacts that need to be deleted for a given Node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLocation": { - SchemaProps: 
spec.SchemaProps{ - Description: "ArchiveLocation is the template-level Artifact location specification", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"), - }, - }, - "artifacts": { - SchemaProps: spec.SchemaProps{ - Description: "Artifacts maps artifact name to Artifact description", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactPaths(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactPaths expands a step from a collection of artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the artifact. must be unique within a template's inputs/outputs.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the container path to the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "from": { - SchemaProps: spec.SchemaProps{ - Description: "From allows an artifact to reference an artifact from a previous step", - Type: []string{"string"}, - Format: "", - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 contains S3 artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"), - }, - }, - "git": { - SchemaProps: spec.SchemaProps{ - Description: "Git contains git artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP contains HTTP artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory contains artifactory artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS contains HDFS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact"), - }, - }, - "raw": { - SchemaProps: spec.SchemaProps{ - Description: "Raw contains raw artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS contains OSS artifact location details", - Ref: 
ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS contains GCS artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure contains Azure Storage artifact location details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output artifact to the global scope, making it available as '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts", - Type: []string{"string"}, - Format: "", - }, - }, - "archive": { - SchemaProps: spec.SchemaProps{ - Description: "Archive controls how the artifact will be saved to the artifact repository.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy"), - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Make Artifacts optional, if Artifacts doesn't generate or exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "subPath": { - SchemaProps: spec.SchemaProps{ - Description: "SubPath allows an artifact to be sourced from a subpath within the specified source", - Type: []string{"string"}, - Format: "", - }, - }, - "recurseMode": { - SchemaProps: spec.SchemaProps{ - Description: "If mode is set, apply the permission recursively into the artifact if it is a folder", - Type: []string{"boolean"}, - Format: "", - }, - }, - "fromExpression": { - SchemaProps: spec.SchemaProps{ - Description: "FromExpression, if defined, is evaluated to specify the value for the artifact", - Type: []string{"string"}, - Format: "", - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when to deleting an artifact from completed or deleted workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Description: "Has this been deleted?", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArchiveStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GitArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RawArtifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepository represents an artifact repository in which a controller will 
store its artifacts", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs enables log archiving", - Type: []string{"boolean"}, - Format: "", - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "S3 stores artifact in a S3-compliant object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"), - }, - }, - "artifactory": { - SchemaProps: spec.SchemaProps{ - Description: "Artifactory stores artifacts to JFrog Artifactory", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository"), - }, - }, - "hdfs": { - SchemaProps: spec.SchemaProps{ - Description: "HDFS stores artifacts in HDFS", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository"), - }, - }, - "oss": { - SchemaProps: spec.SchemaProps{ - Description: "OSS stores artifact in a OSS-compliant object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "GCS stores artifact in a GCS object store", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository"), - }, - }, - "azure": { - SchemaProps: spec.SchemaProps{ - Description: "Azure stores artifact in an Azure Storage account", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactoryArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.AzureArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.GCSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HDFSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSArtifactRepository", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3ArtifactRepository"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the config map. Defaults to \"artifact-repositories\".", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The config map key. Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactRepositoryRefStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the config map. Defaults to \"artifact-repositories\".", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The config map key. 
Defaults to the value of the \"workflows.argoproj.io/default-artifact-repository\" annotation.", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "The namespace of the config map. Defaults to the workflow's namespace, or the controller's namespace (if found).", - Type: []string{"string"}, - Format: "", - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "If this ref represents the default artifact repository, rather than a config map.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "artifactRepository": { - SchemaProps: spec.SchemaProps{ - Description: "The repository the workflow will use. This maybe empty before v3.1.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepository"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResult describes the result of attempting to delete a given Artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the Artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "success": { - SchemaProps: spec.SchemaProps{ - Description: "Success describes whether the deletion succeeded", - Type: []string{"boolean"}, - Format: "", - }, - }, - "error": { - SchemaProps: spec.SchemaProps{ - Description: "Error is an optional error message which should be set if Success==false", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactResultNodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResultNodeStatus describes the result of the deletion on a given node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactResults": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactResults maps Artifact name to result of the deletion", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactResult"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchQuery(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactGCStrategies": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - "artifactName": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "templateName": { 
- SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "nodeId": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "deleted": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeTypes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactSearchResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "Artifact": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - "NodeID": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"Artifact", "NodeID"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryArtifact is the location of an artifactory artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryArtifactRepository defines the controller configuration for an artifactory artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "repoURL": { - SchemaProps: spec.SchemaProps{ - Description: "RepoURL is the url for artifactory repo.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ArtifactoryAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return 
common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureArtifact is the location of a an Azure Storage artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. It is most likely \"https://.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "blob": { - SchemaProps: spec.SchemaProps{ - Description: "Blob is the blob name (i.e., path) in the container where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container", "blob"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureArtifactRepository defines the controller configuration for an Azure Blob Storage artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. 
It is most likely \"https://.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "blobNameFormat": { - SchemaProps: spec.SchemaProps{ - Description: "BlobNameFormat is defines the format of how to store blob names. Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_AzureBlobContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureBlobContainer contains the access information for interfacing with an Azure Blob Storage container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the service url associated with an account. It is most likely \"https://.blob.core.windows.net\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the container where resources will be stored", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "accountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccountKeySecret is the secret selector to the Azure Blob Storage account access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"endpoint", "container"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Backoff(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Backoff is a backoff strategy to use within retryStrategy", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. 
\"2m\", \"1h\")", - Type: []string{"string"}, - Format: "", - }, - }, - "factor": { - SchemaProps: spec.SchemaProps{ - Description: "Factor is a factor to multiply the base duration after each failed retry", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "maxDuration": { - SchemaProps: spec.SchemaProps{ - Description: "MaxDuration is the maximum amount of time allowed for the backoff strategy", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_BasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth describes the secret selectors required for basic authentication", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Cache(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Cache is the configuration for the type of cache to be used", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap sets a ConfigMap-based cache", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - }, - Required: []string{"configMap"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClientCertAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClientCertAuth holds necessary information for client authentication via certificates", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientCertSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "clientKeySecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterWorkflowTemplate is the definition of a workflow template resource in cluster scope", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ClusterWorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterWorkflowTemplateList is list of ClusterWorkflowTemplate resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClusterWorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the type of condition", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is the status of the condition", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Message is the condition message", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerNode(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. 
The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. 
File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "dependencies": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerSetRetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the time between each retry, examples values are \"300ms\", \"1s\" or \"5m\". 
Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", - Type: []string{"string"}, - Format: "", - }, - }, - "retries": { - SchemaProps: spec.SchemaProps{ - Description: "Nbr of retries", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - }, - Required: []string{"retries"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContainerSetTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "containers": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode"), - }, - }, - }, - }, - }, - "volumeMounts": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry a container nodes in the container set if it fails. Nbr of retries(default 0) and sleep duration between retries(default 0s, instant retry) can be set.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy"), - }, - }, - }, - Required: []string{"containers"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerNode", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetRetryStrategy", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn defines if a workflow should continue even if a task or step fails/errors. 
It can be specified if the workflow should continue when the pod errors, fails or both.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "error": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - "failed": { - SchemaProps: spec.SchemaProps{ - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Counter(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Counter is a Counter prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CreateS3BucketOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CreateS3BucketOptions options used to determine automatic automatic bucket-creation process", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "objectLocking": { - SchemaProps: spec.SchemaProps{ - Description: "ObjectLocking Enable object locking", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflow(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflow is the definition of a scheduled workflow resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowList is list of CronWorkflow resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CronWorkflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowSpec is the specification of a CronWorkflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "workflowSpec": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowSpec is the spec of the workflow to be run", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "schedule": { - SchemaProps: spec.SchemaProps{ - Description: "Schedule is a schedule to run the Workflow in Cron format", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "concurrencyPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "ConcurrencyPolicy is the K8s-style concurrency policy that will be used", - Type: []string{"string"}, - Format: "", - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend is a flag that will stop new CronWorkflows from running if set to true", - Type: []string{"boolean"}, - Format: "", - }, - }, - "startingDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "StartingDeadlineSeconds is the K8s-style deadline that will limit the time a CronWorkflow will be run after its original scheduled time if it is missed.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "successfulJobsHistoryLimit": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessfulJobsHistoryLimit is the number of successful jobs to be kept at a time", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "failedJobsHistoryLimit": { - SchemaProps: spec.SchemaProps{ - Description: "FailedJobsHistoryLimit is the number of failed jobs to be kept at a time", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "timezone": { - SchemaProps: spec.SchemaProps{ - Description: "Timezone is the timezone against which the cron schedule will be calculated, e.g. \"Asia/Tokyo\". 
Default is machine's local time.", - Type: []string{"string"}, - Format: "", - }, - }, - "workflowMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowMetadata contains some metadata of the workflow to be run", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - }, - Required: []string{"workflowSpec", "schedule"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_CronWorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CronWorkflowStatus is the status of a CronWorkflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "active": { - SchemaProps: spec.SchemaProps{ - Description: "Active is a list of active workflows stemming from this CronWorkflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - }, - }, - "lastScheduledTime": { - SchemaProps: spec.SchemaProps{ - Description: "LastScheduleTime is the last time the CronWorkflow was scheduled", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Conditions is a list of conditions the CronWorkflow may have", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"), - }, - }, - }, - }, - }, - }, - Required: []string{"active", "lastScheduledTime", "conditions"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DAGTask(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DAGTask represents a node in the graph during DAG execution", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the target", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Name of template to execute", - Type: []string{"string"}, - Format: "", - }, - }, - "inline": { - SchemaProps: spec.SchemaProps{ - Description: "Inline is the template. 
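Since Timezone takes IANA names such as "Asia/Tokyo", it can be checked with the Go standard library; a minimal sketch assuming only what the field docs above state (checkCronWorkflowTimezone is illustrative):

package main

import (
	"fmt"
	"time"
)

// checkCronWorkflowTimezone resolves a CronWorkflowSpec.Timezone value; an
// empty string falls back to local time, matching "Default is machine's
// local time" above.
func checkCronWorkflowTimezone(tz string) (*time.Location, error) {
	if tz == "" {
		return time.Local, nil
	}
	return time.LoadLocation(tz)
}

func main() {
	loc, err := checkCronWorkflowTimezone("Asia/Tokyo")
	fmt.Println(loc, err) // Asia/Tokyo <nil>
}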
Template must be empty if this is declared (and vice-versa).", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments are the parameter and artifact arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "dependencies": { - SchemaProps: spec.SchemaProps{ - Description: "Dependencies are name of other targets which this depends on", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "withItems": { - SchemaProps: spec.SchemaProps{ - Description: "WithItems expands a task into multiple parallel tasks from the items in the list", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"), - }, - }, - }, - }, - }, - "withParam": { - SchemaProps: spec.SchemaProps{ - Description: "WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.", - Type: []string{"string"}, - Format: "", - }, - }, - "withSequence": { - SchemaProps: spec.SchemaProps{ - Description: "WithSequence expands a task into a numeric sequence", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"), - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is an expression in which the task should conditionally execute", - Type: []string{"string"}, - Format: "", - }, - }, - "continueOn": { - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. 
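The withParam field is documented as a JSON list; a hedged sketch of the fan-out step (expandWithParam is a hypothetical name, not Argo's actual implementation):

package main

import (
	"encoding/json"
	"fmt"
)

// expandWithParam decodes a DAGTask withParam value into the items that
// would each become one parallel task.
func expandWithParam(withParam string) ([]interface{}, error) {
	var items []interface{}
	if err := json.Unmarshal([]byte(withParam), &items); err != nil {
		return nil, fmt.Errorf("withParam is not a JSON list: %w", err)
	}
	return items, nil
}

func main() {
	items, err := expandWithParam(`["a","b","c"]`)
	fmt.Println(len(items), err) // 3 <nil>
}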
DEPRECATED: Use Hooks[exit].Template instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "depends": { - SchemaProps: spec.SchemaProps{ - Description: "Depends are name of other targets which this depends on", - Type: []string{"string"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks hold the lifecycle hook which is invoked at lifecycle of task, irrespective of the success, failure, or error status of the primary task", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DAGTemplate is a template subtype for directed acyclic graph templates", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "target": { - SchemaProps: spec.SchemaProps{ - Description: "Target are one or more names of targets to execute in a DAG", - Type: []string{"string"}, - Format: "", - }, - }, - "tasks": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tasks are a list of DAG tasks", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"), - }, - }, - }, - }, - }, - "failFast": { - SchemaProps: spec.SchemaProps{ - Description: "This flag is for DAG logic. The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. 
More info and example about this feature at https://github.com/argoproj/argo-workflows/issues/1442", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"tasks"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTask"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Data(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Data is a data template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "source": { - SchemaProps: spec.SchemaProps{ - Description: "Source sources external data into a data template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource"), - }, - }, - "transformation": { - SchemaProps: spec.SchemaProps{ - Description: "Transformation applies a set of transformations", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"), - }, - }, - }, - }, - }, - }, - Required: []string{"source", "transformation"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DataSource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TransformationStep"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_DataSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DataSource sources external data into a data template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifactPaths": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactPaths is a data transformation that collects a list of artifact paths", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactPaths"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "Selector (https://github.com/antonmedv/expr) that we must must match the event. E.g. 
`payload.message == \"test\"`", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"selector"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ExecutorConfig holds configurations of an executor container.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName specifies the service account name of the executor container.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GCSArtifact is the location of a GCS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the path in the bucket where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GCSArtifactRepository defines the controller configuration for a GCS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat is defines the format of how to store keys. 
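The selector language is the expr library linked in the description; a minimal evaluation sketch against a sample event (the env layout is assumed for illustration):

package main

import (
	"fmt"

	"github.com/antonmedv/expr"
)

func main() {
	// Evaluate an Event selector such as `payload.message == "test"`.
	env := map[string]interface{}{
		"payload": map[string]interface{}{"message": "test"},
	}
	matched, err := expr.Eval(`payload.message == "test"`, env)
	fmt.Println(matched, err) // true <nil>
}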
Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GCSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GCSBucket contains the access information for interfacring with a GCS bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccountKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountKeySecret is the secret selector to the bucket's service account key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Gauge(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Gauge is a Gauge prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "realtime": { - SchemaProps: spec.SchemaProps{ - Description: "Realtime emits this metric in real time if applicable", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"value", "realtime"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GitArtifact is the location of an git artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "repo": { - SchemaProps: spec.SchemaProps{ - Description: "Repo is the git repository", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "revision": { - SchemaProps: spec.SchemaProps{ - Description: "Revision is the git commit, tag, branch to checkout", - Type: []string{"string"}, - Format: "", - }, - }, - "depth": { - SchemaProps: spec.SchemaProps{ - Description: "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "fetch": { - SchemaProps: spec.SchemaProps{ - Description: "Fetch specifies a number of refs that should be fetched before checkout", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "usernameSecret": { - SchemaProps: spec.SchemaProps{ - Description: "UsernameSecret is the secret selector to the repository username", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "passwordSecret": { - SchemaProps: spec.SchemaProps{ - Description: "PasswordSecret is the secret selector to the repository password", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "sshPrivateKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SSHPrivateKeySecret is the secret selector to the repository ssh private key", - Ref: 
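KeyFormat "can reference workflow variables"; a sketch of the substitution, assuming the {{workflow.name}}-style placeholders used elsewhere in Argo (that placeholder syntax is an assumption here, not stated by this schema):

package main

import (
	"fmt"
	"strings"
)

// resolveKeyFormat expands a keyFormat such as "{{workflow.name}}/{{pod.name}}".
// The placeholder names are assumed for illustration only.
func resolveKeyFormat(keyFormat, workflowName, podName string) string {
	return strings.NewReplacer(
		"{{workflow.name}}", workflowName,
		"{{pod.name}}", podName,
	).Replace(keyFormat)
}

func main() {
	fmt.Println(resolveKeyFormat("{{workflow.name}}/{{pod.name}}", "wf-1", "pod-a")) // wf-1/pod-a
}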
ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "insecureIgnoreHostKey": { - SchemaProps: spec.SchemaProps{ - Description: "InsecureIgnoreHostKey disables SSH strict host key checking during git clone", - Type: []string{"boolean"}, - Format: "", - }, - }, - "disableSubmodules": { - SchemaProps: spec.SchemaProps{ - Description: "DisableSubmodules disables submodules during git clone", - Type: []string{"boolean"}, - Format: "", - }, - }, - "singleBranch": { - SchemaProps: spec.SchemaProps{ - Description: "SingleBranch enables single branch clone, using the `branch` parameter", - Type: []string{"boolean"}, - Format: "", - }, - }, - "branch": { - SchemaProps: spec.SchemaProps{ - Description: "Branch is the branch to fetch when `SingleBranch` is enabled", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"repo"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSArtifact is the location of an HDFS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. 
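A hedged sketch of how GitArtifact's Depth, SingleBranch and Branch fields map onto git's own flags (the mapping mirrors the field docs above, not Argo's actual clone code):

package main

import (
	"fmt"
	"os/exec"
)

// cloneCmd builds a git clone command: Depth -> --depth,
// SingleBranch + Branch -> --single-branch --branch.
func cloneCmd(repo, branch string, depth int, singleBranch bool) *exec.Cmd {
	args := []string{"clone"}
	if depth > 0 {
		args = append(args, "--depth", fmt.Sprint(depth))
	}
	if singleBranch && branch != "" {
		args = append(args, "--single-branch", "--branch", branch)
	}
	return exec.Command("git", append(args, repo)...)
}

func main() {
	fmt.Println(cloneCmd("https://github.com/argoproj/argo-workflows.git", "main", 1, true).Args)
}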
It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is a file path in HDFS", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "force": { - SchemaProps: spec.SchemaProps{ - Description: "Force copies a file forcibly even if it exists", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSArtifactRepository defines the controller configuration for an HDFS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "pathFormat": { - SchemaProps: spec.SchemaProps{ - Description: "PathFormat is defines the format of path to store a file. 
Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - "force": { - SchemaProps: spec.SchemaProps{ - Description: "Force copies a file forcibly even if it exists", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSConfig is configurations for HDFS", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "Addresses is accessible addresses of HDFS name nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hdfsUser": { - SchemaProps: spec.SchemaProps{ - Description: "HDFSUser is the user to access HDFS file system. 
It is ignored if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HDFSKrbConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HDFSKrbConfig is auth configurations for Kerberos", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "krbCCacheSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbCCacheSecret is the secret selector for Kerberos ccache Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbKeytabSecret": { - SchemaProps: spec.SchemaProps{ - Description: "KrbKeytabSecret is the secret selector for Kerberos keytab Either ccache or keytab can be set to use Kerberos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "krbUsername": { - SchemaProps: spec.SchemaProps{ - Description: "KrbUsername is the Kerberos username used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbRealm": { - SchemaProps: spec.SchemaProps{ - Description: "KrbRealm is the Kerberos realm used with Kerberos keytab It must be set if keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - "krbConfigConfigMap": { - SchemaProps: spec.SchemaProps{ - Description: "KrbConfig is the configmap selector for Kerberos config as string It must be set if either ccache or keytab is used.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "krbServicePrincipalName": { - SchemaProps: spec.SchemaProps{ - Description: "KrbServicePrincipalName is the principal name of Kerberos service It must be set if either ccache or keytab is used.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTP(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "method": { - SchemaProps: spec.SchemaProps{ - Description: "Method is HTTP methods for HTTP Request", - Type: []string{"string"}, - Format: "", - }, - }, - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the HTTP Request", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "headers": { - SchemaProps: spec.SchemaProps{ - Description: "Headers are an optional list of headers to send with HTTP requests", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"), - }, - }, - }, - }, - }, - "timeoutSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "TimeoutSeconds is request timeout for HTTP Request. 
Default is 30 seconds", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "successCondition": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessCondition is an expression if evaluated to true is considered successful", - Type: []string{"string"}, - Format: "", - }, - }, - "body": { - SchemaProps: spec.SchemaProps{ - Description: "Body is content of the HTTP Request", - Type: []string{"string"}, - Format: "", - }, - }, - "bodyFrom": { - SchemaProps: spec.SchemaProps{ - Description: "BodyFrom is content of the HTTP Request as Bytes", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource"), - }, - }, - "insecureSkipVerify": { - SchemaProps: spec.SchemaProps{ - Description: "InsecureSkipVerify is a bool when if set to true will skip TLS verification for the HTTP client", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPBodySource", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeader"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "URL of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "headers": { - SchemaProps: spec.SchemaProps{ - Description: "Headers are an optional list of headers to send with HTTP requests for artifacts", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"), - }, - }, - }, - }, - }, - "auth": { - SchemaProps: spec.SchemaProps{ - Description: "Auth contains information for client authentication", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth"), - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Header"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientCert": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth"), - }, - }, - "oauth2": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"), - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.BasicAuth", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ClientCertAuth", 
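The TimeoutSeconds default (30 seconds) and the InsecureSkipVerify flag documented above translate naturally to a Go http.Client; a minimal sketch, not the executor's actual client construction:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// newHTTPTemplateClient applies the documented 30s default timeout and the
// optional TLS verification skip.
func newHTTPTemplateClient(timeoutSeconds int64, insecureSkipVerify bool) *http.Client {
	if timeoutSeconds <= 0 {
		timeoutSeconds = 30
	}
	return &http.Client{
		Timeout: time.Duration(timeoutSeconds) * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify},
		},
	}
}

func main() {
	fmt.Println(newHTTPTemplateClient(0, false).Timeout) // 30s
}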
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2Auth"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPBodySource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPBodySource contains the source of the HTTP body.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "bytes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "valueFrom": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"), - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTPHeaderSource"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_HTTPHeaderSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "secretKeyRef": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Header(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Header indicate a key-value request header to be used when fetching artifacts over HTTP", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the header name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the literal value to use for the header", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Histogram(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Histogram is a Histogram prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the value of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "buckets": { - SchemaProps: spec.SchemaProps{ - Description: "Buckets is a list of bucket divisors for the histogram", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"), - }, - }, - }, - }, - }, - }, - Required: []string{"value", "buckets"}, - }, - }, - Dependencies: []string{ 
- "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Amount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Inputs(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters are a list of parameters passed as inputs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifact are a list of artifacts passed as inputs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Item(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Item expands a single workflow step into multiple parallel steps The value of Item can be a map, string, bool, or number", - Type: Item{}.OpenAPISchemaType(), - Format: Item{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelKeys(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LabelKeys is list of keys", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "expression": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"expression"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LabelValues(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Labels is list of workflow labels", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "items": 
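An Item's value may be a map, string, bool, or number; decoding a withItems list into []interface{} preserves those kinds, as this stdlib-only sketch shows:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	withItems := `["hello", true, 42, {"os": "debian", "version": "9.1"}]`
	var items []interface{}
	if err := json.Unmarshal([]byte(withItems), &items); err != nil {
		panic(err)
	}
	for _, it := range items {
		fmt.Printf("%T: %v\n", it, it) // string, bool, float64, map[string]interface {}
	}
}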
{ - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_LifecycleHook(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of the template to execute by the hook", - Type: []string{"string"}, - Format: "", - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments hold arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute by the hook", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Link(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A link to another app.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the link, E.g. \"Workflow Logs\" or \"Pod Logs\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "scope": { - SchemaProps: spec.SchemaProps{ - Description: "\"workflow\", \"pod\", \"pod-logs\", \"event-source-logs\", \"sensor-logs\" or \"chat\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "url": { - SchemaProps: spec.SchemaProps{ - Description: "The URL. Can contain \"${metadata.namespace}\", \"${metadata.name}\", \"${status.startedAt}\", \"${status.finishedAt}\" or any other element in workflow yaml, e.g. 
\"${workflow.metadata.annotations.userDefinedKey}\"", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "scope", "url"}, - }, - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ManifestFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "artifact": { - SchemaProps: spec.SchemaProps{ - Description: "Artifact contains the artifact to use", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - Required: []string{"artifact"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MemoizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MemoizationStatus is the status of this memoized node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "hit": { - SchemaProps: spec.SchemaProps{ - Description: "Hit indicates whether this node was created from a cache entry", - Default: false, - Type: []string{"boolean"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the name of the key used for this node's cache", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "cacheName": { - SchemaProps: spec.SchemaProps{ - Description: "Cache is the name of the cache that was used", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"hit", "key", "cacheName"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Memoize(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Memoization enables caching for the Outputs of the template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the key to use as the caching key", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "cache": { - SchemaProps: spec.SchemaProps{ - Description: "Cache sets and configures the kind of cache", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"), - }, - }, - "maxAge": { - SchemaProps: spec.SchemaProps{ - Description: "MaxAge is the maximum age (e.g. \"180s\", \"24h\") of an entry that is still considered valid. 
If an entry is older than the MaxAge, it will be ignored.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key", "cache", "maxAge"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Cache"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Metadata(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Pod metdata", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "annotations": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MetricLabel(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MetricLabel is a single label for a prometheus metric", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key", "value"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Metrics(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from a Workflow/Template", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "prometheus": { - SchemaProps: spec.SchemaProps{ - Description: "Prometheus is a list of prometheus metrics to be emitted", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"), - }, - }, - }, - }, - }, - }, - Required: []string{"prometheus"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Prometheus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Mutex(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Mutex holds Mutex configuration", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the mutex", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MutexHolding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MutexHolding describes the mutex and the object which is holding it.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "mutex": { - SchemaProps: spec.SchemaProps{ - Description: 
"Reference for the mutex e.g: ${namespace}/mutex/${mutexName}", - Type: []string{"string"}, - Format: "", - }, - }, - "holder": { - SchemaProps: spec.SchemaProps{ - Description: "Holder is a reference to the object which holds the Mutex. Holding Scenario:\n 1. Current workflow's NodeID which is holding the lock.\n e.g: ${NodeID}\nWaiting Scenario:\n 1. Current workflow or other workflow NodeID which is holding the lock.\n e.g: ${WorkflowName}/${NodeID}", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_MutexStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MutexStatus contains which objects hold mutex locks, and which objects this workflow is waiting on to release locks.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "holding": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Holding is a list of mutexes and their respective objects that are held by mutex lock for this workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), - }, - }, - }, - }, - }, - "waiting": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Waiting is a list of mutexes and their respective objects this workflow is waiting for.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexHolding"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeStatus contains status information about an individual node in the workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "id": { - SchemaProps: spec.SchemaProps{ - Description: "ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the 
ID deterministic", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is unique name in the node tree used to generate the node ID", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "displayName": { - SchemaProps: spec.SchemaProps{ - Description: "DisplayName is a human readable representation of the node. Unique within a template boundary", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type indicates type of node", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "templateName": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", - Type: []string{"string"}, - Format: "", - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "templateScope": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateScope is the template scope in which the template of this node was retrieved.", - Type: []string{"string"}, - Format: "", - }, - }, - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase a simple, high-level summary of where the node is in its lifecycle. Can be used as a state machine.", - Type: []string{"string"}, - Format: "", - }, - }, - "boundaryID": { - SchemaProps: spec.SchemaProps{ - Description: "BoundaryID indicates the node ID of the associated template root node in which this node belongs to", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about why the node is in this condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "startedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this node started", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "finishedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this node completed", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "estimatedDuration": { - SchemaProps: spec.SchemaProps{ - Description: "EstimatedDuration in seconds.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Description: "Progress to completion", - Type: []string{"string"}, - Format: "", - }, - }, - "resourcesDuration": { - SchemaProps: spec.SchemaProps{ - Description: "ResourcesDuration is indicative, but not accurate, resource duration. 
This is populated when the nodes completes.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - "podIP": { - SchemaProps: spec.SchemaProps{ - Description: "PodIP captures the IP of the pod for daemoned steps", - Type: []string{"string"}, - Format: "", - }, - }, - "daemoned": { - SchemaProps: spec.SchemaProps{ - Description: "Daemoned tracks whether or not this node was daemoned and need to be terminated", - Type: []string{"boolean"}, - Format: "", - }, - }, - "inputs": { - SchemaProps: spec.SchemaProps{ - Description: "Inputs captures input parameter values and artifact locations supplied to this template invocation", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs captures output parameter values and artifact locations produced by this template invocation", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "children": { - SchemaProps: spec.SchemaProps{ - Description: "Children is a list of child node IDs", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "outboundNodes": { - SchemaProps: spec.SchemaProps{ - Description: "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as \"outbound\". Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the \"outbound\" node. In the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. 
In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "hostNodeName": { - SchemaProps: spec.SchemaProps{ - Description: "HostNodeName name of the Kubernetes node on which the Pod is running, if applicable", - Type: []string{"string"}, - Format: "", - }, - }, - "memoizationStatus": { - SchemaProps: spec.SchemaProps{ - Description: "MemoizationStatus holds information about cached nodes", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus"), - }, - }, - "synchronizationStatus": { - SchemaProps: spec.SchemaProps{ - Description: "SynchronizationStatus is the synchronization status of the node", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus"), - }, - }, - }, - Required: []string{"id", "name", "type"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MemoizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeSynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NodeSynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeSynchronizationStatus stores the status of a node", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Waiting is the name of the lock that this node is waiting for", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent files. 
Note that if the artifact is a directory, the artifact driver must support the ability to save/load the directory appropriately.", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OAuth2Auth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OAuth2Auth holds all information for client authentication via OAuth2 tokens", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "clientIDSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "clientSecretSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "tokenURLSecret": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "scopes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "endpointParams": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OAuth2EndpointParam", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OAuth2EndpointParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointParam is for requesting optional fields that should be sent in the oauth request", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the header name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the literal value to use for the header", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSArtifact is the location of an Alibaba Cloud OSS artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: 
"CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the path in the bucket where the artifact resides", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSArtifactRepository defines the controller configuration for an OSS artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat is defines the format of how to store keys. 
Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSBucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSBucket contains the access information required for interfacing with an Alibaba Cloud OSS bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the OSS bucket for output artifacts, if it doesn't exist", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityToken": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityToken is the user's temporary security token. For more details, check out: https://www.alibabacloud.com/help/doc-detail/100624.htm", - Type: []string{"string"}, - Format: "", - }, - }, - "lifecycleRule": { - SchemaProps: spec.SchemaProps{ - Description: "LifecycleRule specifies how to manage bucket's lifecycle", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.OSSLifecycleRule", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_OSSLifecycleRule(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OSSLifecycleRule specifies how to manage bucket's lifecycle", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "markInfrequentAccessAfterDays": { - SchemaProps: spec.SchemaProps{ - Description: "MarkInfrequentAccessAfterDays is the number of days before we convert the objects in the bucket to Infrequent Access (IA) storage type", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "markDeletionAfterDays": { - SchemaProps: spec.SchemaProps{ - Description: "MarkDeletionAfterDays is the number of days before we delete objects in the bucket", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Object(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: Object{}.OpenAPISchemaType(), - Format: Object{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) 
common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Outputs hold parameters, artifacts, and results from a step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "parameters": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Parameters holds the list of output parameters produced by a step", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"), - }, - }, - }, - }, - }, - "artifacts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Artifacts holds the list of output artifacts produced by a step", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact"), - }, - }, - }, - }, - }, - "result": { - SchemaProps: spec.SchemaProps{ - Description: "Result holds the result (stdout) of a script template", - Type: []string{"string"}, - Format: "", - }, - }, - "exitCode": { - SchemaProps: spec.SchemaProps{ - Description: "ExitCode holds the exit code of a script template", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Artifact", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Parameter"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: ParallelSteps{}.OpenAPISchemaType(), - Format: ParallelSteps{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Parameter(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Parameter indicate a passed string parameter to a service template with an optional default value", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the parameter name", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "Default is the default value to use for an input parameter if a value was not supplied", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the literal value to use for the parameter. 
If specified in the context of an input parameter, the value takes precedence over any passed values", - Type: []string{"string"}, - Format: "", - }, - }, - "valueFrom": { - SchemaProps: spec.SchemaProps{ - Description: "ValueFrom is the source for the output parameter's value", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"), - }, - }, - "globalName": { - SchemaProps: spec.SchemaProps{ - Description: "GlobalName exports an output parameter to the global scope, making it available as '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters", - Type: []string{"string"}, - Format: "", - }, - }, - "enum": { - SchemaProps: spec.SchemaProps{ - Description: "Enum holds a list of string values to choose from, for the actual value of the parameter", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "description": { - SchemaProps: spec.SchemaProps{ - Description: "Description is the parameter description", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ValueFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Plugin(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Plugin is an Object with exactly one key", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_PodGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodGC describes how to delete completed pods as they complete", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use. 
One of \"OnPodCompletion\", \"OnPodSuccess\", \"OnWorkflowCompletion\", \"OnWorkflowSuccess\"", - Type: []string{"string"}, - Format: "", - }, - }, - "labelSelector": { - SchemaProps: spec.SchemaProps{ - Description: "LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Prometheus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Prometheus is a prometheus metric to be emitted", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Description: "Labels is a list of metric labels", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"), - }, - }, - }, - }, - }, - "help": { - SchemaProps: spec.SchemaProps{ - Description: "Help is a string that describes the metric", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is a conditional statement that decides when to emit the metric", - Type: []string{"string"}, - Format: "", - }, - }, - "gauge": { - SchemaProps: spec.SchemaProps{ - Description: "Gauge is a gauge metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge"), - }, - }, - "histogram": { - SchemaProps: spec.SchemaProps{ - Description: "Histogram is a histogram metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram"), - }, - }, - "counter": { - SchemaProps: spec.SchemaProps{ - Description: "Counter is a counter metric", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter"), - }, - }, - }, - Required: []string{"name", "help"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Counter", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Gauge", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Histogram", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MetricLabel"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RawArtifact allows raw string content to be placed as an artifact in a container", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is the string contents of the artifact", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"data"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ResourceTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceTemplate is a template subtype to manipulate kubernetes 
resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "action": { - SchemaProps: spec.SchemaProps{ - Description: "Action is the action to perform to the resource. Must be one of: get, create, apply, delete, replace, patch", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "mergeStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\" Must be one of: strategic, merge, json", - Type: []string{"string"}, - Format: "", - }, - }, - "manifest": { - SchemaProps: spec.SchemaProps{ - Description: "Manifest contains the kubernetes manifest", - Type: []string{"string"}, - Format: "", - }, - }, - "manifestFrom": { - SchemaProps: spec.SchemaProps{ - Description: "ManifestFrom is the source for a single kubernetes manifest", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"), - }, - }, - "setOwnerReference": { - SchemaProps: spec.SchemaProps{ - Description: "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "successCondition": { - SchemaProps: spec.SchemaProps{ - Description: "SuccessCondition is a label selector expression which describes the conditions of the k8s resource in which it is acceptable to proceed to the following step", - Type: []string{"string"}, - Format: "", - }, - }, - "failureCondition": { - SchemaProps: spec.SchemaProps{ - Description: "FailureCondition is a label selector expression which describes the conditions of the k8s resource in which the step was considered failed", - Type: []string{"string"}, - Format: "", - }, - }, - "flags": { - SchemaProps: spec.SchemaProps{ - Description: "Flags is a set of additional options passed to kubectl before submitting a resource I.e. to disable resource validation: flags: [\n\t\"--validate=false\" # disable resource validation\n]", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"action"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ManifestFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryAffinity prevents running steps on the same host.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodeAntiAffinity": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryNodeAntiAffinity"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryNodeAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryNodeAntiAffinity is a placeholder for future expansion, only empty nodeAntiAffinity is allowed. 
In order to prevent running steps on the same host, it uses \"kubernetes.io/hostname\".", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_RetryStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy provides controls on how to retry a workflow step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "limit": { - SchemaProps: spec.SchemaProps{ - Description: "Limit is the maximum number of retry attempts when retrying a container. It does not include the original container; the maximum number of total attempts will be `limit + 1`.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "retryPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryPolicy is a policy of NodePhase statuses that will be retried", - Type: []string{"string"}, - Format: "", - }, - }, - "backoff": { - SchemaProps: spec.SchemaProps{ - Description: "Backoff is a backoff strategy", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff"), - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity prevents running workflow's step on the same host", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity"), - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression is a condition expression for when a node will be retried. If it evaluates to false, the node will not be retried and the retry strategy will be ignored", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Backoff", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryAffinity", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3Artifact is the location of an S3 artifact", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - SchemaProps: 
spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the key in the bucket where the artifact resides", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3ArtifactRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3ArtifactRepository defines the controller configuration for an S3 artifact repository", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - "keyFormat": { - SchemaProps: spec.SchemaProps{ - Description: "KeyFormat is defines the format of how to store keys. Can reference workflow variables", - Type: []string{"string"}, - Format: "", - }, - }, - "keyPrefix": { - SchemaProps: spec.SchemaProps{ - Description: "KeyPrefix is prefix used as part of the bucket key in which the controller will store artifacts. DEPRECATED. Use KeyFormat instead", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3Bucket contains the access information required for interfacing with an S3 bucket", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint is the hostname of the bucket endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Bucket is the name of the bucket", - Type: []string{"string"}, - Format: "", - }, - }, - "region": { - SchemaProps: spec.SchemaProps{ - Description: "Region contains the optional bucket region", - Type: []string{"string"}, - Format: "", - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Insecure will connect to the service with TLS", - Type: []string{"boolean"}, - Format: "", - }, - }, - "accessKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKeySecret is the secret selector to the bucket's access key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySecret is the secret selector to the bucket's secret key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "roleARN": { - SchemaProps: spec.SchemaProps{ - Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", - Type: []string{"string"}, - Format: "", - }, - }, - "useSDKCreds": { - SchemaProps: spec.SchemaProps{ - Description: "UseSDKCreds tells the driver to figure out credentials based on sdk defaults.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "createBucketIfNotPresent": { - SchemaProps: spec.SchemaProps{ - Description: "CreateBucketIfNotPresent tells the driver to attempt to create the S3 bucket for output artifacts, if it doesn't exist. 
Setting Enabled Encryption will apply either SSE-S3 to the bucket if KmsKeyId is not set or SSE-KMS if it is.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions"), - }, - }, - "encryptionOptions": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.CreateS3BucketOptions", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.S3EncryptionOptions", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_S3EncryptionOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "S3EncryptionOptions used to determine encryption options during s3 operations", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kmsKeyId": { - SchemaProps: spec.SchemaProps{ - Description: "KMSKeyId tells the driver to encrypt the object using the specified KMS Key.", - Type: []string{"string"}, - Format: "", - }, - }, - "kmsEncryptionContext": { - SchemaProps: spec.SchemaProps{ - Description: "KmsEncryptionContext is a json blob that contains an encryption context. See https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context for more information", - Type: []string{"string"}, - Format: "", - }, - }, - "enableEncryption": { - SchemaProps: spec.SchemaProps{ - Description: "EnableEncryption tells the driver to encrypt objects if set to true. If kmsKeyId and serverSideCustomerKeySecret are not set, SSE-S3 will be used", - Type: []string{"boolean"}, - Format: "", - }, - }, - "serverSideCustomerKeySecret": { - SchemaProps: spec.SchemaProps{ - Description: "ServerSideCustomerKeySecret tells the driver to encrypt the output artifacts using SSE-C with the specified secret.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ScriptTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScriptTemplate is a template subtype to enable scripting through code steps", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "source": { - SchemaProps: spec.SchemaProps{ - Description: "Source contains the source code of the script to execute", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "source"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreHolding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore stores the semaphore name.", - Type: []string{"string"}, - Format: "", - }, - }, - "holders": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Holders stores the list of current holder names in the workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SemaphoreRef is a reference of Semaphore", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "configMapKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapKeyRef is configmap selector for Semaphore configuration", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SemaphoreStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "holding": { - SchemaProps: spec.SchemaProps{ - Description: "Holding stores the list of resource acquired synchronization lock for workflows.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), - }, - }, - }, - }, - }, - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Waiting indicates the list of current synchronization lock holders.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreHolding"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Sequence(ref common.ReferenceCallback) common.OpenAPIDefinition { - return 
common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Sequence expands a workflow step into numeric range", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "count": { - SchemaProps: spec.SchemaProps{ - Description: "Count is number of elements in the sequence (default: 0). Not to be used with end", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "start": { - SchemaProps: spec.SchemaProps{ - Description: "Number at which to start the sequence (default: 0)", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "end": { - SchemaProps: spec.SchemaProps{ - Description: "Number at which to end the sequence (default: 0). Not to be used with Count", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "format": { - SchemaProps: spec.SchemaProps{ - Description: "Format is a printf format string to format the value in the sequence", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Submit(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "workflowTemplateRef": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef the workflow template to submit", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"), - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metadata optional means to customize select fields of the workflow metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments extracted from the event and then set as arguments to the workflow created.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - }, - Required: []string{"workflowTemplateRef"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SubmitOpts(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SubmitOpts are workflow submission options", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name overrides metadata.name", - Type: []string{"string"}, - Format: "", - }, - }, - "generateName": { - SchemaProps: spec.SchemaProps{ - Description: "GenerateName overrides metadata.generateName", - Type: []string{"string"}, - Format: "", - }, - }, - "entryPoint": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint overrides spec.entrypoint", - Type: []string{"string"}, - Format: "", - }, - }, - "parameters": { - SchemaProps: spec.SchemaProps{ - Description: "Parameters passes input parameters to workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, 
- Format: "", - }, - }, - }, - }, - }, - "serviceAccount": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccount runs all pods in the workflow using specified ServiceAccount.", - Type: []string{"string"}, - Format: "", - }, - }, - "dryRun": { - SchemaProps: spec.SchemaProps{ - Description: "DryRun validates the workflow on the client-side without creating it. This option is not supported in API", - Type: []string{"boolean"}, - Format: "", - }, - }, - "serverDryRun": { - SchemaProps: spec.SchemaProps{ - Description: "ServerDryRun validates the workflow on the server-side without creating it", - Type: []string{"boolean"}, - Format: "", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Description: "Labels adds to metadata.labels", - Type: []string{"string"}, - Format: "", - }, - }, - "ownerReference": { - SchemaProps: spec.SchemaProps{ - Description: "OwnerReference creates a metadata.ownerReference", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Description: "Annotations adds to metadata.labels", - Type: []string{"string"}, - Format: "", - }, - }, - "podPriorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Set the podPriorityClassName of the workflow", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority is used if controller is configured to process limited number of workflows in parallel, higher priority workflows are processed first.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SuppliedValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SuppliedValueFrom is a placeholder for a value to be filled in directly, either through the CLI, API, etc.", - Type: []string{"object"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SuspendTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "duration": { - SchemaProps: spec.SchemaProps{ - Description: "Duration is the seconds to wait before automatically resuming a template", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Synchronization(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore holds the Semaphore configuration", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"), - }, - }, - "mutex": { - SchemaProps: spec.SchemaProps{ - Description: "Mutex holds the Mutex lock details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex"), - }, - }, - }, - }, - }, - Dependencies: []string{ - 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Mutex", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_SynchronizationStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SynchronizationStatus stores the status of semaphore and mutex.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "semaphore": { - SchemaProps: spec.SchemaProps{ - Description: "Semaphore stores this workflow's Semaphore holder details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"), - }, - }, - "mutex": { - SchemaProps: spec.SchemaProps{ - Description: "Mutex stores this workflow's mutex holder details", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.MutexStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SemaphoreStatus"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TTLStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TTLStrategy is the strategy for the time to live depending on if the workflow succeeded or failed", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "secondsAfterCompletion": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterCompletion is the number of seconds to live after completion", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "secondsAfterSuccess": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterSuccess is the number of seconds to live after success", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "secondsAfterFailure": { - SchemaProps: spec.SchemaProps{ - Description: "SecondsAfterFailure is the number of seconds to live after failure", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TarStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TarStrategy will tar and gzip the file or directory when saving", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "compressionLevel": { - SchemaProps: spec.SchemaProps{ - Description: "CompressionLevel specifies the gzip compression level to use for the artifact. 
Defaults to gzip.DefaultCompression.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Template is a reusable and composable unit of execution in a workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the template", - Type: []string{"string"}, - Format: "", - }, - }, - "inputs": { - SchemaProps: spec.SchemaProps{ - Description: "Inputs describe what input parameters and artifacts are supplied to this template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs"), - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs describe the parameters and artifacts that this template produces", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector to schedule this step of the workflow to be run on the selected node(s). Overrides the selector set at the workflow level.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity sets the pod's scheduling constraints. Overrides the affinity set at the workflow level (if any)", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metadata sets the pod's metadata, i.e.
annotations and labels", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "daemon": { - SchemaProps: spec.SchemaProps{ - Description: "Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness", - Type: []string{"boolean"}, - Format: "", - }, - }, - "steps": { - SchemaProps: spec.SchemaProps{ - Description: "Steps define a series of sequential/parallel workflow steps", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps"), - }, - }, - }, - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container is the main container image to run in the pod", - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - "containerSet": { - SchemaProps: spec.SchemaProps{ - Description: "ContainerSet groups multiple containers within a single pod.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate"), - }, - }, - "script": { - SchemaProps: spec.SchemaProps{ - Description: "Script runs a portion of code against an interpreter", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate"), - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Description: "Resource template subtype which can run k8s resources", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate"), - }, - }, - "dag": { - SchemaProps: spec.SchemaProps{ - Description: "DAG template subtype which runs a DAG", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate"), - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend template subtype which can suspend a workflow when reaching the step", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate"), - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is a data template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data"), - }, - }, - "http": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP makes an HTTP request", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP"), - }, - }, - "plugin": { - SchemaProps: spec.SchemaProps{ - Description: "Plugin is a plugin template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin"), - }, - }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Volumes is a list of volumes that can be mounted by containers in a template.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "initContainers": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "InitContainers is a list of containers which run before the main container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ 
- Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"), - }, - }, - }, - }, - }, - "sidecars": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Sidecars is a list of containers which run alongside the main container. Sidecars are automatically killed when the main container completes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer"), - }, - }, - }, - }, - }, - "archiveLocation": { - SchemaProps: spec.SchemaProps{ - Description: "Location in which all files related to the step will be stored (logs, artifacts, etc...). Can be overridden by individual items in Outputs. If omitted, will use the default artifact repository location configured in the controller, appended with the <workflowname>/<nodename> in the key.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation"), - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds relative to the StartTime that the pod may be active on a node before the system actively tries to terminate the pod; value must be positive integer. This field is only applicable to container and script templates.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy describes how to retry a template when it fails", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"), - }, - }, - "parallelism": { - SchemaProps: spec.SchemaProps{ - Description: "Parallelism limits the max total parallel pods that can execute at the same time within the boundaries of this template invocation. If additional steps/dag templates are invoked, the pods created by those templates will not be counted towards this total.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "failFast": { - SchemaProps: spec.SchemaProps{ - Description: "FailFast, if specified, will fail this template if any of its child pods has failed. This is useful for when this template is expanded with `withItems`, etc.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tolerations": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tolerations to apply to workflow pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. Or it will be dispatched by workflow scope scheduler if specified. 
If neither specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "PriorityClassName to apply to workflow pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority to apply to workflow pods.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName to apply to workflow pods", - Type: []string{"string"}, - Format: "", - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "executor": { - SchemaProps: spec.SchemaProps{ - Description: "Executor holds configurations of the executor container.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"), - }, - }, - "hostAliases": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "ip", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.HostAlias"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "podSpecPatch": { - SchemaProps: spec.SchemaProps{ - Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).", - Type: []string{"string"}, - Format: "", - }, - }, - "metrics": { - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from this template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration for this template", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"), - }, - }, - "memoize": { - SchemaProps: spec.SchemaProps{ - Description: "Memoize allows templates to use outputs generated from already executed templates", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize"), - }, - }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout allows to set the total node execution timeout duration counting from the node's start time. This duration also includes time in which the node spends in Pending state. 
This duration may not be applied to Step or DAG templates.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContainerSetTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Data", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.HTTP", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Memoize", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Plugin", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is a reference of template resource.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the resource name of the template.", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of referred template in the resource.", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterScope": { - SchemaProps: spec.SchemaProps{ - Description: "ClusterScope indicates the referred template is cluster scoped (i.e. 
a ClusterWorkflowTemplate).", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_TransformationStep(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression defines an expr expression to apply", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"expression"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_UserContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "UserContainer is a container specified by a user.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. 
If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "startupProbe": { - SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. 
Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "mirrorVolumeMounts": { - SchemaProps: spec.SchemaProps{ - Description: "MirrorVolumeMounts will mount the same volumes specified in the main container to the container (including artifacts), at the same mountPaths. 
This enables dind daemon to partially see the same filesystem as the main container in order to use features such as docker volume binding", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ValueFrom(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ValueFrom describes a location in which to obtain the value to a parameter", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path in the container to retrieve an output parameter value from in container templates", - Type: []string{"string"}, - Format: "", - }, - }, - "jsonPath": { - SchemaProps: spec.SchemaProps{ - Description: "JSONPath of a resource to retrieve an output parameter value from in resource templates", - Type: []string{"string"}, - Format: "", - }, - }, - "jqFilter": { - SchemaProps: spec.SchemaProps{ - Description: "JQFilter expression against the resource object in resource templates", - Type: []string{"string"}, - Format: "", - }, - }, - "event": { - SchemaProps: spec.SchemaProps{ - Description: "Selector (https://github.com/antonmedv/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message`", - Type: []string{"string"}, - Format: "", - }, - }, - "parameter": { - SchemaProps: spec.SchemaProps{ - Description: "Parameter reference to a step or dag task in which to retrieve an output parameter value from (e.g. 
'{{steps.mystep.outputs.myparam}}')", - Type: []string{"string"}, - Format: "", - }, - }, - "supplied": { - SchemaProps: spec.SchemaProps{ - Description: "Supplied value to be filled in directly, either through the CLI, API, etc.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom"), - }, - }, - "configMapKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapKeyRef is configmap selector for input parameter configuration", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "Default specifies a value to be used if retrieving the value from the specified source fails", - Type: []string{"string"}, - Format: "", - }, - }, - "expression": { - SchemaProps: spec.SchemaProps{ - Description: "Expression, if defined, is evaluated to specify the value for the parameter", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SuppliedValueFrom", "k8s.io/api/core/v1.ConfigMapKeySelector"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Version(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "version": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "buildDate": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitCommit": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitTag": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "gitTreeState": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "goVersion": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "compiler": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "platform": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"version", "buildDate", "gitCommit", "gitTag", "gitTreeState", "goVersion", "compiler", "platform"}, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_VolumeClaimGC(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimGC describes how to delete volumes from completed Workflows", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "strategy": { - SchemaProps: spec.SchemaProps{ - Description: "Strategy is the strategy to use. One of \"OnWorkflowCompletion\", \"OnWorkflowSuccess\"", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_Workflow(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Workflow is the definition of a workflow resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTask(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowArtifactGCTask specifies the Artifacts that need to be deleted as well as the status of deletion", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGCStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowArtifactGCTaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowArtifactGCTaskList is list of WorkflowArtifactGCTask resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowArtifactGCTask", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowEventBinding is the definition of an event resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBindingSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowEventBindingList is list of event resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowEventBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowEventBindingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "event": { - SchemaProps: spec.SchemaProps{ - Description: "Event is the event to bind to", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event"), - }, - }, - "submit": { - SchemaProps: spec.SchemaProps{ - Description: "Submit is the workflow template to submit", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"), - }, - }, - }, - Required: []string{"event"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Event", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Submit"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowList is list of Workflow resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Workflow", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "labels": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "labelsFrom": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LabelValueFrom"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowSpec is the specification of a Workflow.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "templates": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Templates is a list of workflow templates used in a workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - "entrypoint": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint is a template reference to the starting point of the workflow.", - Type: []string{"string"}, - Format: "", - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. 
{{workflow.parameters.myparam}}", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.", - Type: []string{"string"}, - Format: "", - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "executor": { - SchemaProps: spec.SchemaProps{ - Description: "Executor holds configurations of executor containers of the workflow.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig"), - }, - }, - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Volumes is a list of volumes that can be mounted by containers in a workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "volumeClaimTemplates": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), - }, - }, - }, - }, - }, - "parallelism": { - SchemaProps: spec.SchemaProps{ - Description: "Parallelism limits the max total parallel pods that can execute at the same time in a workflow", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "artifactRepositoryRef": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef"), - }, - }, - "suspend": { - SchemaProps: spec.SchemaProps{ - Description: "Suspend will suspend the workflow and prevent execution of any future steps in the workflow", - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "Affinity sets the scheduling constraints for all pods in the workflow. 
Can be overridden by an affinity specified in the template", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "tolerations": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Tolerations to apply to workflow pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "imagePullSecrets": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "hostNetwork": { - SchemaProps: spec.SchemaProps{ - Description: "Host networking requested for this workflow pod. Default to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", - Type: []string{"string"}, - Format: "", - }, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary workflow.", - Type: []string{"string"}, - Format: "", - }, - }, - "ttlStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy"), - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the workflow. 
A value of zero is used to terminate a Running workflow", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.", - Type: []string{"string"}, - Format: "", - }, - }, - "podGC": { - SchemaProps: spec.SchemaProps{ - Description: "PodGC describes the strategy to use when deleting completed pods", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC"), - }, - }, - "podPriorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "PriorityClassName to apply to workflow pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "podPriority": { - SchemaProps: spec.SchemaProps{ - Description: "Priority to apply to workflow pods. DEPRECATED: Use PodPriorityClassName instead.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "hostAliases": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "ip", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.HostAlias"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "podSpecPatch": { - SchemaProps: spec.SchemaProps{ - Description: "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).", - Type: []string{"string"}, - Format: "", - }, - }, - "podDisruptionBudget": { - SchemaProps: spec.SchemaProps{ - Description: "PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. 
Optional: Defaults to empty.", - Ref: ref("k8s.io/api/policy/v1beta1.PodDisruptionBudgetSpec"), - }, - }, - "metrics": { - SchemaProps: spec.SchemaProps{ - Description: "Metrics are a list of metrics emitted from this Workflow", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics"), - }, - }, - "shutdown": { - SchemaProps: spec.SchemaProps{ - Description: "Shutdown will shutdown the workflow according to its ShutdownStrategy", - Type: []string{"string"}, - Format: "", - }, - }, - "workflowTemplateRef": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization holds synchronization lock configuration for this Workflow", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization"), - }, - }, - "volumeClaimGC": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeClaimGC describes the strategy to use when deleting volumes from completed workflows", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC"), - }, - }, - "retryStrategy": { - SchemaProps: spec.SchemaProps{ - Description: "RetryStrategy for all templates in the workflow.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy"), - }, - }, - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "PodMetadata defines additional metadata that should be applied to workflow pods", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata"), - }, - }, - "templateDefaults": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateDefaults holds default template values that will apply to all templates in the Workflow, unless overridden on the template-level", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "archiveLogs": { - SchemaProps: spec.SchemaProps{ - Description: "ArchiveLogs indicates if the container logs should be archived", - Type: []string{"boolean"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - "workflowMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "WorkflowMetadata contains some metadata of the workflow to refer to", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata"), - }, - }, - "artifactGC": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts unless Artifact.ArtifactGC is specified, which overrides this)", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Metrics", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.PodGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Synchronization", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TTLStrategy", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.VolumeClaimGC", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowMetadata", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplateRef", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume", "k8s.io/api/policy/v1beta1.PodDisruptionBudgetSpec"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowStatus contains overall status information about a workflow", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase a simple, high-level summary of where the workflow is in its lifecycle.", - Type: []string{"string"}, - Format: "", - }, - }, - "startedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this workflow started", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "finishedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this workflow completed", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "estimatedDuration": { - SchemaProps: spec.SchemaProps{ - Description: "EstimatedDuration in seconds.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Description: "Progress to completion", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about why the workflow is in this condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "compressedNodes": { - SchemaProps: spec.SchemaProps{ - Description: "Compressed and base64 decoded Nodes map", - Type: []string{"string"}, - Format: "", - }, - }, - "nodes": { - SchemaProps: spec.SchemaProps{ - Description: "Nodes is a mapping between a node ID and the node's status.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus"), - }, - }, - }, - }, 
- }, - "offloadNodeStatusVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.", - Type: []string{"string"}, - Format: "", - }, - }, - "storedTemplates": { - SchemaProps: spec.SchemaProps{ - Description: "StoredTemplates is a mapping between a template ref and the node's status.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - "persistentVolumeClaims": { - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. The contents of this list are drained at the end of the workflow.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Description: "Outputs captures output values and artifact locations produced by the workflow via global outputs", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "conditions": { - SchemaProps: spec.SchemaProps{ - Description: "Conditions is a list of conditions the Workflow may have", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition"), - }, - }, - }, - }, - }, - "resourcesDuration": { - SchemaProps: spec.SchemaProps{ - Description: "ResourcesDuration is the total for the workflow", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - "storedWorkflowTemplateSpec": { - SchemaProps: spec.SchemaProps{ - Description: "StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - "synchronization": { - SchemaProps: spec.SchemaProps{ - Description: "Synchronization stores the status of synchronization locks", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus"), - }, - }, - "artifactRepositoryRef": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus"), - }, - }, - "artifactGCStatus": { - SchemaProps: spec.SchemaProps{ - Description: "ArtifactGCStatus maintains the status of Artifact Garbage Collection", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtGCStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRefStatus", 
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Condition", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.SynchronizationStatus", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowStep is a reference to a template to execute in a series of step", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the step", - Type: []string{"string"}, - Format: "", - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the name of the template to execute as the step", - Type: []string{"string"}, - Format: "", - }, - }, - "inline": { - SchemaProps: spec.SchemaProps{ - Description: "Inline is the template. Template must be empty if this is declared (and vice-versa).", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - "arguments": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments hold arguments to the template", - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments"), - }, - }, - "templateRef": { - SchemaProps: spec.SchemaProps{ - Description: "TemplateRef is the reference to the template resource to execute as the step.", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"), - }, - }, - "withItems": { - SchemaProps: spec.SchemaProps{ - Description: "WithItems expands a step into multiple parallel steps from the items in the list", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item"), - }, - }, - }, - }, - }, - "withParam": { - SchemaProps: spec.SchemaProps{ - Description: "WithParam expands a step into multiple parallel steps from the value in the parameter, which is expected to be a JSON list.", - Type: []string{"string"}, - Format: "", - }, - }, - "withSequence": { - SchemaProps: spec.SchemaProps{ - Description: "WithSequence expands a step into a numeric sequence", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence"), - }, - }, - "when": { - SchemaProps: spec.SchemaProps{ - Description: "When is an expression in which the step should conditionally execute", - Type: []string{"string"}, - Format: "", - }, - }, - "continueOn": { - SchemaProps: spec.SchemaProps{ - Description: "ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified", - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn"), - }, - }, - "onExit": { - SchemaProps: spec.SchemaProps{ - Description: "OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template. 
DEPRECATED: Use Hooks[exit].Template instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "hooks": { - SchemaProps: spec.SchemaProps{ - Description: "Hooks holds the lifecycle hook which is invoked at lifecycle of step, irrespective of the success, failure, or error status of the primary step", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.ContinueOn", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Item", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.LifecycleHook", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Sequence", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.TemplateRef"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTaskResult is a used to communicate a result back to the controller. Unlike WorkflowTaskSet, it has more capacity. This is an internal type. Users should never create this resource directly, much like you would never create a ReplicaSet directly.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "phase": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "outputs": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs"), - }, - }, - "progress": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"metadata"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Outputs", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskResultList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskResult", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSet(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetSpec", "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSetStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTaskSet", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "tasks": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.Template"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTaskSetStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodes": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.NodeResult"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplate is the definition of a workflow template resource", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec"), - }, - }, - }, - Required: []string{"metadata", "spec"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateList is list of WorkflowTemplate resources", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate"), - }, - }, - }, - }, - }, - }, - Required: []string{"metadata", "items"}, - }, - }, - Dependencies: []string{ - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1.WorkflowTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkflowTemplateRef is a reference to a WorkflowTemplate resource.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the resource name of the workflow template.", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterScope": { - SchemaProps: spec.SchemaProps{ - Description: "ClusterScope indicates the referred template is cluster scoped (i.e. 
a ClusterWorkflowTemplate).", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_workflow_v1alpha1_ZipStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ZipStrategy will unzip zipped input artifacts", - Type: []string{"object"}, - }, - }, - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go index 76340b563e9..1b2d6320eb3 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -15,13 +15,15 @@ import ( "time" apiv1 "k8s.io/api/core/v1" - policyv1beta "k8s.io/api/policy/v1beta1" + policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" + log "github.com/sirupsen/logrus" + argoerrs "github.com/argoproj/argo-workflows/v3/errors" "github.com/argoproj/argo-workflows/v3/util/slice" ) @@ -202,23 +204,6 @@ func (w *Workflow) GetExecSpec() *WorkflowSpec { return &w.Spec } -func (w *Workflow) HasArtifactGC() bool { - - if w.Spec.ArtifactGC != nil && w.Spec.ArtifactGC.Strategy != ArtifactGCNever && w.Spec.ArtifactGC.Strategy != ArtifactGCStrategyUndefined { - return true - } - - // either it's defined by an Output Artifact or by the WorkflowSpec itself, or both - for _, template := range w.GetTemplates() { - for _, artifact := range template.Outputs.Artifacts { - if artifact.GetArtifactGC().Strategy != ArtifactGCNever && artifact.GetArtifactGC().Strategy != ArtifactGCStrategyUndefined { - return true - } - } - } - return false -} - // return the ultimate ArtifactGCStrategy for the Artifact // (defined on the Workflow level but can be overridden on the Artifact level) func (w *Workflow) GetArtifactGCStrategy(a *Artifact) ArtifactGCStrategy { @@ -305,9 +290,7 @@ type WorkflowSpec struct { // VolumeClaimTemplates is a list of claims that containers are allowed to reference. // The Workflow controller will create the claims at the beginning of the workflow // and delete the claims upon completion of the workflow - // +patchStrategy=merge - // +patchMergeKey=name - VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,opt,name=volumeClaimTemplates"` + VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,6,opt,name=volumeClaimTemplates"` // Parallelism limits the max total parallel pods that can execute at the same time in a workflow Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,7,opt,name=parallelism"` @@ -343,7 +326,7 @@ type WorkflowSpec struct { // Host networking requested for this workflow pod. Default to false. HostNetwork *bool `json:"hostNetwork,omitempty" protobuf:"bytes,14,opt,name=hostNetwork"` - // Set DNS policy for the pod. + // Set DNS policy for workflow pods. // Defaults to "ClusterFirst". // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
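The reworded dnsPolicy comment above carries a real constraint: custom DNS options only take effect alongside host networking when the policy is set explicitly to 'ClusterFirstWithHostNet'. A minimal sketch of a spec that honors this, assuming the upstream field shapes (HostNetwork *bool, DNSPolicy *apiv1.DNSPolicy) and the core/v1 DNSClusterFirstWithHostNet constant:

package main

import (
	apiv1 "k8s.io/api/core/v1"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

// hostNetworkSpec builds a WorkflowSpec that requests host networking and
// still gets DNS handling, per the rule in the comment above.
func hostNetworkSpec() wfv1.WorkflowSpec {
	hostNetwork := true
	// Required alongside hostNetwork for DNS options to apply.
	dnsPolicy := apiv1.DNSClusterFirstWithHostNet
	return wfv1.WorkflowSpec{
		Entrypoint:  "main",
		HostNetwork: &hostNetwork,
		DNSPolicy:   &dnsPolicy,
	}
}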
@@ -407,7 +390,7 @@ type WorkflowSpec struct { // Controller will automatically add the selector with workflow name, if selector is empty. // Optional: Defaults to empty. // +optional - PodDisruptionBudget *policyv1beta.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"` + PodDisruptionBudget *policyv1.PodDisruptionBudgetSpec `json:"podDisruptionBudget,omitempty" protobuf:"bytes,31,opt,name=podDisruptionBudget"` // Metrics are a list of metrics emitted from this Workflow Metrics *Metrics `json:"metrics,omitempty" protobuf:"bytes,32,opt,name=metrics"` @@ -445,7 +428,7 @@ type WorkflowSpec struct { // ArtifactGC describes the strategy to use when deleting artifacts from completed or deleted workflows (applies to all output Artifacts // unless Artifact.ArtifactGC is specified, which overrides this) - ArtifactGC *ArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,43,opt,name=artifactGC"` + ArtifactGC *WorkflowLevelArtifactGC `json:"artifactGC,omitempty" protobuf:"bytes,43,opt,name=artifactGC"` } type LabelValueFrom struct { @@ -493,7 +476,7 @@ func (wfs WorkflowSpec) GetArtifactGC() *ArtifactGC { return &ArtifactGC{Strategy: ArtifactGCStrategyUndefined} } - return wfs.ArtifactGC + return &wfs.ArtifactGC.ArtifactGC } func (wfs WorkflowSpec) GetTTLStrategy() *TTLStrategy { @@ -641,7 +624,7 @@ type Template struct { // Metdata sets the pods's metadata, i.e. annotations and labels Metadata Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"` - // Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness + // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness Daemon *bool `json:"daemon,omitempty" protobuf:"bytes,10,opt,name=daemon"` // Steps define a series of sequential/parallel workflow steps @@ -903,7 +886,7 @@ type ValueFrom struct { // JQFilter expression against the resource object in resource templates JQFilter string `json:"jqFilter,omitempty" protobuf:"bytes,3,opt,name=jqFilter"` - // Selector (https://github.com/antonmedv/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` + // Selector (https://github.com/expr-lang/expr) that is evaluated against the event to get the value of the parameter. E.g. `payload.message` Event string `json:"event,omitempty" protobuf:"bytes,7,opt,name=event"` // Parameter reference to a step or dag task in which to retrieve an output parameter value from @@ -1026,10 +1009,12 @@ func (a *Artifact) CleanPath() error { // PodGC describes how to delete completed pods as they complete type PodGC struct { - // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess" + // Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess". If unset, does not delete Pods Strategy PodGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=PodGCStrategy"` // LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue. LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,2,opt,name=labelSelector"` + // DeleteDelayDuration specifies the duration before pods in the GC queue get deleted. 
+ DeleteDelayDuration *metav1.Duration `json:"deleteDelayDuration,omitempty" protobuf:"bytes,3,opt,name=deleteDelayDuration"` } // GetLabelSelector gets the label selector from podGC. @@ -1050,7 +1035,19 @@ func (podGC *PodGC) GetStrategy() PodGCStrategy { return PodGCOnPodNone } -// ArtifactGC describes how to delete artifacts from completed Workflows +// WorkflowLevelArtifactGC describes how to delete artifacts from completed Workflows - this spec is used on the Workflow level +type WorkflowLevelArtifactGC struct { + // ArtifactGC is an embedded struct + ArtifactGC `json:",inline" protobuf:"bytes,1,opt,name=artifactGC"` + + // ForceFinalizerRemoval: if set to true, the finalizer will be removed in the case that Artifact GC fails + ForceFinalizerRemoval bool `json:"forceFinalizerRemoval,omitempty" protobuf:"bytes,2,opt,name=forceFinalizerRemoval"` + + // PodSpecPatch holds strategic merge patch to apply against the artgc pod spec. + PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,3,opt,name=podSpecPatch"` +} + +// ArtifactGC describes how to delete artifacts from completed Workflows - this is embedded into the WorkflowLevelArtifactGC, and also used for individual Artifacts to override that as needed type ArtifactGC struct { // Strategy is the strategy to use. // +kubebuilder:validation:Enum="";OnWorkflowCompletion;OnWorkflowDeletion;Never @@ -1073,7 +1070,7 @@ func (agc *ArtifactGC) GetStrategy() ArtifactGCStrategy { // VolumeClaimGC describes how to delete volumes from completed Workflows type VolumeClaimGC struct { - // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess" + // Strategy is the strategy to use. One of "OnWorkflowCompletion", "OnWorkflowSuccess". Defaults to "OnWorkflowSuccess" Strategy VolumeClaimGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=VolumeClaimGCStrategy"` } @@ -1172,7 +1169,7 @@ func (a *ArtifactLocation) Get() (ArtifactLocationType, error) { } else if a.S3 != nil { return a.S3, nil } - return nil, fmt.Errorf("You need to configure artifact storage. More information on how to do this can be found in the docs: https://argoproj.github.io/argo-workflows/configure-artifact-repository/") + return nil, fmt.Errorf("You need to configure artifact storage. More information on how to do this can be found in the docs: https://argo-workflows.readthedocs.io/en/release-3.5/configure-artifact-repository/") } // SetType sets the type of the artifact to type the argument. 
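The hunks above are the heart of the artifactGC change: the spec field becomes a *WorkflowLevelArtifactGC that embeds the old ArtifactGC and adds workflow-only knobs (ForceFinalizerRemoval, PodSpecPatch), while GetArtifactGC unwraps the embedded struct so existing callers keep compiling. A hedged sketch of the new shape; the ArtifactGCOnWorkflowCompletion constant name is assumed from the kubebuilder enum above:

package main

import (
	"fmt"

	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

func main() {
	spec := wfv1.WorkflowSpec{
		ArtifactGC: &wfv1.WorkflowLevelArtifactGC{
			// The embedded struct keeps the fields callers already know.
			ArtifactGC: wfv1.ArtifactGC{Strategy: wfv1.ArtifactGCOnWorkflowCompletion},
			// New workflow-level knob: drop the finalizer even if artifact GC fails.
			ForceFinalizerRemoval: true,
		},
	}
	// GetArtifactGC still returns the embedded *ArtifactGC, as in the hunk above.
	fmt.Println(spec.GetArtifactGC().Strategy)
}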
@@ -1366,6 +1363,17 @@ func (gcStatus *ArtGCStatus) IsArtifactGCPodRecouped(podName string) bool { } return false } +func (gcStatus *ArtGCStatus) AllArtifactGCPodsRecouped() bool { + if gcStatus.PodsRecouped == nil { + return false + } + for _, recouped := range gcStatus.PodsRecouped { + if !recouped { + return false + } + } + return true +} type ArtifactSearchResult struct { Artifact `protobuf:"bytes,1,opt,name=artifact"` @@ -1650,12 +1658,16 @@ func (s *Synchronization) GetType() SynchronizationType { type SemaphoreRef struct { // ConfigMapKeyRef is configmap selector for Semaphore configuration ConfigMapKeyRef *apiv1.ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,1,opt,name=configMapKeyRef"` + // Namespace is the namespace of the configmap, default: [namespace of workflow] + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` } // Mutex holds Mutex configuration type Mutex struct { // name of the mutex Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Namespace is the namespace of the mutex, default: [namespace of workflow] + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` } // WorkflowTemplateRef is a reference to a WorkflowTemplate resource. @@ -1666,11 +1678,11 @@ type WorkflowTemplateRef struct { ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,2,opt,name=clusterScope"` } -func (ref *WorkflowTemplateRef) ToTemplateRef(entrypoint string) *TemplateRef { +func (ref *WorkflowTemplateRef) ToTemplateRef(template string) *TemplateRef { return &TemplateRef{ Name: ref.Name, ClusterScope: ref.ClusterScope, - Template: entrypoint, + Template: template, } } @@ -1721,6 +1733,64 @@ func (n Nodes) Find(f func(NodeStatus) bool) *NodeStatus { return nil } +// Get a NodeStatus from the hashmap of Nodes. +// Returns nil along with an error if the key is non-existent. +func (n Nodes) Get(key string) (*NodeStatus, error) { + val, ok := n[key] + if !ok { + return nil, fmt.Errorf("key was not found for %s", key) + } + return &val, nil +} + +// Check if the Nodes map has a key entry +func (n Nodes) Has(key string) bool { + _, err := n.Get(key) + return err == nil +} + +// Get the Phase of a Node +func (n Nodes) GetPhase(key string) (*NodePhase, error) { + val, err := n.Get(key) + if err != nil { + return nil, err + } + return &val.Phase, nil +} + +// Set the status of a node by key +func (n Nodes) Set(key string, status NodeStatus) { + if status.Name == "" { + log.Warnf("Name was not set for key %s", key) + } + if status.ID == "" { + log.Warnf("ID was not set for key %s", key) + } + _, ok := n[key] + if ok { + log.Tracef("Changing NodeStatus for %s to %+v", key, status) + } + n[key] = status +} + +// Delete a node from the Nodes by key +func (n Nodes) Delete(key string) { + has := n.Has(key) + if !has { + log.Warnf("Trying to delete non existent key %s", key) + return + } + delete(n, key) +} + +// Get the name of a node by key +func (n Nodes) GetName(key string) (string, error) { + val, err := n.Get(key) + if err != nil { + return "", err + } + return val.Name, nil +} func NodeWithName(name string) func(n NodeStatus) bool { return func(n NodeStatus) bool { return n.Name == name } } @@ -1752,6 +1822,33 @@ func (s Nodes) Children(parentNodeId string) Nodes { return childNodes } +// NestedChildrenStatus takes in a nodeID and returns all its children; this involves a breadth-first tree search. +// This is needed, for example, to mark all children nodes as failed.
+func (s Nodes) NestedChildrenStatus(parentNodeId string) ([]NodeStatus, error) { + parentNode, ok := s[parentNodeId] + if !ok { + return nil, fmt.Errorf("could not find %s in nodes when searching for nested children", parentNodeId) + } + + children := []NodeStatus{} + toexplore := []NodeStatus{parentNode} + + for len(toexplore) > 0 { + childNode := toexplore[0] + toexplore = toexplore[1:] + for _, nodeID := range childNode.Children { + toexplore = append(toexplore, s[nodeID]) + } + + if childNode.Name == parentNode.Name { + continue + } + children = append(children, childNode) + } + + return children, nil +} + // Filter returns the subset of the nodes that match the predicate, e.g. only failed nodes func (s Nodes) Filter(predicate func(NodeStatus) bool) Nodes { filteredNodes := make(Nodes) @@ -1786,6 +1883,8 @@ type UserContainer struct { // WorkflowStatus contains overall status information about a workflow type WorkflowStatus struct { // Phase a simple, high-level summary of where the workflow is in its lifecycle. + // Will be "" (Unknown), "Pending", or "Running" before the workflow is completed, and "Succeeded", + // "Failed" or "Error" once the workflow has completed. Phase WorkflowPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=WorkflowPhase"` // Time at which this workflow started @@ -1840,6 +1939,40 @@ type WorkflowStatus struct { // ArtifactGCStatus maintains the status of Artifact Garbage Collection ArtifactGCStatus *ArtGCStatus `json:"artifactGCStatus,omitempty" protobuf:"bytes,19,opt,name=artifactGCStatus"` + + // TaskResultsCompletionStatus tracks task result completion status (mapped by node ID). Used to prevent premature archiving and garbage collection. + TaskResultsCompletionStatus map[string]bool `json:"taskResultsCompletionStatus,omitempty" protobuf:"bytes,20,opt,name=taskResultsCompletionStatus"` +} + +func (ws *WorkflowStatus) MarkTaskResultIncomplete(name string) { + if ws.TaskResultsCompletionStatus == nil { + ws.TaskResultsCompletionStatus = make(map[string]bool) + } + ws.TaskResultsCompletionStatus[name] = false +} + +func (ws *WorkflowStatus) MarkTaskResultComplete(name string) { + if ws.TaskResultsCompletionStatus == nil { + ws.TaskResultsCompletionStatus = make(map[string]bool) + } + ws.TaskResultsCompletionStatus[name] = true +} + +func (ws *WorkflowStatus) TaskResultsInProgress() bool { + for _, value := range ws.TaskResultsCompletionStatus { + if !value { + return true + } + } + return false +} + +func (ws *WorkflowStatus) IsTaskResultIncomplete(name string) bool { + value, found := ws.TaskResultsCompletionStatus[name] + if found { + return !value + } + return false // workflows from older versions do not have this status, so assume completed if this is missing } func (ws *WorkflowStatus) IsOffloadNodeStatus() bool { @@ -1877,7 +2010,10 @@ type Backoff struct { Duration string `json:"duration,omitempty" protobuf:"varint,1,opt,name=duration"` // Factor is a factor to multiply the base duration after each failed retry Factor *intstr.IntOrString `json:"factor,omitempty" protobuf:"varint,2,opt,name=factor"` - // MaxDuration is the maximum amount of time allowed for the backoff strategy + // MaxDuration is the maximum amount of time allowed for a workflow in the backoff strategy. + // It is important to note that if the workflow template includes activeDeadlineSeconds, the pod's deadline is initially set with activeDeadlineSeconds. + // However, when the workflow fails, the pod's deadline is then overridden by maxDuration. 
+ // This ensures that the workflow does not exceed the specified maximum duration when retries are involved. MaxDuration string `json:"maxDuration,omitempty" protobuf:"varint,3,opt,name=maxDuration"` } @@ -1910,6 +2046,22 @@ type RetryStrategy struct { Expression string `json:"expression,omitempty" protobuf:"bytes,5,opt,name=expression"` } +// RetryPolicyActual gets the active retry policy for a strategy. +// If the policy is explicit, use that. +// If an expression is given, use a policy of Always so the +// expression is all that controls the retry for 'least surprise'. +// Otherwise, if neither is given, default to retry OnFailure. +func (s RetryStrategy) RetryPolicyActual() RetryPolicy { + if s.RetryPolicy != "" { + return s.RetryPolicy + } + if s.Expression == "" { + return RetryPolicyOnFailure + } else { + return RetryPolicyAlways + } +} + // The amount of requested resource * the duration that request was used. // This is represented as duration in seconds, so can be converted to and from // duration (with loss of precision). @@ -2075,6 +2227,8 @@ type NodeStatus struct { // Phase a simple, high-level summary of where the node is in its lifecycle. // Can be used as a state machine. + // Will be one of "Pending" or "Running" before the node is completed, or "Succeeded", + // "Skipped", "Failed", "Error", or "Omitted" as a final state. Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,7,opt,name=phase,casttype=NodePhase"` // BoundaryID indicates the node ID of the associated template root node in which this node belongs to @@ -2104,6 +2258,9 @@ type NodeStatus struct { // Daemoned tracks whether or not this node was daemoned and need to be terminated Daemoned *bool `json:"daemoned,omitempty" protobuf:"varint,13,opt,name=daemoned"` + // NodeFlag tracks some history of the node, e.g. hooked, retried, etc.
+ NodeFlag *NodeFlag `json:"nodeFlag,omitempty" protobuf:"bytes,27,opt,name=nodeFlag"` + // Inputs captures input parameter values and artifact locations supplied to this template invocation Inputs *Inputs `json:"inputs,omitempty" protobuf:"bytes,14,opt,name=inputs"` @@ -2212,7 +2369,7 @@ func (n NodeStatus) Pending() bool { return n.Phase == NodePending } -// IsDaemoned returns whether or not the node is deamoned +// IsDaemoned returns whether or not the node is daemoned func (n NodeStatus) IsDaemoned() bool { if n.Daemoned == nil || !*n.Daemoned { return false @@ -2337,6 +2494,9 @@ type S3Bucket struct { CreateBucketIfNotPresent *CreateS3BucketOptions `json:"createBucketIfNotPresent,omitempty" protobuf:"bytes,9,opt,name=createBucketIfNotPresent"` EncryptionOptions *S3EncryptionOptions `json:"encryptionOptions,omitempty" protobuf:"bytes,10,opt,name=encryptionOptions"` + + // CASecret specifies the secret that contains the CA, used to verify the TLS connection + CASecret *apiv1.SecretKeySelector `json:"caSecret,omitempty" protobuf:"bytes,11,opt,name=caSecret"` } // S3EncryptionOptions used to determine encryption options during s3 operations @@ -2511,7 +2671,7 @@ func (a *AzureArtifact) SetKey(key string) error { } func (a *AzureArtifact) HasLocation() bool { - return a != nil && a.Container != "" && a.Blob != "" + return a != nil && a.Endpoint != "" && a.Container != "" && a.Blob != "" } // HDFSArtifact is the location of an HDFS artifact @@ -2729,6 +2889,9 @@ type OSSBucket struct { // LifecycleRule specifies how to manage bucket's lifecycle LifecycleRule *OSSLifecycleRule `json:"lifecycleRule,omitempty" protobuf:"bytes,7,opt,name=lifecycleRule"` + + // UseSDKCreds tells the driver to figure out credentials based on sdk defaults. + UseSDKCreds bool `json:"useSDKCreds,omitempty" protobuf:"varint,8,opt,name=useSDKCreds"` } // OSSArtifact is the location of an Alibaba Cloud OSS artifact @@ -2863,6 +3026,8 @@ func (tmpl *Template) GetNodeType() NodeType { return NodeTypeSteps case TemplateTypeSuspend: return NodeTypeSuspend + case TemplateTypeHTTP: + return NodeTypeHTTP case TemplateTypePlugin: return NodeTypePlugin } @@ -2923,9 +3088,10 @@ func (tmpl *Template) GetVolumeMounts() []apiv1.VolumeMount { return nil } -// whether or not the template can and will have outputs (i.e. exit code and result) +// HasOutput returns true if the template can and will have outputs (i.e. exit code and result). +// In the case of a plugin, we assume it will have outputs because we cannot know at runtime. func (tmpl *Template) HasOutput() bool { - return tmpl.Container != nil || tmpl.ContainerSet.HasContainerNamed("main") || tmpl.Script != nil || tmpl.Data != nil || tmpl.HTTP != nil + return tmpl.Container != nil || tmpl.ContainerSet.HasContainerNamed("main") || tmpl.Script != nil || tmpl.Data != nil || tmpl.HTTP != nil || tmpl.Plugin != nil } func (t *Template) IsDaemon() bool { @@ -3061,7 +3227,8 @@ func (t *DAGTask) ShouldExpand() bool { // SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time type SuspendTemplate struct { - // Duration is the seconds to wait before automatically resuming a template + // Duration is the seconds to wait before automatically resuming a template. Must be a string. Default unit is seconds. 
+ // Could also be a Duration, e.g.: "2m", "6h" Duration string `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` } @@ -3191,20 +3358,9 @@ func (wf *Workflow) GetTemplateByName(name string) *Template { return nil } -func (w *Workflow) GetTemplates() []Template { - return append( - w.GetExecSpec().Templates, - w.Status.GetStoredTemplates()..., - ) -} - -func (wf *Workflow) GetNodeByName(nodeName string) *NodeStatus { +func (wf *Workflow) GetNodeByName(nodeName string) (*NodeStatus, error) { nodeID := wf.NodeID(nodeName) - node, ok := wf.Status.Nodes[nodeID] - if !ok { - return nil - } - return &node + return wf.Status.Nodes.Get(nodeID) } // GetResourceScope returns the template scope of workflow. @@ -3257,6 +3413,34 @@ func (wf *Workflow) SetStoredTemplate(scope ResourceScope, resourceName string, return false, nil } +// SetStoredInlineTemplate stores an inline template in the stored templates of the workflow. +func (wf *Workflow) SetStoredInlineTemplate(scope ResourceScope, resourceName string, tmpl *Template) error { + // Store inline templates in steps. + for _, steps := range tmpl.Steps { + for _, step := range steps.Steps { + if step.GetTemplate() != nil { + _, err := wf.SetStoredTemplate(scope, resourceName, &step, step.GetTemplate()) + if err != nil { + return err + } + } + } + } + // Store inline templates in DAG tasks. + if tmpl.DAG != nil { + for _, task := range tmpl.DAG.Tasks { + if task.GetTemplate() != nil { + _, err := wf.SetStoredTemplate(scope, resourceName, &task, task.GetTemplate()) + if err != nil { + return err + } + } + } + } + + return nil +} + // resolveTemplateReference resolves the stored template name of a given template holder on the template scope and determines // if it should be stored func resolveTemplateReference(callerScope ResourceScope, resourceName string, caller TemplateReferenceHolder) (string, bool) { @@ -3271,6 +3455,10 @@ func resolveTemplateReference(callerScope ResourceScope, resourceName string, ca return fmt.Sprintf("%s/%s/%s", referenceScope, tmplRef.Name, tmplRef.Template), true } else if callerScope != ResourceScopeLocal { // Either a WorkflowTemplate or a ClusterWorkflowTemplate is calling a template inside itself. Template storage is needed + if caller.GetTemplate() != nil { + // If we have an inlined template here, use the inlined name + return fmt.Sprintf("%s/%s/inline/%s", callerScope, resourceName, caller.GetName()), true + } return fmt.Sprintf("%s/%s/%s", callerScope, resourceName, caller.GetTemplateName()), true } else { // A Workflow is calling a template inside itself. Template storage is not needed @@ -3428,12 +3616,25 @@ type MetricLabel struct { // Gauge is a Gauge prometheus metric type Gauge struct { - // Value is the value of the metric + // Value is the value to be used in the operation with the metric's current value. If no operation is set, + // value is the value of the metric Value string `json:"value" protobuf:"bytes,1,opt,name=value"` // Realtime emits this metric in real time if applicable Realtime *bool `json:"realtime" protobuf:"varint,2,opt,name=realtime"` + // Operation defines the operation to apply with value and the metric's current value + // +optional + Operation GaugeOperation `json:"operation,omitempty" protobuf:"bytes,3,opt,name=operation"` } +// A GaugeOperation is the set of operations that can be used in a gauge metric.
+type GaugeOperation string + +const ( + GaugeOperationSet GaugeOperation = "Set" + GaugeOperationAdd GaugeOperation = "Add" + GaugeOperationSub GaugeOperation = "Sub" +) + // Histogram is a Histogram prometheus metric type Histogram struct { // Value is the value of the metric @@ -3686,3 +3887,10 @@ type NodeSynchronizationStatus struct { // Waiting is the name of the lock that this node is waiting for Waiting string `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` } + +type NodeFlag struct { + // Hooked tracks whether or not this node was triggered by hook or onExit + Hooked bool `json:"hooked,omitempty" protobuf:"varint,1,opt,name=hooked"` + // Retried tracks whether or not this node was retried by retryStrategy + Retried bool `json:"retried,omitempty" protobuf:"varint,2,opt,name=retried"` +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index d9603259d35..05aecefe741 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -9,7 +9,7 @@ import ( json "encoding/json" v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/policy/v1beta1" + policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -852,6 +852,22 @@ func (in ClusterWorkflowTemplates) DeepCopy() ClusterWorkflowTemplates { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Column) DeepCopyInto(out *Column) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Column. +func (in *Column) DeepCopy() *Column { + if in == nil { + return nil + } + out := new(Column) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Condition) DeepCopyInto(out *Condition) { *out = *in @@ -2052,6 +2068,22 @@ func (in *MutexStatus) DeepCopy() *MutexStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeFlag) DeepCopyInto(out *NodeFlag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFlag. +func (in *NodeFlag) DeepCopy() *NodeFlag { + if in == nil { + return nil + } + out := new(NodeFlag) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
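// NodeFlag (declared in types.go in this package) holds only two booleans,
// which is why its generated DeepCopyInto above can be a plain value copy
// (*out = *in) with no nested allocations. A small usage sketch, where node
// is an assumed wfv1.NodeStatus:
//
//	node.NodeFlag = &wfv1.NodeFlag{Hooked: true} // node was triggered by a hook/onExit
//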
func (in *NodeResult) DeepCopyInto(out *NodeResult) { *out = *in @@ -2095,6 +2127,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { *out = new(bool) **out = **in } + if in.NodeFlag != nil { + in, out := &in.NodeFlag, &out.NodeFlag + *out = new(NodeFlag) + **out = **in + } if in.Inputs != nil { in, out := &in.Inputs, &out.Inputs *out = new(Inputs) @@ -2480,6 +2517,11 @@ func (in *PodGC) DeepCopyInto(out *PodGC) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.DeleteDelayDuration != nil { + in, out := &in.DeleteDelayDuration, &out.DeleteDelayDuration + *out = new(metav1.Duration) + **out = **in + } return } @@ -2729,6 +2771,11 @@ func (in *S3Bucket) DeepCopyInto(out *S3Bucket) { *out = new(S3EncryptionOptions) (*in).DeepCopyInto(*out) } + if in.CASecret != nil { + in, out := &in.CASecret, &out.CASecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } return } @@ -3564,6 +3611,23 @@ func (in *WorkflowEventBindingSpec) DeepCopy() *WorkflowEventBindingSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowLevelArtifactGC) DeepCopyInto(out *WorkflowLevelArtifactGC) { + *out = *in + in.ArtifactGC.DeepCopyInto(&out.ArtifactGC) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowLevelArtifactGC. +func (in *WorkflowLevelArtifactGC) DeepCopy() *WorkflowLevelArtifactGC { + if in == nil { + return nil + } + out := new(WorkflowLevelArtifactGC) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WorkflowList) DeepCopyInto(out *WorkflowList) { *out = *in @@ -3762,7 +3826,7 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { } if in.PodDisruptionBudget != nil { in, out := &in.PodDisruptionBudget, &out.PodDisruptionBudget - *out = new(v1beta1.PodDisruptionBudgetSpec) + *out = new(policyv1.PodDisruptionBudgetSpec) (*in).DeepCopyInto(*out) } if in.Metrics != nil { @@ -3819,7 +3883,7 @@ func (in *WorkflowSpec) DeepCopyInto(out *WorkflowSpec) { } if in.ArtifactGC != nil { in, out := &in.ArtifactGC, &out.ArtifactGC - *out = new(ArtifactGC) + *out = new(WorkflowLevelArtifactGC) (*in).DeepCopyInto(*out) } return @@ -3898,6 +3962,13 @@ func (in *WorkflowStatus) DeepCopyInto(out *WorkflowStatus) { *out = new(ArtGCStatus) (*in).DeepCopyInto(*out) } + if in.TaskResultsCompletionStatus != nil { + in, out := &in.TaskResultsCompletionStatus, &out.TaskResultsCompletionStatus + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/cmd.go b/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/cmd.go deleted file mode 100644 index bf2e55852c7..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/cmd.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package cmd provides functionally common to various argo CLIs -package cmd - -import ( - "fmt" - "net/url" - "os" - "strings" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/argoproj/argo-workflows/v3" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// NewVersionCmd returns a new `version` command to be used as a sub-command to root -func NewVersionCmd(cliName string) *cobra.Command { - var short bool - versionCmd := cobra.Command{ - Use: "version", - Short: "Print 
version information", - Run: func(cmd *cobra.Command, args []string) { - version := argo.GetVersion() - PrintVersion(cliName, version, short) - }, - } - versionCmd.Flags().BoolVar(&short, "short", false, "print just the version number") - return &versionCmd -} - -func PrintVersion(cliName string, version wfv1.Version, short bool) { - fmt.Printf("%s: %s\n", cliName, version.Version) - if short { - return - } - fmt.Printf(" BuildDate: %s\n", version.BuildDate) - fmt.Printf(" GitCommit: %s\n", version.GitCommit) - fmt.Printf(" GitTreeState: %s\n", version.GitTreeState) - if version.GitTag != "" { - fmt.Printf(" GitTag: %s\n", version.GitTag) - } - fmt.Printf(" GoVersion: %s\n", version.GoVersion) - fmt.Printf(" Compiler: %s\n", version.Compiler) - fmt.Printf(" Platform: %s\n", version.Platform) -} - -// MustIsDir returns whether or not the given filePath is a directory. Exits if path does not exist -func MustIsDir(filePath string) bool { - fileInfo, err := os.Stat(filePath) - if err != nil { - log.Fatal(err) - } - return fileInfo.IsDir() -} - -// IsURL returns whether or not a string is a URL -func IsURL(u string) bool { - var parsedURL *url.URL - var err error - - parsedURL, err = url.ParseRequestURI(u) - if err == nil { - if parsedURL != nil && parsedURL.Host != "" { - return true - } - } - return false -} - -// ParseLabels turns a string representation of a label set into a map[string]string -func ParseLabels(labelSpec interface{}) (map[string]string, error) { - labelString, isString := labelSpec.(string) - if !isString { - return nil, fmt.Errorf("expected string, found %v", labelSpec) - } - if len(labelString) == 0 { - return nil, fmt.Errorf("no label spec passed") - } - labels := map[string]string{} - labelSpecs := strings.Split(labelString, ",") - for ix := range labelSpecs { - labelSpec := strings.Split(labelSpecs[ix], "=") - if len(labelSpec) != 2 { - return nil, fmt.Errorf("unexpected label spec: %s", labelSpecs[ix]) - } - if len(labelSpec[0]) == 0 { - return nil, fmt.Errorf("unexpected empty label key") - } - labels[labelSpec[0]] = labelSpec[1] - } - return labels, nil -} - -// SetLogFormatter sets a log formatter for logrus -func SetLogFormatter(logFormat string) { - timestampFormat := "2006-01-02T15:04:05.000Z" - switch strings.ToLower(logFormat) { - case "json": - log.SetFormatter(&log.JSONFormatter{TimestampFormat: timestampFormat}) - case "text": - log.SetFormatter(&log.TextFormatter{ - TimestampFormat: timestampFormat, - FullTimestamp: true, - }) - default: - log.Fatalf("Unknown log format '%s'", logFormat) - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/glog.go b/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/glog.go deleted file mode 100644 index 6fd5e49d2de..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/cmd/glog.go +++ /dev/null @@ -1,17 +0,0 @@ -package cmd - -import ( - "flag" - "strconv" - - "k8s.io/klog/v2" -) - -// SetGLogLevel set the glog level for the k8s go-client -// this is taken from argoproj/pkg but uses v2 of klog here -// to be compatible with k8s clients v0.19.x and above -func SetGLogLevel(glogLevel int) { - klog.InitFlags(nil) - _ = flag.Set("logtostderr", "true") - _ = flag.Set("v", strconv.Itoa(glogLevel)) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/errors/errors.go b/vendor/github.com/argoproj/argo-workflows/v3/util/errors/errors.go index 51f9a32bcc1..2982be216fd 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/errors/errors.go +++ 
b/vendor/github.com/argoproj/argo-workflows/v3/util/errors/errors.go @@ -28,8 +28,17 @@ func IsTransientErr(err error) bool { return false } err = argoerrs.Cause(err) - isTransient := isExceededQuotaErr(err) || apierr.IsTooManyRequests(err) || isResourceQuotaConflictErr(err) || isTransientNetworkErr(err) || apierr.IsServerTimeout(err) || apierr.IsServiceUnavailable(err) || matchTransientErrPattern(err) || - errors.Is(err, NewErrTransient("")) + isTransient := isExceededQuotaErr(err) || + apierr.IsTooManyRequests(err) || + isResourceQuotaConflictErr(err) || + isResourceQuotaTimeoutErr(err) || + isTransientNetworkErr(err) || + apierr.IsServerTimeout(err) || + apierr.IsServiceUnavailable(err) || + isTransientEtcdErr(err) || + matchTransientErrPattern(err) || + errors.Is(err, NewErrTransient("")) || + isTransientSqbErr(err) if isTransient { log.Infof("Transient error: %v", err) } else { @@ -57,6 +66,20 @@ func isResourceQuotaConflictErr(err error) bool { return apierr.IsConflict(err) && strings.Contains(err.Error(), "Operation cannot be fulfilled on resourcequota") } +func isResourceQuotaTimeoutErr(err error) bool { + return apierr.IsInternalError(err) && strings.Contains(err.Error(), "resource quota evaluation timed out") +} + +func isTransientEtcdErr(err error) bool { + // Some clusters expose these (transient) etcd errors to the caller + if strings.Contains(err.Error(), "etcdserver: leader changed") { + return true + } else if strings.Contains(err.Error(), "etcdserver: request timed out") { + return true + } + return false +} + func isTransientNetworkErr(err error) bool { switch err.(type) { case *net.DNSError, *net.OpError, net.UnknownNetworkError: @@ -83,6 +106,12 @@ func isTransientNetworkErr(err error) bool { } else if _, ok := err.(*url.Error); ok && strings.Contains(errorString, "EOF") { // If err is EOF, retry. return true + } else if strings.Contains(errorString, "http2: client connection lost") { + // If err is http2 transport ping timeout, retry. + return true + } else if strings.Contains(errorString, "connect: connection refused") { + // If err is connection refused, retry. + return true } return false @@ -95,3 +124,7 @@ func generateErrorString(err error) string { } return errorString } + +func isTransientSqbErr(err error) bool { + return strings.Contains(err.Error(), "upper: no more rows in") +} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/expand/expand.go b/vendor/github.com/argoproj/argo-workflows/v3/util/expand/expand.go deleted file mode 100644 index 04c6f68e0bf..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/expand/expand.go +++ /dev/null @@ -1,34 +0,0 @@ -package expand - -import ( - "sort" - "strings" - - "github.com/doublerebel/bellows" -) - -func Expand(m map[string]interface{}) map[string]interface{} { - return bellows.Expand(removeConflicts(m)) -} - -// It is possible for the map to contain conflicts: -// {"a.b": 1, "a": 2} -// What should the result be? We remove the less-specific key. 
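// Concretely, given the loop below: with sorted keys ["a", "a.b"], the
// parent "a" is deleted because the dotted child "a.b" exists, so
//
//	removeConflicts(map[string]interface{}{"a.b": 1, "a": 2})
//
// keeps only {"a.b": 1}, which bellows.Expand then nests into {"a": {"b": 1}}.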
-// {"a.b": 1, "a": 2} -> {"a.b": 1, "a": 2} -func removeConflicts(m map[string]interface{}) map[string]interface{} { - var keys []string - n := map[string]interface{}{} - for k, v := range m { - keys = append(keys, k) - n[k] = v - } - sort.Strings(keys) - for i := 0; i < len(keys)-1; i++ { - k := keys[i] - // remove any parent that has a child - if strings.HasPrefix(keys[i+1], k+".") { - delete(n, k) - } - } - return n -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/expr/env/env.go b/vendor/github.com/argoproj/argo-workflows/v3/util/expr/env/env.go deleted file mode 100644 index df2535cf6eb..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/expr/env/env.go +++ /dev/null @@ -1,35 +0,0 @@ -package env - -import ( - "encoding/json" - - sprig "github.com/Masterminds/sprig/v3" - exprpkg "github.com/argoproj/pkg/expr" - - "github.com/argoproj/argo-workflows/v3/util/expand" -) - -var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance - -func init() { - delete(sprigFuncMap, "env") - delete(sprigFuncMap, "expandenv") -} - -func GetFuncMap(m map[string]interface{}) map[string]interface{} { - env := expand.Expand(m) - for k, v := range exprpkg.GetExprEnvFunctionMap() { - env[k] = v - } - env["toJson"] = toJson - env["sprig"] = sprigFuncMap - return env -} - -func toJson(v interface{}) string { - output, err := json.Marshal(v) - if err != nil { - panic(err) - } - return string(output) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/file/fileutil.go b/vendor/github.com/argoproj/argo-workflows/v3/util/file/fileutil.go deleted file mode 100644 index 2dc6da19678..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/file/fileutil.go +++ /dev/null @@ -1,121 +0,0 @@ -package file - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "strings" - - "github.com/klauspost/pgzip" - log "github.com/sirupsen/logrus" - "k8s.io/utils/env" -) - -var gzipImpl = env.GetString(GZipImplEnvVarKey, PGZIP) - -const ( - GZipImplEnvVarKey = "GZIP_IMPLEMENTATION" - GZIP = "GZip" - PGZIP = "PGZip" -) - -type TarReader interface { - Next() (*tar.Header, error) -} - -// GetGzipReader gets the GzipReader based on `GZipImplEnvVarKey` environment variable. -func GetGzipReader(reader io.Reader) (io.ReadCloser, error) { - var err error - var gzipReader io.ReadCloser - switch gzipImpl { - case GZIP: - gzipReader, err = gzip.NewReader(reader) - default: - gzipReader, err = pgzip.NewReader(reader) - } - if err != nil { - return nil, err - } - return gzipReader, nil -} - -// ExistsInTar return true if file or directory exists in tar -func ExistsInTar(sourcePath string, tarReader TarReader) bool { - sourcePath = strings.Trim(sourcePath, "/") - for { - hdr, err := tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - return false - } - if hdr.FileInfo().IsDir() && strings.Contains(sourcePath, strings.Trim(hdr.Name, "/")) { - return true - } - if strings.Contains(sourcePath, hdr.Name) { - return true - } - } - return false -} - -// Close the file -func close(f io.Closer) { - err := f.Close() - if err != nil { - log.Warnf("Failed to close the file/writer/reader. 
%v", err) - } -} - -// CompressEncodeString will return the compressed string with base64 encoded -func CompressEncodeString(content string) string { - return base64.StdEncoding.EncodeToString(CompressContent([]byte(content))) -} - -// DecodeDecompressString will return decode and decompress the -func DecodeDecompressString(content string) (string, error) { - buf, err := base64.StdEncoding.DecodeString(content) - if err != nil { - return "", err - } - dBuf, err := DecompressContent(buf) - if err != nil { - return "", err - } - return string(dBuf), nil -} - -// CompressContent will compress the byte array using zip writer -func CompressContent(content []byte) []byte { - var buf bytes.Buffer - var gzipWriter io.WriteCloser - switch gzipImpl { - case GZIP: - gzipWriter = gzip.NewWriter(&buf) - default: - gzipWriter = pgzip.NewWriter(&buf) - } - - _, err := gzipWriter.Write(content) - if err != nil { - log.Warnf("Error in compressing: %v", err) - } - close(gzipWriter) - return buf.Bytes() -} - -// DecompressContent will return the uncompressed content -func DecompressContent(content []byte) ([]byte, error) { - buf := bytes.NewReader(content) - gzipReader, err := GetGzipReader(buf) - if err != nil { - return nil, fmt.Errorf("failed to decompress: %w", err) - } - defer close(gzipReader) - return ioutil.ReadAll(gzipReader) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/instanceid/service.go b/vendor/github.com/argoproj/argo-workflows/v3/util/instanceid/service.go deleted file mode 100644 index ebfdc12eb1d..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/instanceid/service.go +++ /dev/null @@ -1,60 +0,0 @@ -package instanceid - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/argoproj/argo-workflows/v3/util/labels" - "github.com/argoproj/argo-workflows/v3/workflow/common" -) - -type Service interface { - Label(obj metav1.Object) - With(options *metav1.ListOptions) - Validate(obj metav1.Object) error - InstanceID() string -} - -func NewService(instanceID string) Service { - return &service{instanceID} -} - -type service struct { - instanceID string -} - -func (s *service) InstanceID() string { - return s.instanceID -} - -func (s *service) Label(obj metav1.Object) { - if s.instanceID != "" { - labels.Label(obj, common.LabelKeyControllerInstanceID, s.instanceID) - } else { - labels.UnLabel(obj, common.LabelKeyControllerInstanceID) - } -} - -func (s *service) With(opts *metav1.ListOptions) { - if len(opts.LabelSelector) > 0 { - opts.LabelSelector += "," - } - if s.instanceID == "" { - opts.LabelSelector += fmt.Sprintf("!%s", common.LabelKeyControllerInstanceID) - } else { - opts.LabelSelector += fmt.Sprintf("%s=%s", common.LabelKeyControllerInstanceID, s.instanceID) - } -} - -func (s *service) Validate(obj metav1.Object) error { - l := obj.GetLabels() - if s.instanceID == "" { - if _, ok := l[common.LabelKeyControllerInstanceID]; !ok { - return nil - } - } else if val, ok := l[common.LabelKeyControllerInstanceID]; ok && val == s.instanceID { - return nil - } - return fmt.Errorf("'%s' is not managed by the current Argo Server", obj.GetName()) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parametrizable.go b/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parametrizable.go deleted file mode 100644 index 00ce57b989b..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parametrizable.go +++ /dev/null @@ -1,55 +0,0 @@ -package intstr - -import ( - "fmt" - "strconv" - 
"strings" - - "k8s.io/apimachinery/pkg/util/intstr" -) - -// These are utility functions when using IntOrString to hold either an Int or an Argo Variable -func Int(is *intstr.IntOrString) (*int, error) { - if is == nil { - return nil, nil - } - if is.Type == intstr.String { - i, err := strconv.Atoi(is.StrVal) - if err != nil { - return nil, fmt.Errorf("value '%s' cannot be resolved to an int", is.StrVal) - } - return &i, nil - } - i := int(is.IntVal) - return &i, nil -} - -func Int32(is *intstr.IntOrString) (*int32, error) { - v, err := Int(is) - if v == nil || err != nil { - return nil, err - } - i := int32(*v) - return &i, err -} - -func Int64(is *intstr.IntOrString) (*int64, error) { - v, err := Int(is) - if v == nil || err != nil { - return nil, err - } - i := int64(*v) - return &i, err -} - -func IsValidIntOrArgoVariable(is *intstr.IntOrString) bool { - if is == nil { - return true - } else if is.Type == intstr.Int { - return true - } else if _, err := strconv.Atoi(is.StrVal); err == nil { - return true - } else { - return strings.HasPrefix(is.StrVal, "{{") && strings.HasSuffix(is.StrVal, "}}") - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parse.go b/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parse.go deleted file mode 100644 index e1ad40722cf..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/intstr/parse.go +++ /dev/null @@ -1,9 +0,0 @@ -package intstr - -import "k8s.io/apimachinery/pkg/util/intstr" - -// convenience func to get a pointer -func ParsePtr(val string) *intstr.IntOrString { - x := intstr.Parse(val) - return &x -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go deleted file mode 100644 index fda3296e40c..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/json/json.go +++ /dev/null @@ -1,36 +0,0 @@ -package json - -import ( - "encoding/json" - "io" - - gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" -) - -// JSONMarshaler is a type which satisfies the grpc-gateway Marshaler interface -type JSONMarshaler struct{} - -// ContentType implements gwruntime.Marshaler. -func (j *JSONMarshaler) ContentType() string { - return "application/json" -} - -// Marshal implements gwruntime.Marshaler. -func (j *JSONMarshaler) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// NewDecoder implements gwruntime.Marshaler. -func (j *JSONMarshaler) NewDecoder(r io.Reader) gwruntime.Decoder { - return json.NewDecoder(r) -} - -// NewEncoder implements gwruntime.Marshaler. -func (j *JSONMarshaler) NewEncoder(w io.Writer) gwruntime.Encoder { - return json.NewEncoder(w) -} - -// Unmarshal implements gwruntime.Marshaler. 
-func (j *JSONMarshaler) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go b/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go deleted file mode 100644 index bdb25bce5ae..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/json/jsonify.go +++ /dev/null @@ -1,12 +0,0 @@ -package json - -import "encoding/json" - -func Jsonify(v interface{}) (map[string]interface{}, error) { - data, err := json.Marshal(v) - if err != nil { - return nil, err - } - x := make(map[string]interface{}) - return x, json.Unmarshal(data, &x) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/k8s/parse.go b/vendor/github.com/argoproj/argo-workflows/v3/util/k8s/parse.go deleted file mode 100644 index d74adae3ce9..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/k8s/parse.go +++ /dev/null @@ -1,50 +0,0 @@ -package k8s - -import ( - "net/http" - "strings" -) - -func ParseRequest(r *http.Request) (verb string, kind string) { - i := strings.Index(r.URL.Path, "/v") + 1 - path := strings.Split(r.URL.Path[i:], "/") - n := len(path) - - verb = map[string]string{ - http.MethodGet: "List", - http.MethodPost: "Create", - http.MethodDelete: "Delete", - http.MethodPatch: "Patch", - http.MethodPut: "Update", - }[r.Method] - - x := n%2 == 0 - - if r.URL.Query().Get("watch") != "" { - verb = "Watch" - } else if verb == "List" && !x { - verb = "Get" - } else if verb == "Delete" && x { - verb = "DeleteCollection" - } - - kind = "Unknown" - switch verb { - case "List", "Watch", "Create", "DeleteCollection": - if n > 2 && n%4 == 2 { - // sub-resource, e.g. pods/exec - kind = path[n-3] + "/" + path[n-1] - } else { - kind = path[n-1] - } - case "Get", "Delete", "Patch", "Update": - if x { - // sub-resource - kind = path[n-3] + "/" + path[n-1] - } else { - kind = path[n-2] - } - } - - return verb, kind -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/labels/labeler.go b/vendor/github.com/argoproj/argo-workflows/v3/util/labels/labeler.go deleted file mode 100644 index 99f80b01836..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/labels/labeler.go +++ /dev/null @@ -1,29 +0,0 @@ -package labels - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// label the object with the first non-empty value, if all value are empty, it is not set at all -func Label(obj metav1.Object, name string, values ...string) { - for _, value := range values { - if value == "" { - continue - } - labels := obj.GetLabels() - if labels == nil { - labels = map[string]string{} - } - labels[name] = value - obj.SetLabels(labels) - return - } -} - -func UnLabel(obj metav1.Object, name string) { - labels := obj.GetLabels() - if labels == nil { - return - } - delete(labels, name) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/sorting/topological_sorting.go b/vendor/github.com/argoproj/argo-workflows/v3/util/sorting/topological_sorting.go deleted file mode 100644 index f0e45e0f2b3..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/sorting/topological_sorting.go +++ /dev/null @@ -1,59 +0,0 @@ -package sorting - -import ( - "fmt" -) - -type TopologicalSortingNode struct { - NodeName string - Dependencies []string -} - -func TopologicalSorting(graph []*TopologicalSortingNode) ([]*TopologicalSortingNode, error) { - priorNodeCountMap := make(map[string]int, len(graph)) // nodeName -> priorNodeCount - nextNodeMap := 
make(map[string][]string, len(graph)) // nodeName -> nextNodeList - nodeNameMap := make(map[string]*TopologicalSortingNode, len(graph)) // nodeName -> node - for _, node := range graph { - if _, ok := nodeNameMap[node.NodeName]; ok { - return nil, fmt.Errorf("duplicated nodeName %s", node.NodeName) - } - nodeNameMap[node.NodeName] = node - priorNodeCountMap[node.NodeName] = len(node.Dependencies) - } - for _, node := range graph { - for _, dependency := range node.Dependencies { - if _, ok := nodeNameMap[dependency]; !ok { - return nil, fmt.Errorf("invalid dependency %s", dependency) - } - nextNodeMap[dependency] = append(nextNodeMap[dependency], node.NodeName) - } - } - - queue := make([]*TopologicalSortingNode, len(graph)) - head, tail := 0, 0 - for nodeName, priorNodeCount := range priorNodeCountMap { - if priorNodeCount == 0 { - queue[tail] = nodeNameMap[nodeName] - tail += 1 - } - } - - for head < len(queue) { - curr := queue[head] - if curr == nil { - return nil, fmt.Errorf("graph with cycle") - } - for _, next := range nextNodeMap[curr.NodeName] { - if priorNodeCountMap[next] > 0 { - if priorNodeCountMap[next] == 1 { - queue[tail] = nodeNameMap[next] - tail += 1 - } - priorNodeCountMap[next] -= 1 - } - } - head += 1 - } - - return queue, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/expression_template.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/expression_template.go deleted file mode 100644 index e2982c2dc8a..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/expression_template.go +++ /dev/null @@ -1,81 +0,0 @@ -package template - -import ( - "encoding/json" - "fmt" - "io" - "os" - - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/file" - "github.com/antonmedv/expr/parser/lexer" -) - -func init() { - if os.Getenv("EXPRESSION_TEMPLATES") != "false" { - registerKind(kindExpression) - } -} - -func expressionReplace(w io.Writer, expression string, env map[string]interface{}, allowUnresolved bool) (int, error) { - // The template is JSON-marshaled. This JSON-unmarshals the expression to undo any character escapes. 
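// For example (illustrative): inside a JSON-marshaled workflow the tag
// {{=foo == "bar"}} reaches this function as the escaped expression
//
//	foo == \"bar\"
//
// and unmarshalling it wrapped in quotes restores the evaluable form
//
//	foo == "bar"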
- var unmarshalledExpression string - err := json.Unmarshal([]byte(fmt.Sprintf(`"%s"`, expression)), &unmarshalledExpression) - if err != nil && allowUnresolved { - return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression))) - } - if err != nil { - return 0, fmt.Errorf("failed to unmarshall JSON expression: %w", err) - } - - if _, ok := env["retries"]; !ok && hasRetries(unmarshalledExpression) && allowUnresolved { - // this is to make sure expressions like `sprig.int(retries)` don't get resolved to 0 when `retries` don't exist in the env - // See https://github.com/argoproj/argo-workflows/issues/5388 - return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression))) - } - result, err := expr.Eval(unmarshalledExpression, env) - if (err != nil || result == nil) && allowUnresolved { // result is also un-resolved, and any error can be unresolved - return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression))) - } - if err != nil { - return 0, fmt.Errorf("failed to evaluate expression: %w", err) - } - if result == nil { - return 0, fmt.Errorf("failed to evaluate expression %q", expression) - } - resultMarshaled, err := json.Marshal(fmt.Sprintf("%v", result)) - if (err != nil || resultMarshaled == nil) && allowUnresolved { - return w.Write([]byte(fmt.Sprintf("{{%s%s}}", kindExpression, expression))) - } - if err != nil { - return 0, fmt.Errorf("failed to marshal evaluated expression: %w", err) - } - if resultMarshaled == nil { - return 0, fmt.Errorf("failed to marshal evaluated marshaled expression %q", expression) - } - // Trim leading and trailing quotes. The value is being inserted into something that's already a string. - marshaledLength := len(resultMarshaled) - return w.Write(resultMarshaled[1 : marshaledLength-1]) -} - -func EnvMap(replaceMap map[string]string) map[string]interface{} { - envMap := make(map[string]interface{}) - for k, v := range replaceMap { - envMap[k] = v - } - return envMap -} - -// hasRetries checks if the variable `retries` exists in the expression template -func hasRetries(expression string) bool { - tokens, err := lexer.Lex(file.NewSource(expression)) - if err != nil { - return false - } - for _, token := range tokens { - if token.Kind == lexer.Identifier && token.Value == "retries" { - return true - } - } - return false -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/kind.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/kind.go deleted file mode 100644 index 989464f4130..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/kind.go +++ /dev/null @@ -1,29 +0,0 @@ -package template - -import ( - "strings" - - jsonutil "github.com/argoproj/argo-workflows/v3/util/json" -) - -type kind = string // defines the prefix symbol that determines the syntax of the tag - -const ( - kindSimple kind = "" // default is simple, i.e. 
no prefix - kindExpression kind = "=" -) - -var kinds []kind - -func registerKind(k kind) { - kinds = append(kinds, k) -} - -func parseTag(tag string) (kind, string) { - for _, k := range kinds { - if strings.HasPrefix(tag, k) { - return k, jsonutil.Fix(strings.TrimPrefix(tag, k)) - } - } - return kindSimple, tag -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/replace.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/replace.go deleted file mode 100644 index 7a38a29d13b..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/replace.go +++ /dev/null @@ -1,29 +0,0 @@ -package template - -import ( - "encoding/json" - "errors" -) - -// Replace takes a json-formatted string and performs variable replacement. -func Replace(s string, replaceMap map[string]string, allowUnresolved bool) (string, error) { - if !json.Valid([]byte(s)) { - return "", errors.New("cannot do template replacements with invalid JSON") - } - - t, err := NewTemplate(s) - if err != nil { - return "", err - } - - replacedString, err := t.Replace(replaceMap, allowUnresolved) - if err != nil { - return s, err - } - - if !json.Valid([]byte(replacedString)) { - return s, errors.New("cannot finish template replacement because the result was invalid JSON") - } - - return replacedString, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/resolve_var.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/resolve_var.go deleted file mode 100644 index 954faa8dc20..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/resolve_var.go +++ /dev/null @@ -1,31 +0,0 @@ -package template - -import ( - "strings" - - "github.com/antonmedv/expr" - - "github.com/argoproj/argo-workflows/v3/errors" -) - -func ResolveVar(s string, m map[string]interface{}) (interface{}, error) { - tag := strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(s, prefix), suffix)) - kind, expression := parseTag(tag) - switch kind { - case kindExpression: - result, err := expr.Eval(expression, m) - if err != nil { - return nil, errors.Errorf(errors.CodeBadRequest, "Invalid expression: %q", expression) - } - if result == nil { - return nil, errors.Errorf(errors.CodeBadRequest, "Unable to resolve: %q", tag) - } - return result, nil - default: - v, ok := m[tag] - if !ok { - return nil, errors.Errorf(errors.CodeBadRequest, "Unable to resolve: %q", tag) - } - return v, nil - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/simple_template.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/simple_template.go deleted file mode 100644 index 89bd9ddb073..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/simple_template.go +++ /dev/null @@ -1,36 +0,0 @@ -package template - -import ( - "fmt" - "io" - "strconv" - "strings" - - "github.com/argoproj/argo-workflows/v3/errors" -) - -func simpleReplace(w io.Writer, tag string, replaceMap map[string]string, allowUnresolved bool) (int, error) { - replacement, ok := replaceMap[strings.TrimSpace(tag)] - if !ok { - // Attempt to resolve nested tags, if possible - if index := strings.LastIndex(tag, "{{"); index > 0 { - nestedTagPrefix := tag[:index] - nestedTag := tag[index+2:] - if replacement, ok := replaceMap[nestedTag]; ok { - replacement = strconv.Quote(replacement) - replacement = replacement[1 : len(replacement)-1] - return w.Write([]byte("{{" + nestedTagPrefix + replacement)) - } - } - if allowUnresolved { - // just write the same string back - 
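// End-to-end behaviour sketch via the exported Replace above (empty
// replacement map, values illustrative):
//
//	Replace(`{"msg":"{{name}}"}`, nil, true)  // -> `{"msg":"{{name}}"}`, tag preserved
//	Replace(`{"msg":"{{name}}"}`, nil, false) // -> error: failed to resolve {{name}}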
return w.Write([]byte(fmt.Sprintf("{{%s}}", tag))) - } - return 0, errors.Errorf(errors.CodeBadRequest, "failed to resolve {{%s}}", tag) - } - // The following escapes any special characters (e.g. newlines, tabs, etc...) - // in preparation for substitution - replacement = strconv.Quote(replacement) - replacement = replacement[1 : len(replacement)-1] - return w.Write([]byte(replacement)) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/template.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/template.go deleted file mode 100644 index e776b31b8b5..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/template.go +++ /dev/null @@ -1,46 +0,0 @@ -package template - -import ( - "bytes" - "io" - - "github.com/valyala/fasttemplate" - - exprenv "github.com/argoproj/argo-workflows/v3/util/expr/env" -) - -const ( - prefix = "{{" - suffix = "}}" -) - -type Template interface { - Replace(replaceMap map[string]string, allowUnresolved bool) (string, error) -} - -func NewTemplate(s string) (Template, error) { - template, err := fasttemplate.NewTemplate(s, prefix, suffix) - if err != nil { - return nil, err - } - return &impl{template}, nil -} - -type impl struct { - *fasttemplate.Template -} - -func (t *impl) Replace(replaceMap map[string]string, allowUnresolved bool) (string, error) { - replacedTmpl := &bytes.Buffer{} - _, err := t.Template.ExecuteFunc(replacedTmpl, func(w io.Writer, tag string) (int, error) { - kind, expression := parseTag(tag) - switch kind { - case kindExpression: - env := exprenv.GetFuncMap(EnvMap(replaceMap)) - return expressionReplace(w, expression, env, allowUnresolved) - default: - return simpleReplace(w, tag, replaceMap, allowUnresolved) - } - }) - return replacedTmpl.String(), err -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/template/validate.go b/vendor/github.com/argoproj/argo-workflows/v3/util/template/validate.go deleted file mode 100644 index 08a043be8d2..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/template/validate.go +++ /dev/null @@ -1,25 +0,0 @@ -package template - -import ( - "io" - "io/ioutil" - - "github.com/valyala/fasttemplate" -) - -func Validate(s string, validator func(tag string) error) error { - t, err := fasttemplate.NewTemplate(s, prefix, suffix) - if err != nil { - return err - } - _, err = t.ExecuteFunc(ioutil.Discard, func(w io.Writer, tag string) (int, error) { - kind, _ := parseTag(tag) - switch kind { - case kindExpression: - return 0, nil // we do not validate expression templates - default: - return 0, validator(tag) - } - }) - return err -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/tls/tls.go b/vendor/github.com/argoproj/argo-workflows/v3/util/tls/tls.go deleted file mode 100644 index 6662908cc1f..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/tls/tls.go +++ /dev/null @@ -1,151 +0,0 @@ -package tls - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "log" - "math/big" - "net" - "os" - "time" - - "k8s.io/client-go/kubernetes" - - "github.com/argoproj/argo-workflows/v3/util" -) - -const ( - // The key of the tls.crt within the Kubernetes secret - tlsCrtSecretKey = "tls.crt" - - // The key of the tls.key within the Kubernetes secret - tlsKeySecretKey = "tls.key" -) - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *ecdsa.PrivateKey: - b, err := 
x509.MarshalECPrivateKey(k) - if err != nil { - log.Print(err) - os.Exit(2) - } - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - return nil - } -} - -func generate() ([]byte, crypto.PrivateKey, error) { - hosts := []string{"localhost"} - - var err error - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate private key: %s", err) - } - - notBefore := time.Now() - notAfter := notBefore.Add(365 * 24 * time.Hour) - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate serial number: %s", err) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{"ArgoProj"}, - }, - NotBefore: notBefore, - NotAfter: notAfter, - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - for _, h := range hosts { - if ip := net.ParseIP(h); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, h) - } - } - - certBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) - if err != nil { - return nil, nil, fmt.Errorf("failed to create certificate: %s", err) - } - return certBytes, privateKey, nil -} - -// generatePEM generates a new certificate and key and returns it as PEM encoded bytes -func generatePEM() ([]byte, []byte, error) { - certBytes, privateKey, err := generate() - if err != nil { - return nil, nil, err - } - certpem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes}) - keypem := pem.EncodeToMemory(pemBlockForKey(privateKey)) - return certpem, keypem, nil -} - -// GenerateX509KeyPair generates a X509 key pair -func GenerateX509KeyPair() (*tls.Certificate, error) { - certpem, keypem, err := generatePEM() - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(certpem, keypem) - if err != nil { - return nil, err - } - return &cert, nil -} - -func GenerateX509KeyPairTLSConfig(tlsMinVersion uint16) (*tls.Config, error) { - - cer, err := GenerateX509KeyPair() - if err != nil { - return nil, err - } - - return &tls.Config{ - Certificates: []tls.Certificate{*cer}, - MinVersion: uint16(tlsMinVersion), - InsecureSkipVerify: true, - }, nil -} - -func GetServerTLSConfigFromSecret(ctx context.Context, kubectlConfig kubernetes.Interface, tlsKubernetesSecretName string, tlsMinVersion uint16, namespace string) (*tls.Config, error) { - certpem, err := util.GetSecrets(ctx, kubectlConfig, namespace, tlsKubernetesSecretName, tlsCrtSecretKey) - if err != nil { - return nil, err - } - - keypem, err := util.GetSecrets(ctx, kubectlConfig, namespace, tlsKubernetesSecretName, tlsKeySecretKey) - if err != nil { - return nil, err - } - - cert, err := tls.X509KeyPair(certpem, keypem) - if err != nil { - return nil, err - } - - return &tls.Config{ - Certificates: []tls.Certificate{cert}, - MinVersion: uint16(tlsMinVersion), - }, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/unstructured/unstructured.go b/vendor/github.com/argoproj/argo-workflows/v3/util/unstructured/unstructured.go deleted file mode 100644 index 7cc72e64a8c..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/unstructured/unstructured.go +++ /dev/null @@ -1,48 
+0,0 @@ -package unstructured - -import ( - "context" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers/internalinterfaces" - "k8s.io/client-go/tools/cache" -) - -// NewUnstructuredInformer constructs a new informer for Unstructured type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewUnstructuredInformer(resource schema.GroupVersionResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredUnstructuredInformer(resource, client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredUnstructuredInformer constructs a new informer for Unstructured type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredUnstructuredInformer(resource schema.GroupVersionResource, client dynamic.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - ctx := context.Background() - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Resource(resource).Namespace(namespace).List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.Resource(resource).Namespace(namespace).Watch(ctx, options) - }, - }, - &unstructured.Unstructured{}, - resyncPeriod, - indexers, - ) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/util/util.go b/vendor/github.com/argoproj/argo-workflows/v3/util/util.go deleted file mode 100644 index 502cb1f7dc3..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/util/util.go +++ /dev/null @@ -1,135 +0,0 @@ -package util - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - - "github.com/argoproj/argo-workflows/v3/errors" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" - "github.com/argoproj/argo-workflows/v3/util/retry" - waitutil "github.com/argoproj/argo-workflows/v3/util/wait" -) - -type Closer interface { - Close() error -} - -// Close is a convenience function to close a object that has a Close() method, ignoring any errors -// Used to satisfy errcheck lint -func Close(c Closer) { - _ = c.Close() -} - -// GetSecrets retrieves a secret value and memoizes the result -func GetSecrets(ctx context.Context, clientSet kubernetes.Interface, namespace, name, key string) ([]byte, error) { - secretsIf := clientSet.CoreV1().Secrets(namespace) - var secret *apiv1.Secret - err := waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { - var err error - secret, err = secretsIf.Get(ctx, name, metav1.GetOptions{}) - return 
!errorsutil.IsTransientErr(err), err - }) - if err != nil { - return []byte{}, errors.InternalWrapError(err) - } - val, ok := secret.Data[key] - if !ok { - return []byte{}, errors.Errorf(errors.CodeBadRequest, "secret '%s' does not have the key '%s'", name, key) - } - return val, nil -} - -// Write the Terminate message in pod spec -func WriteTerminateMessage(message string) { - err := ioutil.WriteFile("/dev/termination-log", []byte(message), 0o600) - if err != nil { - println("unable to write termination log: " + err.Error()) - } -} - -// Merge the two parameters Slice -// Merge the slices based on arguments order (first is high priority). -func MergeParameters(params ...[]wfv1.Parameter) []wfv1.Parameter { - var resultParams []wfv1.Parameter - passedParams := make(map[string]bool) - for _, param := range params { - for _, item := range param { - if _, ok := passedParams[item.Name]; ok { - continue - } - resultParams = append(resultParams, item) - passedParams[item.Name] = true - } - } - return resultParams -} - -// MergeArtifacts merges artifact argument slices -// Merge the slices based on arguments order (first is high priority). -func MergeArtifacts(artifactSlices ...[]wfv1.Artifact) []wfv1.Artifact { - var result []wfv1.Artifact - alreadyMerged := make(map[string]bool) - for _, artifacts := range artifactSlices { - for _, item := range artifacts { - if !alreadyMerged[item.Name] { - result = append(result, item) - alreadyMerged[item.Name] = true - } - } - } - return result -} - -func RecoverIndexFromNodeName(name string) int { - startIndex := strings.Index(name, "(") - endIndex := strings.Index(name, ":") - if startIndex < 0 || endIndex < 0 { - return -1 - } - out, err := strconv.Atoi(name[startIndex+1 : endIndex]) - if err != nil { - return -1 - } - return out -} - -func GenerateFieldSelectorFromWorkflowName(wfName string) string { - result := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", wfName)).String() - compare := RecoverWorkflowNameFromSelectorStringIfAny(result) - if wfName != compare { - panic(fmt.Sprintf("Could not recover field selector from workflow name. Expected '%s' but got '%s'\n", wfName, compare)) - } - return result -} - -func RecoverWorkflowNameFromSelectorStringIfAny(selector string) string { - const tag = "metadata.name=" - if starts := strings.Index(selector, tag); starts > -1 { - suffix := selector[starts+len(tag):] - if ends := strings.Index(suffix, ","); ends > -1 { - return strings.TrimSpace(suffix[:ends]) - } - return strings.TrimSpace(suffix) - } - return "" -} - -// getDeletePropagation return the default or configured DeletePropagation policy -func GetDeletePropagation() *metav1.DeletionPropagation { - propagationPolicy := metav1.DeletePropagationBackground - envVal, ok := os.LookupEnv("WF_DEL_PROPAGATION_POLICY") - if ok && envVal != "" { - propagationPolicy = metav1.DeletionPropagation(envVal) - } - return &propagationPolicy -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/version.go b/vendor/github.com/argoproj/argo-workflows/v3/version.go deleted file mode 100644 index 651364d6577..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/version.go +++ /dev/null @@ -1,59 +0,0 @@ -package argo - -import ( - "fmt" - "runtime" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// Version information set by link flags during build. We fall back to these sane -// default values when we build outside the Makefile context (e.g. go build or go test). 
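// These are typically injected with -X linker flags, e.g. (illustrative):
//
//	go build -ldflags "-X github.com/argoproj/argo-workflows/v3.version=v3.4.0 \
//	  -X github.com/argoproj/argo-workflows/v3.gitCommit=$(git rev-parse HEAD)"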
-var ( - version = "v0.0.0" // value from VERSION file - buildDate = "1970-01-01T00:00:00Z" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'` - gitCommit = "" // output from `git rev-parse HEAD` - gitTag = "" // output from `git describe --exact-match --tags HEAD` (if clean tree state) - gitTreeState = "" // determined from `git status --porcelain`. either 'clean' or 'dirty' -) - -// ImageTag return the image tag. -// GetVersion().Version adulterates the version making it useless as the image tag. -func ImageTag() string { - if version != "untagged" { - return version - } - return "latest" -} - -// GetVersion returns the version information -func GetVersion() wfv1.Version { - var versionStr string - if gitCommit != "" && gitTag != "" && gitTreeState == "clean" { - // if we have a clean tree state and the current commit is tagged, - // this is an official release. - versionStr = gitTag - } else { - // otherwise formulate a version string based on as much metadata - // information we have available. - versionStr = version - if len(gitCommit) >= 7 { - versionStr += "+" + gitCommit[0:7] - if gitTreeState != "clean" { - versionStr += ".dirty" - } - } else { - versionStr += "+unknown" - } - } - return wfv1.Version{ - Version: versionStr, - BuildDate: buildDate, - GitCommit: gitCommit, - GitTag: gitTag, - GitTreeState: gitTreeState, - GoVersion: runtime.Version(), - Compiler: runtime.Compiler, - Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/common.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/common.go deleted file mode 100644 index a8135a707a4..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/common.go +++ /dev/null @@ -1,35 +0,0 @@ -package common - -import ( - "errors" - "io" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// ArtifactDriver is the interface for loading and saving of artifacts -type ArtifactDriver interface { - // Load accepts an artifact source URL and places it at specified path - Load(inputArtifact *v1alpha1.Artifact, path string) error - - // OpenStream opens an artifact for reading. If the artifact is a file, - // then the file should be opened. If the artifact is a directory, the - // driver may return that as a tarball. OpenStream is intended to be efficient, - // so implementations should minimise usage of disk, CPU and memory. - // Implementations must not implement retry mechanisms. This will be handled by - // the client, so would result in O(nm) cost. 
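	// A consumer sketch (driver is any ArtifactDriver implementation; dst is
	// an assumed io.Writer):
	//
	//	rc, err := driver.OpenStream(art)
	//	if err != nil {
	//		return err
	//	}
	//	defer rc.Close()
	//	_, err = io.Copy(dst, rc) // stream without staging the whole artifact on disk
	//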
- OpenStream(a *v1alpha1.Artifact) (io.ReadCloser, error) - - // Save uploads the path to artifact destination - Save(path string, outputArtifact *v1alpha1.Artifact) error - - Delete(artifact *v1alpha1.Artifact) error - - ListObjects(artifact *v1alpha1.Artifact) ([]string, error) - - IsDirectory(artifact *v1alpha1.Artifact) (bool, error) -} - -// ErrDeleteNotSupported Sentinel error definition for artifact deletion -var ErrDeleteNotSupported = errors.New("delete not supported for this artifact storage, please check" + - " the following issue for details: https://github.com/argoproj/argo-workflows/issues/3102") diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/load_to_stream.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/load_to_stream.go deleted file mode 100644 index 18a165b33d1..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/common/load_to_stream.go +++ /dev/null @@ -1,41 +0,0 @@ -package common - -import ( - "io" - "os" - "reflect" - - log "github.com/sirupsen/logrus" - - "k8s.io/apimachinery/pkg/util/rand" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// wrapper around os.File enables us to remove the file when it gets closed -type selfDestructingFile struct { - os.File -} - -func (w selfDestructingFile) Close() error { - err := w.File.Close() - _ = os.Remove(w.Name()) - return err -} - -// Use ArtifactDriver.Load() to get a stream, which we can use for all implementations of ArtifactDriver.OpenStream() -// that aren't yet implemented the "right way" and/or for those that don't have a natural way of streaming -func LoadToStream(a *wfv1.Artifact, g ArtifactDriver) (io.ReadCloser, error) { - log.Infof("Efficient artifact streaming is not supported for type %v: see https://github.com/argoproj/argo-workflows/issues/8489", - reflect.TypeOf(g)) - filename := "/tmp/" + rand.String(32) - if err := g.Load(a, filename); err != nil { - return nil, err - } - f, err := os.Open(filename) - if err != nil { - _ = os.Remove(filename) - return nil, err - } - return &selfDestructingFile{*f}, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/hdfs.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/hdfs.go deleted file mode 100644 index 289b5ee3488..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/hdfs.go +++ /dev/null @@ -1,248 +0,0 @@ -package hdfs - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/argoproj/pkg/file" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/keytab" - - "github.com/argoproj/argo-workflows/v3/errors" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/common" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource" -) - -// ArtifactDriver is a driver for HDFS -type ArtifactDriver struct { - Addresses []string // comma-separated name nodes - Path string - Force bool - HDFSUser string - KrbOptions *KrbOptions -} - -var _ common.ArtifactDriver = &ArtifactDriver{} - -// KrbOptions is options for Kerberos -type KrbOptions struct { - CCacheOptions *CCacheOptions - KeytabOptions *KeytabOptions - Config string - ServicePrincipalName string -} - -// CCacheOptions is options for ccache -type CCacheOptions struct { - CCache credentials.CCache -} - -// KeytabOptions is 
options for keytab -type KeytabOptions struct { - Keytab keytab.Keytab - Username string - Realm string -} - -// ValidateArtifact validates HDFS artifact -func ValidateArtifact(errPrefix string, art *wfv1.HDFSArtifact) error { - if len(art.Addresses) == 0 { - return errors.Errorf(errors.CodeBadRequest, "%s.addresses is required", errPrefix) - } - if art.Path == "" { - return errors.Errorf(errors.CodeBadRequest, "%s.path is required", errPrefix) - } - if !filepath.IsAbs(art.Path) { - return errors.Errorf(errors.CodeBadRequest, "%s.path must be a absolute file path", errPrefix) - } - - hasKrbCCache := art.KrbCCacheSecret != nil - hasKrbKeytab := art.KrbKeytabSecret != nil - - if art.HDFSUser == "" && !hasKrbCCache && !hasKrbKeytab { - return errors.Errorf(errors.CodeBadRequest, "either %s.hdfsUser, %s.krbCCacheSecret or %s.krbKeytabSecret is required", errPrefix, errPrefix, errPrefix) - } - if hasKrbKeytab && (art.KrbServicePrincipalName == "" || art.KrbConfigConfigMap == nil || art.KrbUsername == "" || art.KrbRealm == "") { - return errors.Errorf(errors.CodeBadRequest, "%s.krbServicePrincipalName, %s.krbConfigConfigMap, %s.krbUsername and %s.krbRealm are required with %s.krbKeytabSecret", errPrefix, errPrefix, errPrefix, errPrefix, errPrefix) - } - if hasKrbCCache && (art.KrbServicePrincipalName == "" || art.KrbConfigConfigMap == nil) { - return errors.Errorf(errors.CodeBadRequest, "%s.krbServicePrincipalName and %s.krbConfigConfigMap are required with %s.krbCCacheSecret", errPrefix, errPrefix, errPrefix) - } - return nil -} - -// CreateDriver constructs ArtifactDriver -func CreateDriver(ctx context.Context, ci resource.Interface, art *wfv1.HDFSArtifact) (*ArtifactDriver, error) { - var krbConfig string - var krbOptions *KrbOptions - var err error - - if art.KrbConfigConfigMap != nil && art.KrbConfigConfigMap.Name != "" { - krbConfig, err = ci.GetConfigMapKey(ctx, art.KrbConfigConfigMap.Name, art.KrbConfigConfigMap.Key) - if err != nil { - return nil, err - } - } - if art.KrbCCacheSecret != nil && art.KrbCCacheSecret.Name != "" { - bytes, err := ci.GetSecret(ctx, art.KrbCCacheSecret.Name, art.KrbCCacheSecret.Key) - if err != nil { - return nil, err - } - ccache, err := credentials.ParseCCache([]byte(bytes)) - if err != nil { - return nil, err - } - krbOptions = &KrbOptions{ - CCacheOptions: &CCacheOptions{ - CCache: ccache, - }, - Config: krbConfig, - ServicePrincipalName: art.KrbServicePrincipalName, - } - } - if art.KrbKeytabSecret != nil && art.KrbKeytabSecret.Name != "" { - bytes, err := ci.GetSecret(ctx, art.KrbKeytabSecret.Name, art.KrbKeytabSecret.Key) - if err != nil { - return nil, err - } - ktb, err := keytab.Parse([]byte(bytes)) - if err != nil { - return nil, err - } - krbOptions = &KrbOptions{ - KeytabOptions: &KeytabOptions{ - Keytab: ktb, - Username: art.KrbUsername, - Realm: art.KrbRealm, - }, - Config: krbConfig, - ServicePrincipalName: art.KrbServicePrincipalName, - } - } - - driver := ArtifactDriver{ - Addresses: art.Addresses, - Path: art.Path, - Force: art.Force, - HDFSUser: art.HDFSUser, - KrbOptions: krbOptions, - } - return &driver, nil -} - -// Load downloads artifacts from HDFS compliant storage -func (driver *ArtifactDriver) Load(_ *wfv1.Artifact, path string) error { - hdfscli, err := createHDFSClient(driver.Addresses, driver.HDFSUser, driver.KrbOptions) - if err != nil { - return err - } - defer util.Close(hdfscli) - - srcStat, err := hdfscli.Stat(driver.Path) - if err != nil { - if os.IsNotExist(err) { - return errors.New(errors.CodeNotFound, err.Error()) - } - 
return err - } - if srcStat.IsDir() { - return fmt.Errorf("HDFS artifact does not support directory copy") - } - - _, err = os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return err - } - - if os.IsNotExist(err) { - dirPath := filepath.Dir(driver.Path) - if dirPath != "." && dirPath != "/" { - // Follow umask for the permission - err = os.MkdirAll(dirPath, 0o777) - if err != nil { - return err - } - } - } else { - if driver.Force { - err = os.Remove(path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - } - - err = hdfscli.CopyToLocal(driver.Path, path) - if err != nil { - if os.IsNotExist(err) { - return errors.New(errors.CodeNotFound, err.Error()) - } - return err - } - return nil -} - -func (driver *ArtifactDriver) OpenStream(a *wfv1.Artifact) (io.ReadCloser, error) { - // todo: this is a temporary implementation which loads file to disk first - return common.LoadToStream(a, driver) -} - -// Save saves an artifact to HDFS compliant storage -func (driver *ArtifactDriver) Save(path string, outputArtifact *wfv1.Artifact) error { - hdfscli, err := createHDFSClient(driver.Addresses, driver.HDFSUser, driver.KrbOptions) - if err != nil { - return err - } - defer util.Close(hdfscli) - - isDir, err := file.IsDirectory(path) - if err != nil { - return err - } - if isDir { - return fmt.Errorf("HDFS artifact does not support directory copy") - } - - _, err = hdfscli.Stat(driver.Path) - if err != nil && !os.IsNotExist(err) { - return err - } - - if os.IsNotExist(err) { - dirPath := filepath.Dir(driver.Path) - if dirPath != "." && dirPath != "/" { - // Follow umask for the permission - err = hdfscli.MkdirAll(dirPath, 0o777) - if err != nil { - return err - } - } - } else { - if driver.Force { - err = hdfscli.Remove(driver.Path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - } - - return hdfscli.CopyToRemote(path, driver.Path) -} - -// Delete is unsupported for the hdfs artifacts -func (driver *ArtifactDriver) Delete(s *wfv1.Artifact) error { - return common.ErrDeleteNotSupported -} - -func (driver *ArtifactDriver) ListObjects(artifact *wfv1.Artifact) ([]string, error) { - return nil, fmt.Errorf("ListObjects is currently not supported for this artifact type, but it will be in a future version") -} - -func (driver *ArtifactDriver) IsDirectory(artifact *wfv1.Artifact) (bool, error) { - return false, errors.New(errors.CodeNotImplemented, "IsDirectory currently unimplemented for HDFS") -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/util.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/util.go deleted file mode 100644 index 3af330ae012..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs/util.go +++ /dev/null @@ -1,53 +0,0 @@ -package hdfs - -import ( - "fmt" - - "github.com/colinmarc/hdfs" - krb "gopkg.in/jcmturner/gokrb5.v5/client" - "gopkg.in/jcmturner/gokrb5.v5/config" -) - -func createHDFSClient(addresses []string, user string, krbOptions *KrbOptions) (*hdfs.Client, error) { - options := hdfs.ClientOptions{ - Addresses: addresses, - } - - if krbOptions != nil { - krbClient, err := createKrbClient(krbOptions) - if err != nil { - return nil, err - } - options.KerberosClient = krbClient - options.KerberosServicePrincipleName = krbOptions.ServicePrincipalName - } else { - options.User = user - } - - return hdfs.NewClient(options) -} - -func createKrbClient(krbOptions *KrbOptions) (*krb.Client, error) { - krbConfig, err := config.NewConfigFromString(krbOptions.Config) -
if err != nil { - return nil, err - } - - if krbOptions.CCacheOptions != nil { - client, err := krb.NewClientFromCCache(krbOptions.CCacheOptions.CCache) - if err != nil { - return nil, err - } - return client.WithConfig(krbConfig), nil - } else if krbOptions.KeytabOptions != nil { - client := krb.NewClientWithKeytab(krbOptions.KeytabOptions.Username, krbOptions.KeytabOptions.Realm, krbOptions.KeytabOptions.Keytab) - client = *client.WithConfig(krbConfig) - err = client.Login() - if err != nil { - return nil, err - } - return &client, nil - } - - return nil, fmt.Errorf("Failed to get a Kerberos client") -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource/resource.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource/resource.go deleted file mode 100644 index df9e2fbc938..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource/resource.go +++ /dev/null @@ -1,8 +0,0 @@ -package resource - -import "context" - -type Interface interface { - GetSecret(ctx context.Context, name, key string) (string, error) - GetConfigMapKey(ctx context.Context, name, key string) (string, error) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/ancestry.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/ancestry.go deleted file mode 100644 index e91f7aac006..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/ancestry.go +++ /dev/null @@ -1,182 +0,0 @@ -package common - -import ( - "fmt" - "regexp" - "sort" - "strings" - "time" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -type DagContext interface { - GetTask(taskName string) *wfv1.DAGTask - GetTaskDependencies(taskName string) []string - GetTaskFinishedAtTime(taskName string) time.Time -} - -type TaskResult string - -const ( - TaskResultSucceeded TaskResult = "Succeeded" - TaskResultFailed TaskResult = "Failed" - TaskResultErrored TaskResult = "Errored" - TaskResultSkipped TaskResult = "Skipped" - TaskResultOmitted TaskResult = "Omitted" - TaskResultDaemoned TaskResult = "Daemoned" - TaskResultAnySucceeded TaskResult = "AnySucceeded" - TaskResultAllFailed TaskResult = "AllFailed" -) - -var ( - // TODO: This should use validate.workflowFieldNameFmt, but we can't import it here because an import cycle would be created - taskNameRegex = regexp.MustCompile(`([a-zA-Z0-9][-a-zA-Z0-9]*?\.[A-Z][a-zA-Z]+)|([a-zA-Z0-9][-a-zA-Z0-9]*)`) - taskResultRegex = regexp.MustCompile(`([a-zA-Z0-9][-a-zA-Z0-9]*?\.[A-Z][a-zA-Z]+)`) -) - -type expansionMatch struct { - taskName string - start int - end int -} - -type DependencyType int - -const ( - DependencyTypeTask DependencyType = iota - DependencyTypeItems -) - -func GetTaskDependencies(task *wfv1.DAGTask, ctx DagContext) (map[string]DependencyType, string) { - depends := getTaskDependsLogic(task, ctx) - matches := taskNameRegex.FindAllStringSubmatchIndex(depends, -1) - var expansionMatches []expansionMatch - dependencies := make(map[string]DependencyType) - for _, matchGroup := range matches { - // We have matched a taskName.TaskResult - if matchGroup[2] != -1 { - match := depends[matchGroup[2]:matchGroup[3]] - split := strings.Split(match, ".") - if split[1] == string(TaskResultAnySucceeded) || split[1] == string(TaskResultAllFailed) { - dependencies[split[0]] = DependencyTypeItems - } else if _, ok := dependencies[split[0]]; !ok { // DependencyTypeItems takes precedence - dependencies[split[0]] = DependencyTypeTask - } - } else if 
matchGroup[4] != -1 { - match := depends[matchGroup[4]:matchGroup[5]] - dependencies[match] = DependencyTypeTask - expansionMatches = append(expansionMatches, expansionMatch{taskName: match, start: matchGroup[4], end: matchGroup[5]}) - } - } - - if len(expansionMatches) == 0 { - return dependencies, depends - } - - sort.Slice(expansionMatches, func(i, j int) bool { - // Sort in descending order - return expansionMatches[i].start > expansionMatches[j].start - }) - for _, match := range expansionMatches { - matchTask := ctx.GetTask(match.taskName) - depends = depends[:match.start] + expandDependency(match.taskName, matchTask) + depends[match.end:] - } - - return dependencies, depends -} - -func ValidateTaskResults(dagTask *wfv1.DAGTask) error { - // If a user didn't specify a depends expression, there are no task results to validate - if dagTask.Depends == "" { - return nil - } - - matches := taskResultRegex.FindAllStringSubmatch(dagTask.Depends, -1) - for _, matchGroup := range matches { - split := strings.Split(matchGroup[1], ".") - taskName, taskResult := split[0], TaskResult(split[1]) - switch taskResult { - case TaskResultSucceeded, TaskResultFailed, TaskResultSkipped, TaskResultOmitted, TaskResultErrored, TaskResultDaemoned, TaskResultAnySucceeded, TaskResultAllFailed: - // Do nothing - default: - return fmt.Errorf("task result '%s' for task '%s' is invalid", taskResult, taskName) - } - } - return nil -} - -func getTaskDependsLogic(dagTask *wfv1.DAGTask, ctx DagContext) string { - if dagTask.Depends != "" { - return dagTask.Depends - } - - // For backwards compatibility, "dependencies: [A, B]" is equivalent to "depends: (A.Succeeded || A.Skipped || A.Daemoned) && (B.Succeeded || B.Skipped || B.Daemoned)" - var dependencies []string - for _, dependency := range dagTask.Dependencies { - depTask := ctx.GetTask(dependency) - dependencies = append(dependencies, expandDependency(dependency, depTask)) - } - return strings.Join(dependencies, " && ") -} - -func expandDependency(depName string, depTask *wfv1.DAGTask) string { - resultForTask := func(result TaskResult) string { return fmt.Sprintf("%s.%s", depName, result) } - - taskDepends := []string{resultForTask(TaskResultSucceeded), resultForTask(TaskResultSkipped), resultForTask(TaskResultDaemoned)} - if depTask.ContinueOn != nil { - if depTask.ContinueOn.Error { - taskDepends = append(taskDepends, resultForTask(TaskResultErrored)) - } - if depTask.ContinueOn.Failed { - taskDepends = append(taskDepends, resultForTask(TaskResultFailed)) - } - } - return "(" + strings.Join(taskDepends, " || ") + ")" -} - -// GetTaskAncestry returns a list of taskNames which are ancestors of this task. -// The list is ordered by the tasks' finished time. -func GetTaskAncestry(ctx DagContext, taskName string) []string { - visited := make(map[string]time.Time) - - var getAncestry func(currTask string) - getAncestry = func(currTask string) { - if _, seen := visited[currTask]; seen { - return - } - for _, depTask := range ctx.GetTaskDependencies(currTask) { - getAncestry(depTask) - } - if currTask != taskName { - if _, ok := visited[currTask]; !ok { - visited[currTask] = ctx.GetTaskFinishedAtTime(currTask) - } - } - } - - getAncestry(taskName) - - ancestry := make([]string, len(visited)) - for newTask, newFinishedAt := range visited { - insertTask(visited, ancestry, newTask, newFinishedAt) - } - - return ancestry -} - -// insertTask inserts the newTaskName at the right position ordered by time into the ancestry list.
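An editorial aside on the dependency expansion above: a minimal, runnable sketch of how a legacy `dependencies: [A, B]` list becomes a `depends` string. The helper and task names here are illustrative, not the vendored API, and continueOn handling is omitted.

package main

import (
	"fmt"
	"strings"
)

// expandDep mirrors expandDependency for a task with no continueOn settings:
// a dependency is satisfied if the task succeeded, was skipped, or is daemoned.
func expandDep(name string) string {
	results := []string{name + ".Succeeded", name + ".Skipped", name + ".Daemoned"}
	return "(" + strings.Join(results, " || ") + ")"
}

func main() {
	var parts []string
	for _, d := range []string{"A", "B"} {
		parts = append(parts, expandDep(d))
	}
	// prints: (A.Succeeded || A.Skipped || A.Daemoned) && (B.Succeeded || B.Skipped || B.Daemoned)
	fmt.Println(strings.Join(parts, " && "))
}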
-func insertTask(visited map[string]time.Time, ancestry []string, newTaskName string, finishedAt time.Time) { - for i, taskName := range ancestry { - if taskName == "" { - ancestry[i] = newTaskName - return - } - if finishedAt.Before(visited[taskName]) { - // insert at position i and shift others - copy(ancestry[i+1:], ancestry[i:]) - ancestry[i] = newTaskName - return - } - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/common.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/common.go index 72d61279e04..b1086892cfe 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/common.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/common.go @@ -51,6 +51,10 @@ const ( // AnnotationKeyProgress is N/M progress for the node AnnotationKeyProgress = workflow.WorkflowFullName + "/progress" + // AnnotationKeyReportOutputsCompleted is an annotation on a workflow pod indicating outputs have completed. + // Only used as a backup in case LabelKeyReportOutputsCompleted can't be added to WorkflowTaskResult. + AnnotationKeyReportOutputsCompleted = workflow.WorkflowFullName + "/report-outputs-completed" + // AnnotationKeyArtifactGCStrategy is listed as an annotation on the Artifact GC Pod to identify // the strategy whose artifacts are being deleted AnnotationKeyArtifactGCStrategy = workflow.WorkflowFullName + "/artifact-gc-strategy" @@ -69,7 +73,8 @@ const ( // LabelKeyWorkflowArchivingStatus indicates if a workflow needs archiving or not: // * `` - does not need archiving ... yet // * `Pending` - pending archiving - // * `Archived` - has been archived + // * `Archived` - has been archived and has live manifest + // * `Persisted` - has been archived and retrieved from db // See also `LabelKeyCompleted`. LabelKeyWorkflowArchivingStatus = workflow.WorkflowFullName + "/workflow-archiving-status" // LabelKeyWorkflow is the pod metadata label to indicate the associated workflow name @@ -93,6 +98,8 @@ const ( LabelKeyOnExit = workflow.WorkflowFullName + "/on-exit" // LabelKeyArtifactGCPodHash is a label applied to WorkflowTaskSets used by the Artifact Garbage Collection Pod LabelKeyArtifactGCPodHash = workflow.WorkflowFullName + "/artifact-gc-pod" + // LabelKeyReportOutputsCompleted is a label applied to WorkflowTaskResults indicating whether all the outputs have been reported. + LabelKeyReportOutputsCompleted = workflow.WorkflowFullName + "/report-outputs-completed" // ExecutorArtifactBaseDir is the base directory in the init container in which artifacts will be copied to. 
// Each artifact will be named according to its input name (e.g: /argo/inputs/artifacts/CODE) @@ -167,6 +174,8 @@ const ( GlobalVarWorkflowName = "workflow.name" // GlobalVarWorkflowNamespace is a global workflow variable referencing the workflow's metadata.namespace field GlobalVarWorkflowNamespace = "workflow.namespace" + // GlobalVarWorkflowMainEntrypoint is a global workflow variable referencing the workflow's top level entrypoint name + GlobalVarWorkflowMainEntrypoint = "workflow.mainEntrypoint" // GlobalVarWorkflowServiceAccountName is a global workflow variable referencing the workflow's spec.serviceAccountName field GlobalVarWorkflowServiceAccountName = "workflow.serviceAccountName" // GlobalVarWorkflowUID is a global workflow variable referencing the workflow's metadata.uid field @@ -224,6 +233,8 @@ const ( LocalVarRetriesLastStatus = "lastRetry.status" // LocalVarRetriesLastDuration is a variable that references information about the last retry's duration, in seconds LocalVarRetriesLastDuration = "lastRetry.duration" + // LocalVarRetriesLastMessage is a variable that references information about the last retry's failure message + LocalVarRetriesLastMessage = "lastRetry.message" KubeConfigDefaultMountPath = "/kube/config" KubeConfigDefaultVolumeName = "kubeconfig" @@ -240,9 +251,6 @@ const ( // ArgoProgressPath defines the path to a file used for self reporting progress ArgoProgressPath = VarRunArgoPath + "/progress" - // ErrDeadlineExceeded is the pod status reason when exceed deadline - ErrDeadlineExceeded = "DeadlineExceeded" - ConfigMapName = "workflow-controller-configmap" ) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/configmap.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/configmap.go deleted file mode 100644 index 5624f309ebf..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/configmap.go +++ /dev/null @@ -1,36 +0,0 @@ -package common - -import ( - "fmt" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" - - "github.com/argoproj/argo-workflows/v3/errors" -) - -// GetConfigMapValue retrieves a configmap value -func GetConfigMapValue(configMapInformer cache.SharedIndexInformer, namespace, name, key string) (string, error) { - obj, exists, err := configMapInformer.GetIndexer().GetByKey(namespace + "/" + name) - if err != nil { - return "", err - } - if exists { - cm, ok := obj.(*apiv1.ConfigMap) - if !ok { - return "", fmt.Errorf("unable to convert object %s to configmap when syncing ConfigMaps", name) - } - if cmType := cm.Labels[LabelKeyConfigMapType]; cmType != LabelValueTypeConfigMapParameter { - return "", fmt.Errorf( - "ConfigMap '%s' needs to have the label %s: %s to load parameters", - name, LabelKeyConfigMapType, LabelValueTypeConfigMapParameter) - } - cmValue, ok := cm.Data[key] - if !ok { - return "", errors.Errorf(errors.CodeNotFound, "ConfigMap '%s' does not have the key '%s'", name, key) - } - return cmValue, nil - } - return "", errors.Errorf(errors.CodeNotFound, "ConfigMap '%s' does not exist. 
Please make sure it has the label %s: %s to be detectable by the controller", - name, LabelKeyConfigMapType, LabelValueTypeConfigMapParameter) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/convert.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/convert.go deleted file mode 100644 index b71820626f4..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/convert.go +++ /dev/null @@ -1,86 +0,0 @@ -package common - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -func ConvertCronWorkflowToWorkflow(cronWf *wfv1.CronWorkflow) *wfv1.Workflow { - meta := metav1.ObjectMeta{ - GenerateName: cronWf.Name + "-", - Labels: make(map[string]string), - Annotations: map[string]string{ - AnnotationKeyCronWfScheduledTime: time.Now().Format(time.RFC3339), - }, - } - return toWorkflow(*cronWf, meta) -} - -func ConvertCronWorkflowToWorkflowWithProperties(cronWf *wfv1.CronWorkflow, name string, scheduledTime time.Time) *wfv1.Workflow { - meta := metav1.ObjectMeta{ - Name: name, - Labels: make(map[string]string), - Annotations: map[string]string{ - AnnotationKeyCronWfScheduledTime: scheduledTime.Format(time.RFC3339), - }, - } - return toWorkflow(*cronWf, meta) -} - -func NewWorkflowFromWorkflowTemplate(templateName string, clusterScope bool) *wfv1.Workflow { - wf := &wfv1.Workflow{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: templateName + "-", - Labels: make(map[string]string), - Annotations: make(map[string]string), - }, - Spec: wfv1.WorkflowSpec{ - WorkflowTemplateRef: &wfv1.WorkflowTemplateRef{ - Name: templateName, - ClusterScope: clusterScope, - }, - }, - } - - if clusterScope { - wf.Labels[LabelKeyClusterWorkflowTemplate] = templateName - } else { - wf.Labels[LabelKeyWorkflowTemplate] = templateName - } - return wf -} - -func toWorkflow(cronWf wfv1.CronWorkflow, objectMeta metav1.ObjectMeta) *wfv1.Workflow { - wf := &wfv1.Workflow{ - TypeMeta: metav1.TypeMeta{ - Kind: workflow.WorkflowKind, - APIVersion: cronWf.TypeMeta.APIVersion, - }, - ObjectMeta: objectMeta, - Spec: cronWf.Spec.WorkflowSpec, - } - - if instanceId, ok := cronWf.ObjectMeta.GetLabels()[LabelKeyControllerInstanceID]; ok { - wf.ObjectMeta.GetLabels()[LabelKeyControllerInstanceID] = instanceId - } - - wf.Labels[LabelKeyCronWorkflow] = cronWf.Name - if cronWf.Spec.WorkflowMetadata != nil { - for key, label := range cronWf.Spec.WorkflowMetadata.Labels { - wf.Labels[key] = label - } - - if len(cronWf.Spec.WorkflowMetadata.Annotations) > 0 { - for key, annotation := range cronWf.Spec.WorkflowMetadata.Annotations { - wf.Annotations[key] = annotation - } - } - } - wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *metav1.NewControllerRef(&cronWf, wfv1.SchemeGroupVersion.WithKind(workflow.CronWorkflowKind)))) - - return wf -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/params.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/params.go deleted file mode 100644 index fa7314e8ebf..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/params.go +++ /dev/null @@ -1,24 +0,0 @@ -package common - -// Parameters extends string map with useful methods. -type Parameters map[string]string - -// Merge merges given parameters. 
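A hedged usage sketch of the Parameters helpers that follow, re-declared locally so it runs standalone: later arguments win on key conflicts, and the receiver map is never mutated.

package main

import "fmt"

// Parameters re-declares the removed type for illustration: a string map
// with copy-on-merge semantics.
type Parameters map[string]string

// DeepCopy returns a new map with the same entries as the receiver.
func (ps Parameters) DeepCopy() Parameters {
	out := make(Parameters, len(ps))
	for k, v := range ps {
		out[k] = v
	}
	return out
}

// Merge copies the receiver, then overlays each argument in order.
func (ps Parameters) Merge(args ...Parameters) Parameters {
	out := ps.DeepCopy()
	for _, p := range args {
		for k, v := range p {
			out[k] = v
		}
	}
	return out
}

func main() {
	global := Parameters{"workflow.name": "wf-1", "region": "us-east-1"}
	local := Parameters{"region": "eu-west-1"}
	merged := global.Merge(local)
	fmt.Println(merged["region"]) // eu-west-1: later maps take precedence
	fmt.Println(global["region"]) // us-east-1: the receiver is unchanged
}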
-func (ps Parameters) Merge(args ...Parameters) Parameters { - newParams := ps.DeepCopy() - for _, params := range args { - for k, v := range params { - newParams[k] = v - } - } - return newParams -} - -// DeepCopy returns a new instance which has the same parameters as the receiver. -func (ps Parameters) DeepCopy() Parameters { - newParams := make(Parameters) - for k, v := range ps { - newParams[k] = v - } - return newParams -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/parse.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/parse.go deleted file mode 100644 index 29d5de552e9..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/parse.go +++ /dev/null @@ -1,175 +0,0 @@ -package common - -import ( - "regexp" - "strings" - - jsonpkg "github.com/argoproj/pkg/json" - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "sigs.k8s.io/yaml" - - wf "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -var yamlSeparator = regexp.MustCompile(`\n---`) - -type ParseResult struct { - Object metav1.Object - Err error -} - -func ParseObjects(body []byte, strict bool) []ParseResult { - var res []ParseResult - if jsonpkg.IsJSON(body) { - un := &unstructured.Unstructured{} - err := jsonpkg.Unmarshal(body, un) - if un.GetKind() != "" && err != nil { - // only return an error if this is a kubernetes object, otherwise, ignore - return append(res, ParseResult{nil, err}) - } - v, err := toWorkflowTypeJSON(body, un.GetKind(), strict) - return append(res, ParseResult{v, err}) - } - - for i, text := range yamlSeparator.Split(string(body), -1) { - if strings.TrimSpace(text) == "" { - continue - } - un := &unstructured.Unstructured{} - err := yaml.Unmarshal([]byte(text), un) - if err != nil { - // Only return an error if this is a kubernetes object, otherwise, print the error - if un.GetKind() != "" { - res = append(res, ParseResult{nil, err}) - } else { - log.Errorf("yaml file at index %d is not valid: %s", i, err) - } - continue - } - v, err := toWorkflowTypeYAML([]byte(text), un.GetKind(), strict) - if v != nil { - // only append when this is a Kubernetes object - res = append(res, ParseResult{v, err}) - } - } - return res -} - -func objectForKind(kind string) metav1.Object { - switch kind { - case wf.CronWorkflowKind: - return &wfv1.CronWorkflow{} - case wf.ClusterWorkflowTemplateKind: - return &wfv1.ClusterWorkflowTemplate{} - case wf.WorkflowKind: - return &wfv1.Workflow{} - case wf.WorkflowEventBindingKind: - return &wfv1.WorkflowEventBinding{} - case wf.WorkflowTemplateKind: - return &wfv1.WorkflowTemplate{} - case wf.WorkflowTaskSetKind: - return &wfv1.WorkflowTaskSet{} - default: - return &metav1.ObjectMeta{} - } -} - -func toWorkflowTypeYAML(body []byte, kind string, strict bool) (metav1.Object, error) { - var json []byte - var err error - - if strict { - json, err = yaml.YAMLToJSONStrict(body) - } else { - json, err = yaml.YAMLToJSON(body) - } - if err != nil { - return nil, err - } - - return toWorkflowTypeJSON(json, kind, strict) -} - -func toWorkflowTypeJSON(body []byte, kind string, strict bool) (metav1.Object, error) { - v := objectForKind(kind) - if strict { - return v, jsonpkg.UnmarshalStrict(body, v) - } - - return v, jsonpkg.Unmarshal(body, v) -} - -// SplitWorkflowYAMLFile is a helper to split a body into multiple workflow objects -func 
SplitWorkflowYAMLFile(body []byte, strict bool) ([]wfv1.Workflow, error) { - manifests := make([]wfv1.Workflow, 0) - for _, res := range ParseObjects(body, strict) { - obj, err := res.Object, res.Err - v, ok := obj.(*wfv1.Workflow) - if !ok { - log.Warnf("%s is not of kind Workflow. Ignoring...", obj.GetName()) - continue - } - if err != nil { // only returns parsing errors for workflow types - return nil, err - } - manifests = append(manifests, *v) - } - return manifests, nil -} - -// SplitWorkflowTemplateYAMLFile is a helper to split a body into multiple workflow template objects -func SplitWorkflowTemplateYAMLFile(body []byte, strict bool) ([]wfv1.WorkflowTemplate, error) { - manifests := make([]wfv1.WorkflowTemplate, 0) - for _, res := range ParseObjects(body, strict) { - obj, err := res.Object, res.Err - v, ok := obj.(*wfv1.WorkflowTemplate) - if !ok { - log.Warnf("%s is not of kind WorkflowTemplate. Ignoring...", obj.GetName()) - continue - } - if err != nil { // only returns parsing errors for template types - return nil, err - } - manifests = append(manifests, *v) - } - return manifests, nil -} - -// SplitCronWorkflowYAMLFile is a helper to split a body into multiple cron workflow objects -func SplitCronWorkflowYAMLFile(body []byte, strict bool) ([]wfv1.CronWorkflow, error) { - manifests := make([]wfv1.CronWorkflow, 0) - for _, res := range ParseObjects(body, strict) { - obj, err := res.Object, res.Err - v, ok := obj.(*wfv1.CronWorkflow) - if !ok { - log.Warnf("%s is not of kind CronWorkflow. Ignoring...", obj.GetName()) - continue - } - if err != nil { // only returns parsing errors for cron types - return nil, err - } - manifests = append(manifests, *v) - } - return manifests, nil -} - -// SplitClusterWorkflowTemplateYAMLFile is a helper to split a body into multiple cluster workflow template objects -func SplitClusterWorkflowTemplateYAMLFile(body []byte, strict bool) ([]wfv1.ClusterWorkflowTemplate, error) { - manifests := make([]wfv1.ClusterWorkflowTemplate, 0) - for _, res := range ParseObjects(body, strict) { - obj, err := res.Object, res.Err - v, ok := obj.(*wfv1.ClusterWorkflowTemplate) - if !ok { - log.Warnf("%s is not of kind ClusterWorkflowTemplate. Ignoring...", obj.GetName()) - continue - } - if err != nil { // only returns parsing errors for cwft types - return nil, err - } - manifests = append(manifests, *v) - } - return manifests, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/placeholder.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/placeholder.go deleted file mode 100644 index fdc2c36c9c5..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/placeholder.go +++ /dev/null @@ -1,27 +0,0 @@ -package common - -import ( - "fmt" - "strings" -) - -// placeholderGenerator generates dynamic placeholder strings. -type placeholderGenerator struct { - index int -} - -// NewPlaceholderGenerator returns a placeholderGenerator.
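For orientation, a minimal sketch of the `\n---` document splitting that the Split*YAMLFile helpers above build on (plain regexp only; the wfv1 typing, strict mode, and JSON path are omitted, and the manifest bodies are illustrative).

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// yamlSeparator matches the start of a new YAML document, as in parse.go.
var yamlSeparator = regexp.MustCompile(`\n---`)

func main() {
	body := "kind: Workflow\nmetadata:\n  name: a\n---\nkind: CronWorkflow\nmetadata:\n  name: b\n"
	for i, doc := range yamlSeparator.Split(body, -1) {
		if strings.TrimSpace(doc) == "" {
			continue // blank documents are skipped, as in ParseObjects
		}
		fmt.Printf("document %d:\n%s\n", i, strings.TrimSpace(doc))
	}
}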
-func NewPlaceholderGenerator() *placeholderGenerator { - return &placeholderGenerator{} -} - -// NextPlaceholder returns an arbitrary string to perform mock substitution of variables -func (p *placeholderGenerator) NextPlaceholder() string { - s := fmt.Sprintf("placeholder-%d", p.index) - p.index = p.index + 1 - return s -} - -func (p *placeholderGenerator) IsPlaceholder(s string) bool { - return strings.HasPrefix(s, "placeholder-") -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/util.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/util.go deleted file mode 100644 index b8adafd0247..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/common/util.go +++ /dev/null @@ -1,346 +0,0 @@ -package common - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "net/http" - "os/exec" - "runtime" - "sort" - "strings" - "time" - - "github.com/gorilla/websocket" - log "github.com/sirupsen/logrus" - apiv1 "k8s.io/api/core/v1" - apierr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/remotecommand" - - "github.com/argoproj/argo-workflows/v3/errors" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util" - "github.com/argoproj/argo-workflows/v3/util/template" -) - -// FindOverlappingVolume looks at an artifact path, checks if it overlaps with any -// user specified volumeMounts in the template, and returns the deepest volumeMount -// (if any). A return value of nil indicates the path is not under any volumeMount. -func FindOverlappingVolume(tmpl *wfv1.Template, path string) *apiv1.VolumeMount { - volumeMounts := tmpl.GetVolumeMounts() - sort.Slice(volumeMounts, func(i, j int) bool { - return len(volumeMounts[i].MountPath) > len(volumeMounts[j].MountPath) - }) - for _, mnt := range volumeMounts { - normalizedMountPath := strings.TrimRight(mnt.MountPath, "/") - if path == normalizedMountPath || isSubPath(path, normalizedMountPath) { - return &mnt - } - } - return nil -} - -func isSubPath(path string, normalizedMountPath string) bool { - return strings.HasPrefix(path, normalizedMountPath+"/") -} - -type RoundTripCallback func(conn *websocket.Conn, resp *http.Response, err error) error - -type WebsocketRoundTripper struct { - Dialer *websocket.Dialer - Do RoundTripCallback -} - -func (d *WebsocketRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - conn, resp, err := d.Dialer.Dial(r.URL.String(), r.Header) - if err == nil { - defer util.Close(conn) - } - return resp, d.Do(conn, resp, err) -} - -// ExecPodContainer runs a command in a container in a pod and returns the remotecommand.Executor -func ExecPodContainer(restConfig *rest.Config, namespace string, pod string, container string, stdout bool, stderr bool, command ...string) (remotecommand.Executor, error) { - clientset, err := kubernetes.NewForConfig(restConfig) - if err != nil { - return nil, errors.InternalWrapError(err) - } - - execRequest := clientset.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(pod). - Namespace(namespace). - SubResource("exec"). - Param("container", container). - Param("stdout", fmt.Sprintf("%v", stdout)). - Param("stderr", fmt.Sprintf("%v", stderr)).
- Param("tty", "false") - - for _, cmd := range command { - execRequest = execRequest.Param("command", cmd) - } - - log.Info(execRequest.URL()) - exec, err := remotecommand.NewSPDYExecutor(restConfig, "POST", execRequest.URL()) - if err != nil { - return nil, errors.InternalWrapError(err) - } - return exec, nil -} - -// GetExecutorOutput returns the output of an remotecommand.Executor -func GetExecutorOutput(exec remotecommand.Executor) (*bytes.Buffer, *bytes.Buffer, error) { - var stdOut bytes.Buffer - var stdErr bytes.Buffer - err := exec.Stream(remotecommand.StreamOptions{ - Stdout: &stdOut, - Stderr: &stdErr, - Tty: false, - }) - if err != nil { - return nil, nil, errors.InternalWrapError(err) - } - return &stdOut, &stdErr, nil -} - -// ProcessArgs sets in the inputs, the values either passed via arguments, or the hardwired values -// It substitutes: -// * parameters in the template from the arguments -// * global parameters (e.g. {{workflow.parameters.XX}}, {{workflow.name}}, {{workflow.status}}) -// * local parameters (e.g. {{pod.name}}) -func ProcessArgs(tmpl *wfv1.Template, args wfv1.ArgumentsProvider, globalParams, localParams Parameters, validateOnly bool, namespace string, configMapInformer cache.SharedIndexInformer) (*wfv1.Template, error) { - // For each input parameter: - // 1) check if was supplied as argument. if so use the supplied value from arg - // 2) if not, use default value. - // 3) if no default value, it is an error - newTmpl := tmpl.DeepCopy() - for i, inParam := range newTmpl.Inputs.Parameters { - if inParam.Value == nil && inParam.Default != nil { - // first set to default value - inParam.Value = inParam.Default - } - // overwrite value from argument (if supplied) - argParam := args.GetParameterByName(inParam.Name) - if argParam != nil { - if argParam.Value != nil { - inParam.Value = argParam.Value - } else { - inParam.ValueFrom = argParam.ValueFrom - } - } - if inParam.ValueFrom != nil && inParam.ValueFrom.ConfigMapKeyRef != nil { - if configMapInformer != nil { - // SubstituteParams is called only at the end of this method. To support parametrization of the configmap - // we need to perform a substitution here over the name and the key of the ConfigMapKeyRef. 
- cmName, err := substituteConfigMapKeyRefParam(inParam.ValueFrom.ConfigMapKeyRef.Name, globalParams) - if err != nil { - log.WithError(err).Error("unable to substitute name for ConfigMapKeyRef") - return nil, err - } - cmKey, err := substituteConfigMapKeyRefParam(inParam.ValueFrom.ConfigMapKeyRef.Key, globalParams) - if err != nil { - log.WithError(err).Error("unable to substitute key for ConfigMapKeyRef") - return nil, err - } - - cmValue, err := GetConfigMapValue(configMapInformer, namespace, cmName, cmKey) - if err != nil { - if inParam.ValueFrom.Default != nil && errors.IsCode(errors.CodeNotFound, err) { - inParam.Value = inParam.ValueFrom.Default - } else { - return nil, errors.Errorf(errors.CodeBadRequest, "unable to retrieve inputs.parameters.%s from ConfigMap: %s", inParam.Name, err) - } - } else { - inParam.Value = wfv1.AnyStringPtr(cmValue) - } - } - } else { - if inParam.Value == nil { - return nil, errors.Errorf(errors.CodeBadRequest, "inputs.parameters.%s was not supplied", inParam.Name) - } - } - - newTmpl.Inputs.Parameters[i] = inParam - } - - // Performs substitutions of input artifacts - artifacts := newTmpl.Inputs.Artifacts - for i, inArt := range artifacts { - - argArt := args.GetArtifactByName(inArt.Name) - - if !inArt.Optional && !inArt.HasLocationOrKey() { - // artifact must be supplied - if argArt == nil { - return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s was not supplied", inArt.Name) - } - if (argArt.From == "" || argArt.FromExpression == "") && !argArt.HasLocationOrKey() && !validateOnly { - return nil, errors.Errorf(errors.CodeBadRequest, "inputs.artifacts.%s missing location information", inArt.Name) - } - } - if argArt != nil { - artifacts[i] = *argArt - artifacts[i].Path = inArt.Path - artifacts[i].Mode = inArt.Mode - artifacts[i].RecurseMode = inArt.RecurseMode - } - } - - return SubstituteParams(newTmpl, globalParams, localParams) -} - -// substituteConfigMapKeyRefParam checks if ConfigMapKeyRef's key is a param and performs the substitution.
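Before the implementation below, a standalone sketch of the `{{...}}` unwrapping it performs (errors are simplified to plain fmt errors instead of argo error codes; the parameter name is illustrative).

package main

import (
	"fmt"
	"strings"
)

// substituteParam resolves "{{key}}" against the given parameters and
// returns non-templated input unchanged.
func substituteParam(in string, globalParams map[string]string) (string, error) {
	if strings.HasPrefix(in, "{{") && strings.HasSuffix(in, "}}") {
		k := strings.Trim(strings.TrimSuffix(strings.TrimPrefix(in, "{{"), "}}"), " ")
		v, ok := globalParams[k]
		if !ok {
			return "", fmt.Errorf("parameter %s not found", k)
		}
		return v, nil
	}
	return in, nil
}

func main() {
	params := map[string]string{"workflow.parameters.cm-name": "my-config"}
	name, _ := substituteParam("{{workflow.parameters.cm-name}}", params)
	fmt.Println(name) // my-config
}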
-func substituteConfigMapKeyRefParam(in string, globalParams Parameters) (string, error) { - if strings.HasPrefix(in, "{{") && strings.HasSuffix(in, "}}") { - k := strings.TrimSuffix(strings.TrimPrefix(in, "{{"), "}}") - k = strings.Trim(k, " ") - - v, ok := globalParams[k] - if !ok { - err := errors.InternalError(fmt.Sprintf("parameter %s not found", k)) - log.WithError(err).Error() - return "", err - } - return v, nil - } - return in, nil -} - -// SubstituteParams returns a new copy of the template with global, pod, and input parameters substituted -func SubstituteParams(tmpl *wfv1.Template, globalParams, localParams Parameters) (*wfv1.Template, error) { - tmplBytes, err := json.Marshal(tmpl) - if err != nil { - return nil, errors.InternalWrapError(err) - } - // First replace globals & locals, then replace inputs because globals could be referenced in the inputs - replaceMap := globalParams.Merge(localParams) - globalReplacedTmplStr, err := template.Replace(string(tmplBytes), replaceMap, true) - if err != nil { - return nil, err - } - var globalReplacedTmpl wfv1.Template - err = json.Unmarshal([]byte(globalReplacedTmplStr), &globalReplacedTmpl) - if err != nil { - return nil, errors.InternalWrapError(err) - } - // Now replace the rest of substitutions (the ones that can be made) in the template - replaceMap = make(map[string]string) - for _, inParam := range globalReplacedTmpl.Inputs.Parameters { - if inParam.Value == nil && inParam.ValueFrom == nil { - return nil, errors.InternalErrorf("inputs.parameters.%s had no value", inParam.Name) - } else if inParam.Value != nil { - replaceMap["inputs.parameters."+inParam.Name] = inParam.Value.String() - } - } - // allow {{inputs.parameters}} to fetch the entire input parameters list as JSON - jsonInputParametersBytes, err := json.Marshal(globalReplacedTmpl.Inputs.Parameters) - if err != nil { - return nil, errors.InternalWrapError(err) - } - replaceMap["inputs.parameters"] = string(jsonInputParametersBytes) - for _, inArt := range globalReplacedTmpl.Inputs.Artifacts { - if inArt.Path != "" { - replaceMap["inputs.artifacts."+inArt.Name+".path"] = inArt.Path - } - } - for _, outArt := range globalReplacedTmpl.Outputs.Artifacts { - if outArt.Path != "" { - replaceMap["outputs.artifacts."+outArt.Name+".path"] = outArt.Path - } - } - for _, param := range globalReplacedTmpl.Outputs.Parameters { - if param.ValueFrom != nil && param.ValueFrom.Path != "" { - replaceMap["outputs.parameters."+param.Name+".path"] = param.ValueFrom.Path - } - } - - s, err := template.Replace(globalReplacedTmplStr, replaceMap, true) - if err != nil { - return nil, err - } - var newTmpl wfv1.Template - err = json.Unmarshal([]byte(s), &newTmpl) - if err != nil { - return nil, errors.InternalWrapError(err) - } - return &newTmpl, nil -} - -// RunCommand is a convenience function to run/log a command and log the stderr upon failure -func RunCommand(name string, arg ...string) ([]byte, error) { - cmd := exec.Command(name, arg...) - cmdStr := strings.Join(cmd.Args, " ") - log.Info(cmdStr) - out, err := cmd.Output() - if err != nil { - if exErr, ok := err.(*exec.ExitError); ok { - errOutput := string(exErr.Stderr) - log.Errorf("`%s` failed: %s", cmdStr, errOutput) - return nil, errors.InternalError(strings.TrimSpace(errOutput)) - } - return nil, errors.InternalWrapError(err) - } - return out, nil -} - -// RunShellCommand is a convenience function to use RunCommand for shell executions. It's os-specific -// and runs `cmd` in windows. 
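A hedged usage sketch of the OS switch in the helper below, re-implemented locally without the argo error wrapping (the echo command is illustrative).

package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// runShell runs a script through `sh -c`, or `cmd /c` on Windows.
func runShell(script string) ([]byte, error) {
	name, flag := "sh", "-c"
	if runtime.GOOS == "windows" {
		name, flag = "cmd", "/c"
	}
	return exec.Command(name, flag, script).Output()
}

func main() {
	out, err := runShell("echo hello")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // hello
}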
-func RunShellCommand(arg ...string) ([]byte, error) { - name := "sh" - shellFlag := "-c" - if runtime.GOOS == "windows" { - name = "cmd" - shellFlag = "/c" - } - arg = append([]string{shellFlag}, arg...) - return RunCommand(name, arg...) -} - -const deleteRetries = 3 - -// DeletePod deletes a pod. Ignores NotFound error -func DeletePod(ctx context.Context, c kubernetes.Interface, podName, namespace string) error { - var err error - for attempt := 0; attempt < deleteRetries; attempt++ { - err = c.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{}) - if err == nil || apierr.IsNotFound(err) { - return nil - } - time.Sleep(100 * time.Millisecond) - } - return err -} - -// GetTemplateGetterString returns string of TemplateHolder. -func GetTemplateGetterString(getter wfv1.TemplateHolder) string { - return fmt.Sprintf("%T (namespace=%s,name=%s)", getter, getter.GetNamespace(), getter.GetName()) -} - -// GetTemplateHolderString returns string of TemplateReferenceHolder. -func GetTemplateHolderString(tmplHolder wfv1.TemplateReferenceHolder) string { - if tmplHolder.GetTemplate() != nil { - return fmt.Sprintf("%T inlined", tmplHolder) - } else if x := tmplHolder.GetTemplateName(); x != "" { - return fmt.Sprintf("%T (%s)", tmplHolder, x) - } else if x := tmplHolder.GetTemplateRef(); x != nil { - return fmt.Sprintf("%T (%s/%s#%v)", tmplHolder, x.Name, x.Template, x.ClusterScope) - } else { - return fmt.Sprintf("%T invalid (https://argoproj.github.io/argo-workflows/templates/)", tmplHolder) - } -} - -func GenerateOnExitNodeName(parentNodeName string) string { - return fmt.Sprintf("%s.onExit", parentNodeName) -} - -func IsDone(un *unstructured.Unstructured) bool { - return un.GetDeletionTimestamp() == nil && - un.GetLabels()[LabelKeyCompleted] == "true" && - un.GetLabels()[LabelKeyWorkflowArchivingStatus] != "Pending" -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/hydrator/hydrator.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/hydrator/hydrator.go deleted file mode 100644 index c5b922c7219..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/hydrator/hydrator.go +++ /dev/null @@ -1,127 +0,0 @@ -package hydrator - -import ( - "fmt" - "os" - "time" - - log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/argoproj/argo-workflows/v3/persist/sqldb" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" - waitutil "github.com/argoproj/argo-workflows/v3/util/wait" - "github.com/argoproj/argo-workflows/v3/workflow/packer" -) - -type Interface interface { - // whether or not the workflow is hydrated - IsHydrated(wf *wfv1.Workflow) bool - // hydrate the workflow - doing nothing if it is already hydrated - Hydrate(wf *wfv1.Workflow) error - // dehydrate the workflow - doing nothing if already dehydrated - Dehydrate(wf *wfv1.Workflow) error - // hydrate the workflow using the provided nodes - HydrateWithNodes(wf *wfv1.Workflow, nodes wfv1.Nodes) -} - -func New(offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo) Interface { - return &hydrator{offloadNodeStatusRepo} -} - -var alwaysOffloadNodeStatus = os.Getenv("ALWAYS_OFFLOAD_NODE_STATUS") == "true" - -func init() { - log.WithField("alwaysOffloadNodeStatus", alwaysOffloadNodeStatus).Debug("Hydrator config") -} - -type hydrator struct { - offloadNodeStatusRepo sqldb.OffloadNodeStatusRepo -} - -func (h hydrator) IsHydrated(wf *wfv1.Workflow) bool { - return
wf.Status.CompressedNodes == "" && !wf.Status.IsOffloadNodeStatus() -} - -func (h hydrator) HydrateWithNodes(wf *wfv1.Workflow, offloadedNodes wfv1.Nodes) { - wf.Status.Nodes = offloadedNodes - wf.Status.CompressedNodes = "" - wf.Status.OffloadNodeStatusVersion = "" -} - -// should be <10s -// Retry Seconds -// 1 0.10 -// 2 0.30 -// 3 0.70 -// 4 1.50 -// 5 3.10 -var readRetry = wait.Backoff{Steps: 5, Duration: 100 * time.Millisecond, Factor: 2} - -// needs to be long -// http://backoffcalculator.com/?attempts=5&rate=2&interval=1 -// Retry Seconds -// 1 1.00 -// 2 3.00 -// 3 7.00 -// 4 15.00 -// 5 31.00 -var writeRetry = wait.Backoff{Steps: 5, Duration: 1 * time.Second, Factor: 2} - -func (h hydrator) Hydrate(wf *wfv1.Workflow) error { - err := packer.DecompressWorkflow(wf) - if err != nil { - return err - } - if wf.Status.IsOffloadNodeStatus() { - var offloadedNodes wfv1.Nodes - err := waitutil.Backoff(readRetry, func() (bool, error) { - offloadedNodes, err = h.offloadNodeStatusRepo.Get(string(wf.UID), wf.GetOffloadNodeStatusVersion()) - return !errorsutil.IsTransientErr(err), err - }) - if err != nil { - return err - } - h.HydrateWithNodes(wf, offloadedNodes) - log.WithField("Workflow Size", wf.Size()).Info("Workflow hydrated") - } - - return nil -} - -func (h hydrator) Dehydrate(wf *wfv1.Workflow) error { - if !h.IsHydrated(wf) { - return nil - } - var err error - log.WithField("Workflow Size", wf.Size()).Info("Workflow to be dehydrated") - if !alwaysOffloadNodeStatus { - err = packer.CompressWorkflowIfNeeded(wf) - if err == nil { - wf.Status.OffloadNodeStatusVersion = "" - return nil - } - } - if packer.IsTooLargeError(err) || alwaysOffloadNodeStatus { - var offloadVersion string - var errMsg string - if err != nil { - errMsg += err.Error() - } - offloadErr := waitutil.Backoff(writeRetry, func() (bool, error) { - var offloadErr error - offloadVersion, offloadErr = h.offloadNodeStatusRepo.Save(string(wf.UID), wf.Namespace, wf.Status.Nodes) - return !errorsutil.IsTransientErr(offloadErr), offloadErr - }) - if offloadErr != nil { - return fmt.Errorf("%sTried to offload but encountered error: %s", errMsg, offloadErr.Error()) - } - wf.Status.Nodes = nil - wf.Status.CompressedNodes = "" - wf.Status.OffloadNodeStatusVersion = offloadVersion - return nil - } else { - return err - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/k8s_request_total_metric.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/k8s_request_total_metric.go deleted file mode 100644 index ea241787b43..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/k8s_request_total_metric.go +++ /dev/null @@ -1,45 +0,0 @@ -package metrics - -import ( - "net/http" - "strconv" - - "github.com/prometheus/client_golang/prometheus" - "k8s.io/client-go/rest" - - "github.com/argoproj/argo-workflows/v3/util/k8s" -) - -var K8sRequestTotalMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "k8s_request_total", - Help: "Number of kubernetes requests executed. 
https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_k8s_request_total", - }, - []string{"kind", "verb", "status_code"}, -) - -type metricsRoundTripper struct { - roundTripper http.RoundTripper -} - -func (m metricsRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { - x, err := m.roundTripper.RoundTrip(r) - if x != nil { - verb, kind := k8s.ParseRequest(r) - K8sRequestTotalMetric.WithLabelValues(kind, verb, strconv.Itoa(x.StatusCode)).Inc() - } - return x, err -} - -func AddMetricsTransportWrapper(config *rest.Config) *rest.Config { - wrap := config.WrapTransport - config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wrap != nil { - rt = wrap(rt) - } - return &metricsRoundTripper{roundTripper: rt} - } - return config -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/metrics.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/metrics.go deleted file mode 100644 index dbd9bd10559..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/metrics.go +++ /dev/null @@ -1,287 +0,0 @@ -package metrics - -import ( - "fmt" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/util/workqueue" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -const ( - argoNamespace = "argo" - workflowsSubsystem = "workflows" - DefaultMetricsServerPort = 9090 - DefaultMetricsServerPath = "/metrics" -) - -type ServerConfig struct { - Enabled bool - Path string - Port int - TTL time.Duration - IgnoreErrors bool - Secure bool -} - -func (s ServerConfig) SameServerAs(other ServerConfig) bool { - return s.Port == other.Port && s.Path == other.Path && s.Enabled && other.Enabled && s.Secure == other.Secure -} - -type metric struct { - metric prometheus.Metric - lastUpdated time.Time -} - -type Metrics struct { - // Ensures mutual exclusion in workflows map - mutex sync.RWMutex - metricsConfig ServerConfig - telemetryConfig ServerConfig - - workflowsProcessed prometheus.Counter - podsByPhase map[corev1.PodPhase]prometheus.Gauge - workflowsByPhase map[v1alpha1.NodePhase]prometheus.Gauge - workflows map[string][]string - operationDurations prometheus.Histogram - errors map[ErrorCause]prometheus.Counter - customMetrics map[string]metric - workqueueMetrics map[string]prometheus.Metric - workersBusy map[string]prometheus.Gauge - - // Used to quickly check if a metric desc is already used by the system - defaultMetricDescs map[string]bool - metricNameHelps map[string]string - logMetric *prometheus.CounterVec -} - -func (m *Metrics) Levels() []log.Level { - return []log.Level{log.InfoLevel, log.WarnLevel, log.ErrorLevel} -} - -func (m *Metrics) Fire(entry *log.Entry) error { - m.logMetric.WithLabelValues(entry.Level.String()).Inc() - return nil -} - -var _ prometheus.Collector = &Metrics{} - -func New(metricsConfig, telemetryConfig ServerConfig) *Metrics { - metrics := &Metrics{ - metricsConfig: metricsConfig, - telemetryConfig: telemetryConfig, - workflowsProcessed: newCounter("workflows_processed_count", "Number of workflow updates processed", nil), - podsByPhase: getPodPhaseGauges(), - workflowsByPhase: getWorkflowPhaseGauges(), - workflows: make(map[string][]string), - operationDurations: newHistogram("operation_duration_seconds", "Histogram of durations of operations", nil, []float64{0.1, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0}), - errors: getErrorCounters(), - 
customMetrics: make(map[string]metric), - workqueueMetrics: make(map[string]prometheus.Metric), - workersBusy: make(map[string]prometheus.Gauge), - defaultMetricDescs: make(map[string]bool), - metricNameHelps: make(map[string]string), - logMetric: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_messages", - Help: "Total number of log messages.", - }, []string{"level"}), - } - - for _, metric := range metrics.allMetrics() { - metrics.defaultMetricDescs[metric.Desc().String()] = true - } - - for _, level := range metrics.Levels() { - metrics.logMetric.WithLabelValues(level.String()) - } - - log.AddHook(metrics) - - return metrics -} - -func (m *Metrics) allMetrics() []prometheus.Metric { - m.mutex.RLock() - defer m.mutex.RUnlock() - - allMetrics := []prometheus.Metric{ - m.workflowsProcessed, - m.operationDurations, - } - for _, metric := range m.workflowsByPhase { - allMetrics = append(allMetrics, metric) - } - for _, metric := range m.podsByPhase { - allMetrics = append(allMetrics, metric) - } - for _, metric := range m.errors { - allMetrics = append(allMetrics, metric) - } - for _, metric := range m.workqueueMetrics { - allMetrics = append(allMetrics, metric) - } - for _, metric := range m.workersBusy { - allMetrics = append(allMetrics, metric) - } - for _, metric := range m.customMetrics { - allMetrics = append(allMetrics, metric.metric) - } - return allMetrics -} - -func (m *Metrics) StopRealtimeMetricsForKey(key string) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if _, exists := m.workflows[key]; !exists { - return - } - - realtimeMetrics := m.workflows[key] - for _, metric := range realtimeMetrics { - delete(m.customMetrics, metric) - } - - delete(m.workflows, key) -} - -func (m *Metrics) OperationCompleted(durationSeconds float64) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.operationDurations.Observe(durationSeconds) -} - -func (m *Metrics) GetCustomMetric(key string) prometheus.Metric { - m.mutex.RLock() - defer m.mutex.RUnlock() - - // It's okay to return nil metrics in this function - return m.customMetrics[key].metric -} - -func (m *Metrics) UpsertCustomMetric(key string, ownerKey string, newMetric prometheus.Metric, realtime bool) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - metricDesc := newMetric.Desc().String() - if _, inUse := m.defaultMetricDescs[metricDesc]; inUse { - return fmt.Errorf("metric '%s' is already in use by the system, please use a different name", newMetric.Desc()) - } - name, help := recoverMetricNameAndHelpFromDesc(metricDesc) - if existingHelp, inUse := m.metricNameHelps[name]; inUse && help != existingHelp { - return fmt.Errorf("metric '%s' has help string '%s' but should have '%s' (help strings must be identical for metrics of the same name)", name, help, existingHelp) - } else { - m.metricNameHelps[name] = help - } - m.customMetrics[key] = metric{metric: newMetric, lastUpdated: time.Now()} - - // If this is a realtime metric, track it - if realtime { - m.workflows[ownerKey] = append(m.workflows[ownerKey], key) - } - - return nil -} - -func (m *Metrics) SetWorkflowPhaseGauge(phase v1alpha1.NodePhase, num int) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.workflowsByPhase[phase].Set(float64(num)) -} - -func (m *Metrics) SetPodPhaseGauge(phase corev1.PodPhase, num int) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.podsByPhase[phase].Set(float64(num)) -} - -type ErrorCause string - -const ( - ErrorCauseOperationPanic ErrorCause = "OperationPanic" - ErrorCauseCronWorkflowSubmissionError ErrorCause = 
"CronWorkflowSubmissionError" - ErrorCauseCronWorkflowSpecError ErrorCause = "CronWorkflowSpecError" -) - -func (m *Metrics) OperationPanic() { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.errors[ErrorCauseOperationPanic].Inc() -} - -func (m *Metrics) CronWorkflowSubmissionError() { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.errors[ErrorCauseCronWorkflowSubmissionError].Inc() -} - -func (m *Metrics) CronWorkflowSpecError() { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.errors[ErrorCauseCronWorkflowSpecError].Inc() -} - -// Act as a metrics provider for a workflow queue -var _ workqueue.MetricsProvider = &Metrics{} - -func (m *Metrics) NewDepthMetric(name string) workqueue.GaugeMetric { - m.mutex.Lock() - defer m.mutex.Unlock() - - key := fmt.Sprintf("%s-depth", name) - if _, ok := m.workqueueMetrics[key]; !ok { - m.workqueueMetrics[key] = newGauge("queue_depth_count", "Depth of the queue", map[string]string{"queue_name": name}) - } - return m.workqueueMetrics[key].(prometheus.Gauge) -} - -func (m *Metrics) NewAddsMetric(name string) workqueue.CounterMetric { - m.mutex.Lock() - defer m.mutex.Unlock() - - key := fmt.Sprintf("%s-adds", name) - if _, ok := m.workqueueMetrics[key]; !ok { - m.workqueueMetrics[key] = newCounter("queue_adds_count", "Adds to the queue", map[string]string{"queue_name": name}) - } - return m.workqueueMetrics[key].(prometheus.Counter) -} - -func (m *Metrics) NewLatencyMetric(name string) workqueue.HistogramMetric { - m.mutex.Lock() - defer m.mutex.Unlock() - - key := fmt.Sprintf("%s-latency", name) - if _, ok := m.workqueueMetrics[key]; !ok { - m.workqueueMetrics[key] = newHistogram("queue_latency", "Time objects spend waiting in the queue", map[string]string{"queue_name": name}, []float64{1.0, 5.0, 20.0, 60.0, 180.0}) - } - return m.workqueueMetrics[key].(prometheus.Histogram) -} - -// These metrics are not relevant to be exposed -type noopMetric struct{} - -func (noopMetric) Inc() {} -func (noopMetric) Dec() {} -func (noopMetric) Set(float64) {} -func (noopMetric) Observe(float64) {} - -func (m *Metrics) NewRetriesMetric(name string) workqueue.CounterMetric { return noopMetric{} } -func (m *Metrics) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return noopMetric{} } -func (m *Metrics) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { - return noopMetric{} -} - -func (m *Metrics) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { - return noopMetric{} -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/pod_missing_metric.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/pod_missing_metric.go deleted file mode 100644 index e1243b53c25..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/pod_missing_metric.go +++ /dev/null @@ -1,12 +0,0 @@ -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -var PodMissingMetric = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: argoNamespace, - Name: "pod_missing", - Help: "Incidents of pod missing. 
https://argoproj.github.io/argo-workflows/metrics/#argo_pod_missing", - }, - []string{"recently_started", "node_phase"}, -) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/server.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/server.go deleted file mode 100644 index 46bcdff8ee0..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/server.go +++ /dev/null @@ -1,134 +0,0 @@ -package metrics - -import ( - "context" - "crypto/tls" - "fmt" - "net/http" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" - "github.com/prometheus/client_golang/prometheus/promhttp" - log "github.com/sirupsen/logrus" - runtimeutil "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/utils/env" - - tlsutils "github.com/argoproj/argo-workflows/v3/util/tls" -) - -// RunServer starts a metrics server -func (m *Metrics) RunServer(ctx context.Context) { - defer runtimeutil.HandleCrash(runtimeutil.PanicHandlers...) - - if !m.metricsConfig.Enabled { - // If metrics aren't enabled, return - return - } - - metricsRegistry := prometheus.NewRegistry() - metricsRegistry.MustRegister(m) - - if m.metricsConfig.SameServerAs(m.telemetryConfig) { - // If the metrics and telemetry servers are the same, run both of them in the same instance - metricsRegistry.MustRegister(collectors.NewGoCollector()) - } else if m.telemetryConfig.Enabled { - // If the telemetry server is different -- and it's enabled -- run each on its own instance - telemetryRegistry := prometheus.NewRegistry() - telemetryRegistry.MustRegister(collectors.NewGoCollector()) - go runServer(m.telemetryConfig, telemetryRegistry, ctx) - } - - // Run the metrics server - go runServer(m.metricsConfig, metricsRegistry, ctx) - - go m.garbageCollector(ctx) -} - -func runServer(config ServerConfig, registry *prometheus.Registry, ctx context.Context) { - var handlerOpts promhttp.HandlerOpts - if config.IgnoreErrors { - handlerOpts.ErrorHandling = promhttp.ContinueOnError - } - - mux := http.NewServeMux() - mux.Handle(config.Path, promhttp.HandlerFor(registry, handlerOpts)) - srv := &http.Server{Addr: fmt.Sprintf(":%v", config.Port), Handler: mux} - - if config.Secure { - tlsMinVersion, err := env.GetInt("TLS_MIN_VERSION", tls.VersionTLS12) - if err != nil { - panic(err) - } - log.Infof("Generating Self Signed TLS Certificates for Telemetry Servers") - tlsConfig, err := tlsutils.GenerateX509KeyPairTLSConfig(uint16(tlsMinVersion)) - if err != nil { - panic(err) - } - srv.TLSConfig = tlsConfig - go func() { - log.Infof("Starting prometheus metrics server at localhost:%v%s", config.Port, config.Path) - if err := srv.ListenAndServeTLS("", ""); err != nil { - panic(err) - } - }() - } else { - go func() { - log.Infof("Starting prometheus metrics server at localhost:%v%s", config.Port, config.Path) - if err := srv.ListenAndServe(); err != nil { - panic(err) - } - }() - } - - // Waiting for stop signal - <-ctx.Done() - - // Shutdown the server gracefully with a 1 second timeout - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - if err := srv.Shutdown(ctx); err != nil { - log.Infof("Unable to shutdown metrics server at localhost:%v%s", config.Port, config.Path) - } -} - -func (m *Metrics) Describe(ch chan<- *prometheus.Desc) { - for _, metric := range m.allMetrics() { - ch <- metric.Desc() - } - m.logMetric.Describe(ch) - K8sRequestTotalMetric.Describe(ch) - PodMissingMetric.Describe(ch) - 
WorkflowConditionMetric.Describe(ch) -} - -func (m *Metrics) Collect(ch chan<- prometheus.Metric) { - for _, metric := range m.allMetrics() { - ch <- metric - } - m.logMetric.Collect(ch) - K8sRequestTotalMetric.Collect(ch) - PodMissingMetric.Collect(ch) - WorkflowConditionMetric.Collect(ch) -} - -func (m *Metrics) garbageCollector(ctx context.Context) { - if m.metricsConfig.TTL == 0 { - return - } - - ticker := time.NewTicker(m.metricsConfig.TTL) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - for key, metric := range m.customMetrics { - if time.Since(metric.lastUpdated) > m.metricsConfig.TTL { - delete(m.customMetrics, key) - } - } - } - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/util.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/util.go deleted file mode 100644 index b4238d1127d..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/util.go +++ /dev/null @@ -1,270 +0,0 @@ -package metrics - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/common/model" - v1 "k8s.io/api/core/v1" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -var ( - invalidMetricNameError = "metric name is invalid: names may only contain alphanumeric characters, '_', or ':'" - invalidMetricLabelrror = "metric label '%s' is invalid: keys may only contain alphanumeric characters, '_', or ':'" - descRegex = regexp.MustCompile(fmt.Sprintf(`Desc{fqName: "%s_%s_(.+?)", help: "(.+?)", constLabels: {`, argoNamespace, workflowsSubsystem)) -) - -type RealTimeMetric struct { - Func func() float64 -} - -func ConstructOrUpdateMetric(metric prometheus.Metric, metricSpec *wfv1.Prometheus) (prometheus.Metric, error) { - if !IsValidMetricName(metricSpec.Name) { - return nil, fmt.Errorf(invalidMetricNameError) - } - - switch metricSpec.GetMetricType() { - case wfv1.MetricTypeGauge: - return constructOrUpdateGaugeMetric(metric, metricSpec) - case wfv1.MetricTypeHistogram: - return constructOrUpdateHistogramMetric(metric, metricSpec) - case wfv1.MetricTypeCounter: - return constructOrUpdateCounterMetric(metric, metricSpec) - default: - return nil, fmt.Errorf("invalid metric spec") - } -} - -func ConstructRealTimeGaugeMetric(metricSpec *wfv1.Prometheus, valueFunc func() float64) (prometheus.Metric, error) { - if !IsValidMetricName(metricSpec.Name) { - return nil, fmt.Errorf(invalidMetricNameError) - } - - gaugeOpts := prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: metricSpec.Name, - Help: metricSpec.Help, - ConstLabels: metricSpec.GetMetricLabels(), - } - - return prometheus.NewGaugeFunc(gaugeOpts, valueFunc), nil -} - -func constructOrUpdateCounterMetric(metric prometheus.Metric, metricSpec *wfv1.Prometheus) (prometheus.Metric, error) { - if metric == nil { - labels := metricSpec.GetMetricLabels() - if err := ValidateMetricLabels(labels); err != nil { - return nil, err - } - metric = newCounter(metricSpec.Name, metricSpec.Help, labels) - } - - val, err := strconv.ParseFloat(metricSpec.Counter.Value, 64) - if err != nil { - return nil, err - } - - counter := metric.(prometheus.Counter) - counter.Add(val) - return counter, nil -} - -func constructOrUpdateGaugeMetric(metric prometheus.Metric, metricSpec *wfv1.Prometheus) (prometheus.Metric, error) { - if metric == nil { - labels := metricSpec.GetMetricLabels() - if err := 
ValidateMetricLabels(labels); err != nil { - return nil, err - } - metric = newGauge(metricSpec.Name, metricSpec.Help, labels) - } - - val, err := strconv.ParseFloat(metricSpec.Gauge.Value, 64) - if err != nil { - return nil, err - } - - gauge := metric.(prometheus.Gauge) - gauge.Set(val) - return gauge, nil -} - -func constructOrUpdateHistogramMetric(metric prometheus.Metric, metricSpec *wfv1.Prometheus) (prometheus.Metric, error) { - if metric == nil { - labels := metricSpec.GetMetricLabels() - if err := ValidateMetricLabels(labels); err != nil { - return nil, err - } - metric = newHistogram(metricSpec.Name, metricSpec.Help, labels, metricSpec.Histogram.GetBuckets()) - } - - val, err := strconv.ParseFloat(metricSpec.Histogram.Value, 64) - if err != nil { - return nil, err - } - - hist := metric.(prometheus.Histogram) - hist.Observe(val) - return hist, nil -} - -func newCounter(name, help string, labels map[string]string) prometheus.Counter { - counterOpts := prometheus.CounterOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: name, - Help: help, - ConstLabels: labels, - } - m := prometheus.NewCounter(counterOpts) - mustBeRecoverable(name, help, m) - return m -} - -func newGauge(name, help string, labels map[string]string) prometheus.Gauge { - gaugeOpts := prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: name, - Help: help, - ConstLabels: labels, - } - m := prometheus.NewGauge(gaugeOpts) - mustBeRecoverable(name, help, m) - return m -} - -func newHistogram(name, help string, labels map[string]string, buckets []float64) prometheus.Histogram { - histOpts := prometheus.HistogramOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: name, - Help: help, - ConstLabels: labels, - Buckets: buckets, - } - m := prometheus.NewHistogram(histOpts) - mustBeRecoverable(name, help, m) - return m -} - -func getWorkflowPhaseGauges() map[wfv1.NodePhase]prometheus.Gauge { - getOptsByPhase := func(phase wfv1.NodePhase) prometheus.GaugeOpts { - return prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "count", - Help: "Number of Workflows currently accessible by the controller by status (refreshed every 15s)", - ConstLabels: map[string]string{"status": string(phase)}, - } - } - return map[wfv1.NodePhase]prometheus.Gauge{ - wfv1.NodePending: prometheus.NewGauge(getOptsByPhase(wfv1.NodePending)), - wfv1.NodeRunning: prometheus.NewGauge(getOptsByPhase(wfv1.NodeRunning)), - wfv1.NodeSucceeded: prometheus.NewGauge(getOptsByPhase(wfv1.NodeSucceeded)), - wfv1.NodeFailed: prometheus.NewGauge(getOptsByPhase(wfv1.NodeFailed)), - wfv1.NodeError: prometheus.NewGauge(getOptsByPhase(wfv1.NodeError)), - } -} - -func getPodPhaseGauges() map[v1.PodPhase]prometheus.Gauge { - getOptsByPhase := func(phase v1.PodPhase) prometheus.GaugeOpts { - return prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "pods_count", - Help: "Number of Pods from Workflows currently accessible by the controller by status (refreshed every 15s)", - ConstLabels: map[string]string{"status": string(phase)}, - } - } - return map[v1.PodPhase]prometheus.Gauge{ - v1.PodPending: prometheus.NewGauge(getOptsByPhase(v1.PodPending)), - v1.PodRunning: prometheus.NewGauge(getOptsByPhase(v1.PodRunning)), - // v1.PodSucceeded: prometheus.NewGauge(getOptsByPhase(v1.PodSucceeded)), - // v1.PodFailed: prometheus.NewGauge(getOptsByPhase(v1.PodFailed)), - } -} - -func getErrorCounters() 
map[ErrorCause]prometheus.Counter { - getOptsByPahse := func(phase ErrorCause) prometheus.CounterOpts { - return prometheus.CounterOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "error_count", - Help: "Number of errors encountered by the controller by cause", - ConstLabels: map[string]string{"cause": string(phase)}, - } - } - return map[ErrorCause]prometheus.Counter{ - ErrorCauseOperationPanic: prometheus.NewCounter(getOptsByPahse(ErrorCauseOperationPanic)), - ErrorCauseCronWorkflowSubmissionError: prometheus.NewCounter(getOptsByPahse(ErrorCauseCronWorkflowSubmissionError)), - ErrorCauseCronWorkflowSpecError: prometheus.NewCounter(getOptsByPahse(ErrorCauseCronWorkflowSpecError)), - } -} - -func getWorkersBusy(name string) prometheus.Gauge { - return prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "workers_busy_count", - Help: "Number of workers currently busy", - ConstLabels: map[string]string{"worker_type": name}, - }) -} - -func IsValidMetricName(name string) bool { - return model.IsValidMetricName(model.LabelValue(name)) -} - -func ValidateMetricValues(metric *wfv1.Prometheus) error { - if metric.Gauge != nil { - if metric.Gauge.Value == "" { - return errors.New("missing gauge.value") - } - if metric.Gauge.Realtime != nil && *metric.Gauge.Realtime { - if strings.Contains(metric.Gauge.Value, "resourcesDuration.") { - return errors.New("'resourcesDuration.*' metrics cannot be used in real-time") - } - } - } - if metric.Counter != nil && metric.Counter.Value == "" { - return errors.New("missing counter.value") - } - if metric.Histogram != nil && metric.Histogram.Value == "" { - return errors.New("missing histogram.value") - } - return nil -} - -func ValidateMetricLabels(metrics map[string]string) error { - for name := range metrics { - if !IsValidMetricName(name) { - return fmt.Errorf(invalidMetricLabelrror, name) - } - } - return nil -} - -func mustBeRecoverable(name, help string, metric prometheus.Metric) { - recoveredName, recoveredHelp := recoverMetricNameAndHelpFromDesc(metric.Desc().String()) - if name != recoveredName { - panic(fmt.Sprintf("unable to recover metric name from desc provided by prometheus: expected '%s' got '%s'", name, recoveredName)) - } - if help != recoveredHelp { - panic(fmt.Sprintf("unable to recover metric help from desc provided by prometheus: expected '%s' got '%s'", help, recoveredHelp)) - } -} - -func recoverMetricNameAndHelpFromDesc(desc string) (string, string) { - finds := descRegex.FindStringSubmatch(desc) - if len(finds) != 3 { - panic(fmt.Sprintf("malformed desc provided by prometheus: '%s' parsed to %v", desc, finds)) - } - return finds[1], strings.ReplaceAll(finds[2], `\"`, `"`) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/work_queue.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/work_queue.go deleted file mode 100644 index 0cc83354bfb..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/work_queue.go +++ /dev/null @@ -1,50 +0,0 @@ -package metrics - -import "k8s.io/client-go/util/workqueue" - -type workersBusyRateLimiterWorkQueue struct { - workqueue.RateLimitingInterface - workerType string - metrics *Metrics -} - -func (m *Metrics) RateLimiterWithBusyWorkers(workQueue workqueue.RateLimiter, queueName string) workqueue.RateLimitingInterface { - m.newWorker(queueName) - return workersBusyRateLimiterWorkQueue{ - RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(workQueue, 
queueName), - workerType: queueName, - metrics: m, - } -} - -func (m *Metrics) newWorker(workerType string) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.workersBusy[workerType] = getWorkersBusy(workerType) -} - -func (m *Metrics) workerBusy(workerType string) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.workersBusy[workerType].Inc() -} - -func (m *Metrics) workerFree(workerType string) { - m.mutex.Lock() - defer m.mutex.Unlock() - - m.workersBusy[workerType].Dec() -} - -func (w workersBusyRateLimiterWorkQueue) Get() (interface{}, bool) { - item, shutdown := w.RateLimitingInterface.Get() - w.metrics.workerBusy(w.workerType) - return item, shutdown -} - -func (w workersBusyRateLimiterWorkQueue) Done(item interface{}) { - w.RateLimitingInterface.Done(item) - w.metrics.workerFree(w.workerType) -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/workflow_condition_metric.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/workflow_condition_metric.go deleted file mode 100644 index cd1462d80a1..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/metrics/workflow_condition_metric.go +++ /dev/null @@ -1,15 +0,0 @@ -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" -) - -var WorkflowConditionMetric = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: argoNamespace, - Subsystem: workflowsSubsystem, - Name: "workflow_condition", - Help: "Workflow condition. https://argoproj.github.io/argo-workflows/metrics/#argo_workflows_workflow_condition", - }, - []string{"type", "status"}, -) diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/packer/packer.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/packer/packer.go deleted file mode 100644 index a140b8682ce..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/packer/packer.go +++ /dev/null @@ -1,98 +0,0 @@ -package packer - -import ( - "encoding/json" - "fmt" - "os" - "strconv" - "strings" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util/file" -) - -const envVarName = "MAX_WORKFLOW_SIZE" - -func getMaxWorkflowSize() int { - s, _ := strconv.Atoi(os.Getenv(envVarName)) - if s == 0 { - s = 1024 * 1024 - } - return s -} - -func SetMaxWorkflowSize(s int) func() { - _ = os.Setenv(envVarName, strconv.Itoa(s)) - return func() { _ = os.Unsetenv(envVarName) } -} - -func DecompressWorkflow(wf *wfv1.Workflow) error { - if len(wf.Status.Nodes) == 0 && wf.Status.CompressedNodes != "" { - nodeContent, err := file.DecodeDecompressString(wf.Status.CompressedNodes) - if err != nil { - return err - } - err = json.Unmarshal([]byte(nodeContent), &wf.Status.Nodes) - wf.Status.CompressedNodes = "" - return err - } - return nil -} - -// getSize return the entire workflow json string size -func getSize(wf *wfv1.Workflow) (int, error) { - nodeContent, err := json.Marshal(wf) - if err != nil { - return 0, err - } - return len(nodeContent), nil -} - -func IsLargeWorkflow(wf *wfv1.Workflow) (bool, error) { - size, err := getSize(wf) - return size > getMaxWorkflowSize(), err -} - -const tooLarge = "workflow is longer than maximum allowed size." 
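// For orientation, a minimal sketch of how this guard is intended to be used
// just before persisting a workflow (assumes wf is a populated *wfv1.Workflow;
// names are from this package):
//
//	if err := CompressWorkflowIfNeeded(wf); err != nil {
//		if IsTooLargeError(err) {
//			// even the compressed nodes exceed MAX_WORKFLOW_SIZE
//		}
//		return err
//	}
//	// on success, oversized node status lives in wf.Status.CompressedNodes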
- -func IsTooLargeError(err error) bool { - return err != nil && strings.HasPrefix(err.Error(), tooLarge) -} - -func CompressWorkflowIfNeeded(wf *wfv1.Workflow) error { - large, err := IsLargeWorkflow(wf) - if err != nil { - return err - } - if !large { - return nil - } - return compressWorkflow(wf) -} - -func compressWorkflow(wf *wfv1.Workflow) error { - nodes := wf.Status.Nodes - nodeContent, err := json.Marshal(nodes) - if err != nil { - return err - } - wf.Status.CompressedNodes = file.CompressEncodeString(string(nodeContent)) - wf.Status.Nodes = nil - // still too large? - large, err := IsLargeWorkflow(wf) - if err != nil { - wf.Status.CompressedNodes = "" - wf.Status.Nodes = nodes - return err - } - if large { - compressedSize, err := getSize(wf) - wf.Status.CompressedNodes = "" - wf.Status.Nodes = nodes - if err != nil { - return err - } - return fmt.Errorf("%s compressed size %d > maxSize %d", tooLarge, compressedSize, getMaxWorkflowSize()) - } - return nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/templateresolution/context.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/templateresolution/context.go deleted file mode 100644 index 764984b9626..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/templateresolution/context.go +++ /dev/null @@ -1,275 +0,0 @@ -package templateresolution - -import ( - "context" - "fmt" - - log "github.com/sirupsen/logrus" - apierr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/argoproj/argo-workflows/v3/errors" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - typed "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/workflow/common" -) - -// maxResolveDepth is the limit of template reference resolution. -const maxResolveDepth int = 10 - -// workflowTemplateInterfaceWrapper is an internal struct to wrap clientset. -type workflowTemplateInterfaceWrapper struct { - clientset typed.WorkflowTemplateInterface -} - -func WrapWorkflowTemplateInterface(clientset typed.WorkflowTemplateInterface) WorkflowTemplateNamespacedGetter { - return &workflowTemplateInterfaceWrapper{clientset: clientset} -} - -// Get retrieves the WorkflowTemplate of a given name. -func (wrapper *workflowTemplateInterfaceWrapper) Get(name string) (*wfv1.WorkflowTemplate, error) { - ctx := context.TODO() - return wrapper.clientset.Get(ctx, name, metav1.GetOptions{}) -} - -// WorkflowTemplateNamespaceLister helps get WorkflowTemplates. -type WorkflowTemplateNamespacedGetter interface { - // Get retrieves the WorkflowTemplate from the indexer for a given name. - Get(name string) (*wfv1.WorkflowTemplate, error) -} - -// clusterWorkflowTemplateInterfaceWrapper is an internal struct to wrap clientset. -type clusterWorkflowTemplateInterfaceWrapper struct { - clientset typed.ClusterWorkflowTemplateInterface -} - -// WorkflowTemplateNamespaceLister helps get WorkflowTemplates. -type ClusterWorkflowTemplateGetter interface { - // Get retrieves the WorkflowTemplate from the indexer for a given name. 
- Get(name string) (*wfv1.ClusterWorkflowTemplate, error) -} - -func WrapClusterWorkflowTemplateInterface(clusterClientset typed.ClusterWorkflowTemplateInterface) ClusterWorkflowTemplateGetter { - return &clusterWorkflowTemplateInterfaceWrapper{clientset: clusterClientset} -} - -type NullClusterWorkflowTemplateGetter struct{} - -func (n *NullClusterWorkflowTemplateGetter) Get(name string) (*wfv1.ClusterWorkflowTemplate, error) { - return nil, errors.Errorf("", "invalid spec: clusterworkflowtemplates.argoproj.io `%s` is "+ - "forbidden: User cannot get resource 'clusterworkflowtemplates' in API group argoproj.io at the cluster scope", name) -} - -// Get retrieves the ClusterWorkflowTemplate of a given name. -func (wrapper *clusterWorkflowTemplateInterfaceWrapper) Get(name string) (*wfv1.ClusterWorkflowTemplate, error) { - ctx := context.TODO() - return wrapper.clientset.Get(ctx, name, metav1.GetOptions{}) -} - -// Context is a context of template search. -type Context struct { - // wftmplGetter is an interface to get WorkflowTemplates. - wftmplGetter WorkflowTemplateNamespacedGetter - // cwftmplGetter is an interface to get ClusterWorkflowTemplates - cwftmplGetter ClusterWorkflowTemplateGetter - // tmplBase is the base of local template search. - tmplBase wfv1.TemplateHolder - // workflow is the Workflow where templates will be stored - workflow *wfv1.Workflow - // log is a logrus entry. - log *log.Entry -} - -// NewContext returns new Context. -func NewContext(wftmplGetter WorkflowTemplateNamespacedGetter, cwftmplGetter ClusterWorkflowTemplateGetter, tmplBase wfv1.TemplateHolder, workflow *wfv1.Workflow) *Context { - return &Context{ - wftmplGetter: wftmplGetter, - cwftmplGetter: cwftmplGetter, - tmplBase: tmplBase, - workflow: workflow, - log: log.WithFields(log.Fields{}), - } -} - -// NewContextFromClientset returns new Context. -func NewContextFromClientset(wftmplClientset typed.WorkflowTemplateInterface, clusterWftmplClient typed.ClusterWorkflowTemplateInterface, tmplBase wfv1.TemplateHolder, workflow *wfv1.Workflow) *Context { - return &Context{ - wftmplGetter: WrapWorkflowTemplateInterface(wftmplClientset), - cwftmplGetter: WrapClusterWorkflowTemplateInterface(clusterWftmplClient), - tmplBase: tmplBase, - workflow: workflow, - log: log.WithFields(log.Fields{}), - } -} - -// GetTemplateByName returns a template by name in the context. -func (ctx *Context) GetTemplateByName(name string) (*wfv1.Template, error) { - ctx.log.Debug("Getting the template by name") - - tmpl := ctx.tmplBase.GetTemplateByName(name) - if tmpl == nil { - return nil, errors.Errorf(errors.CodeNotFound, "template %s not found", name) - } - return tmpl.DeepCopy(), nil -} - -func (ctx *Context) GetTemplateGetterFromRef(tmplRef *wfv1.TemplateRef) (wfv1.TemplateHolder, error) { - if tmplRef.ClusterScope { - return ctx.cwftmplGetter.Get(tmplRef.Name) - } - return ctx.wftmplGetter.Get(tmplRef.Name) -} - -// GetTemplateFromRef returns a template found by a given template ref. 
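// For example (hypothetical names), a step referencing template "main" in
// WorkflowTemplate "my-wftmpl" would resolve via:
//
//	ref := &wfv1.TemplateRef{Name: "my-wftmpl", Template: "main"}
//	tmpl, err := ctx.GetTemplateFromRef(ref)
//
// Setting ref.ClusterScope consults the cluster-scoped getter instead.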
-func (ctx *Context) GetTemplateFromRef(tmplRef *wfv1.TemplateRef) (*wfv1.Template, error) { - ctx.log.Debug("Getting the template from ref") - var template *wfv1.Template - var wftmpl wfv1.TemplateHolder - var err error - if tmplRef.ClusterScope { - wftmpl, err = ctx.cwftmplGetter.Get(tmplRef.Name) - } else { - wftmpl, err = ctx.wftmplGetter.Get(tmplRef.Name) - } - - if err != nil { - if apierr.IsNotFound(err) { - return nil, errors.Errorf(errors.CodeNotFound, "workflow template %s not found", tmplRef.Name) - } - return nil, err - } - - template = wftmpl.GetTemplateByName(tmplRef.Template) - - if template == nil { - return nil, errors.Errorf(errors.CodeNotFound, "template %s not found in workflow template %s", tmplRef.Template, tmplRef.Name) - } - return template.DeepCopy(), nil -} - -// GetTemplate returns a template found by template name or template ref. -func (ctx *Context) GetTemplate(h wfv1.TemplateReferenceHolder) (*wfv1.Template, error) { - ctx.log.Debug("Getting the template") - if x := h.GetTemplate(); x != nil { - return x, nil - } else if x := h.GetTemplateRef(); x != nil { - return ctx.GetTemplateFromRef(x) - } else if x := h.GetTemplateName(); x != "" { - return ctx.GetTemplateByName(x) - } - return nil, errors.Errorf(errors.CodeInternal, "failed to get a template") -} - -// GetCurrentTemplateBase returns the current template base of the context. -func (ctx *Context) GetCurrentTemplateBase() wfv1.TemplateHolder { - return ctx.tmplBase -} - -func (ctx *Context) GetTemplateScope() string { - return string(ctx.tmplBase.GetResourceScope()) + "/" + ctx.tmplBase.GetName() -} - -// ResolveTemplate digs into references and returns a merged template. -// This method is the public starting point of template resolution. -func (ctx *Context) ResolveTemplate(tmplHolder wfv1.TemplateReferenceHolder) (*Context, *wfv1.Template, bool, error) { - return ctx.resolveTemplateImpl(tmplHolder, 0) -} - -// resolveTemplateImpl digs into references and returns a merged template. -// This method processes inputs and arguments so the inputs of the final -// resolved template include intermediate parameter passing. -// The other fields are just merged and shallower templates overwrite deeper. -func (ctx *Context) resolveTemplateImpl(tmplHolder wfv1.TemplateReferenceHolder, depth int) (*Context, *wfv1.Template, bool, error) { - ctx.log = ctx.log.WithFields(log.Fields{ - "depth": depth, - "base": common.GetTemplateGetterString(ctx.tmplBase), - "tmpl": common.GetTemplateHolderString(tmplHolder), - }) - // Avoid infinite references - if depth > maxResolveDepth { - return nil, nil, false, errors.Errorf(errors.CodeBadRequest, "template reference exceeded max depth (%d)", maxResolveDepth) - } - - ctx.log.Debug("Resolving the template") - - templateStored := false - var tmpl *wfv1.Template - if ctx.workflow != nil { - // Check if the template has been stored. - scope := ctx.tmplBase.GetResourceScope() - resourceName := ctx.tmplBase.GetName() - tmpl = ctx.workflow.GetStoredTemplate(scope, resourceName, tmplHolder) - } - if tmpl != nil { - ctx.log.Debug("Found stored template") - } else { - // Find newly appeared template. - newTmpl, err := ctx.GetTemplate(tmplHolder) - if err != nil { - return nil, nil, false, err - } - // Store the found template. 
- if ctx.workflow != nil { - scope := ctx.tmplBase.GetResourceScope() - resourceName := ctx.tmplBase.GetName() - stored, err := ctx.workflow.SetStoredTemplate(scope, resourceName, tmplHolder, newTmpl) - if err != nil { - return nil, nil, false, err - } - if stored { - ctx.log.Debug("Stored the template") - templateStored = true - } - } - tmpl = newTmpl - } - - // Update the template base of the context. - newTmplCtx, err := ctx.WithTemplateHolder(tmplHolder) - if err != nil { - return nil, nil, false, err - } - - if tmpl.GetType() == wfv1.TemplateTypeUnknown { - return nil, nil, false, fmt.Errorf("template '%s' type is unknown", tmpl.Name) - } - - return newTmplCtx, tmpl, templateStored, nil -} - -// WithTemplateHolder creates new context with a template base of a given template holder. -func (ctx *Context) WithTemplateHolder(tmplHolder wfv1.TemplateReferenceHolder) (*Context, error) { - tmplRef := tmplHolder.GetTemplateRef() - if tmplRef != nil { - tmplName := tmplRef.Name - if tmplRef.ClusterScope { - return ctx.WithClusterWorkflowTemplate(tmplName) - } else { - return ctx.WithWorkflowTemplate(tmplName) - } - } - return ctx.WithTemplateBase(ctx.tmplBase), nil -} - -// WithTemplateBase creates new context with a wfv1.TemplateHolder. -func (ctx *Context) WithTemplateBase(tmplBase wfv1.TemplateHolder) *Context { - return NewContext(ctx.wftmplGetter, ctx.cwftmplGetter, tmplBase, ctx.workflow) -} - -// WithWorkflowTemplate creates new context with a wfv1.TemplateHolder. -func (ctx *Context) WithWorkflowTemplate(name string) (*Context, error) { - wftmpl, err := ctx.wftmplGetter.Get(name) - if err != nil { - return nil, err - } - return ctx.WithTemplateBase(wftmpl), nil -} - -// WithClusterWorkflowTemplate creates new context with a wfv1.TemplateHolder. -func (ctx *Context) WithClusterWorkflowTemplate(name string) (*Context, error) { - cwftmpl, err := ctx.cwftmplGetter.Get(name) - if err != nil { - return nil, err - } - return ctx.WithTemplateBase(cwftmpl), nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/merge.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/merge.go deleted file mode 100644 index 976025c8911..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/merge.go +++ /dev/null @@ -1,104 +0,0 @@ -package util - -import ( - "encoding/json" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/strategicpatch" - - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" -) - -// MergeTo will merge one workflow (the "patch" workflow) into another (the "target" workflow). -// If the target workflow defines a field, this takes precedence over the patch. -func MergeTo(patch, target *wfv1.Workflow) error { - if target == nil || patch == nil { - return nil - } - - patchWfBytes, err := json.Marshal(patch) - if err != nil { - return err - } - - targetWfByte, err := json.Marshal(target) - if err != nil { - return err - } - var mergedWfByte []byte - - mergedWfByte, err = strategicpatch.StrategicMergePatch(patchWfBytes, targetWfByte, wfv1.Workflow{}) - - if err != nil { - return err - } - err = json.Unmarshal(mergedWfByte, target) - if err != nil { - return err - } - return nil -} - -// mergeMap copies every entry of the `from` map into the `to` map, unless the key is already present in `to`. 
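// For example:
//
//	from := map[string]string{"a": "1", "b": "2"}
//	to := map[string]string{"b": "x"}
//	mergeMap(from, to) // to is now {"a": "1", "b": "x"}; existing keys win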
-func mergeMap(from, to map[string]string) { - for key, val := range from { - if _, ok := to[key]; !ok { - to[key] = val - } - } -} - -// JoinWorkflowMetaData will join the workflow metadata with the following order of preference -// 1. Workflow, 2 WorkflowTemplate (WorkflowTemplateRef), 3. WorkflowDefault. -func JoinWorkflowMetaData(wfMetaData, wfDefaultMetaData *metav1.ObjectMeta) { - if wfDefaultMetaData != nil { - mergeMetaDataTo(wfDefaultMetaData, wfMetaData) - } -} - -// JoinWorkflowSpec will join the workflow specs with the following order of preference -// 1. Workflow Spec, 2 WorkflowTemplate Spec (WorkflowTemplateRef), 3. WorkflowDefault Spec. -func JoinWorkflowSpec(wfSpec, wftSpec, wfDefaultSpec *wfv1.WorkflowSpec) (*wfv1.Workflow, error) { - if wfSpec == nil { - return nil, nil - } - targetWf := wfv1.Workflow{Spec: *wfSpec.DeepCopy()} - if wftSpec != nil { - err := MergeTo(&wfv1.Workflow{Spec: *wftSpec.DeepCopy()}, &targetWf) - if err != nil { - return nil, err - } - } - if wfDefaultSpec != nil { - err := MergeTo(&wfv1.Workflow{Spec: *wfDefaultSpec.DeepCopy()}, &targetWf) - if err != nil { - return nil, err - } - } - - // This condition will update the workflow Spec suspend value if merged value is different. - // This scenario will happen when Workflow with WorkflowTemplateRef has suspend template - if wfSpec.Suspend != targetWf.Spec.Suspend { - targetWf.Spec.Suspend = wfSpec.Suspend - } - return &targetWf, nil -} - -// mergeMetadata will merge the labels and annotations into the target metadata. -func mergeMetaDataTo(from, to *metav1.ObjectMeta) { - if from == nil { - return - } - if from.Labels != nil { - if to.Labels == nil { - to.Labels = make(map[string]string) - } - mergeMap(from.Labels, to.Labels) - } - if from.Annotations != nil { - if to.Annotations == nil { - to.Annotations = make(map[string]string) - } - mergeMap(from.Annotations, to.Annotations) - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/pod_name.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/pod_name.go deleted file mode 100644 index 2fcca6647c6..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/pod_name.go +++ /dev/null @@ -1,92 +0,0 @@ -package util - -import ( - "fmt" - "hash/fnv" - "os" - - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/workflow/common" -) - -const ( - maxK8sResourceNameLength = 253 - k8sNamingHashLength = 10 -) - -// PodNameVersion stores which type of pod names should be used. -// v1 represents the node id. -// v2 is the combination of a node id and template name. 
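// For example, under v2 the pod for node "wf-abc.step-a" using template "echo"
// in workflow "wf-abc" is named "wf-abc-echo-<fnv32a(node name)>", with the
// prefix truncated so the final name fits the 253-character resource limit.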
-type PodNameVersion string - -const ( - // PodNameV1 is the v1 name that uses node ids for pod names - PodNameV1 PodNameVersion = "v1" - // PodNameV2 is the v2 name that uses node id combined with - // the template name - PodNameV2 PodNameVersion = "v2" - DefaultPodNameVersion PodNameVersion = PodNameV2 -) - -// String stringifies the pod name version -func (v PodNameVersion) String() string { - return string(v) -} - -// GetPodNameVersion returns the pod name version to be used -func GetPodNameVersion() PodNameVersion { - switch os.Getenv("POD_NAMES") { - case "v2": - return PodNameV2 - case "v1": - return PodNameV1 - default: - return DefaultPodNameVersion - } -} - -// PodName return a deterministic pod name -func PodName(workflowName, nodeName, templateName, nodeID string, version PodNameVersion) string { - if version == PodNameV1 { - return nodeID - } - - if workflowName == nodeName { - return workflowName - } - - prefix := fmt.Sprintf("%s-%s", workflowName, templateName) - prefix = ensurePodNamePrefixLength(prefix) - - h := fnv.New32a() - _, _ = h.Write([]byte(nodeName)) - - return fmt.Sprintf("%s-%v", prefix, h.Sum32()) - -} - -func ensurePodNamePrefixLength(prefix string) string { - maxPrefixLength := maxK8sResourceNameLength - k8sNamingHashLength - - if len(prefix) > maxPrefixLength-1 { - return prefix[0 : maxPrefixLength-1] - } - - return prefix -} - -// GetWorkflowPodNameVersion gets the pod name version from the annotation of a -// given workflow -func GetWorkflowPodNameVersion(wf *v1alpha1.Workflow) PodNameVersion { - annotations := wf.GetAnnotations() - version := annotations[common.AnnotationKeyPodNameVersion] - - switch version { - case PodNameV1.String(): - return PodNameV1 - case PodNameV2.String(): - return PodNameV2 - default: - return DefaultPodNameVersion - } -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/util.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/util.go index 739b786136b..392ea2b7fce 100644 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/util.go +++ b/vendor/github.com/argoproj/argo-workflows/v3/workflow/util/util.go @@ -1,1069 +1,32 @@ package util import ( - "bufio" "context" "encoding/json" "fmt" - "io/ioutil" - "math/rand" - "net/http" - "os" - "path/filepath" - "regexp" - nruntime "runtime" - "strconv" - "strings" - "time" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - apiv1 "k8s.io/api/core/v1" - apierr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/informers/internalinterfaces" - "k8s.io/client-go/tools/cache" - "k8s.io/utils/pointer" - "sigs.k8s.io/yaml" - "github.com/argoproj/argo-workflows/v3/errors" - "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow" wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - wfclientset "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned" "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1" - cmdutil "github.com/argoproj/argo-workflows/v3/util/cmd" errorsutil "github.com/argoproj/argo-workflows/v3/util/errors" 
"github.com/argoproj/argo-workflows/v3/util/retry" - unstructutil "github.com/argoproj/argo-workflows/v3/util/unstructured" waitutil "github.com/argoproj/argo-workflows/v3/util/wait" - "github.com/argoproj/argo-workflows/v3/workflow/common" - "github.com/argoproj/argo-workflows/v3/workflow/hydrator" - "github.com/argoproj/argo-workflows/v3/workflow/packer" - "github.com/argoproj/argo-workflows/v3/workflow/templateresolution" - "github.com/argoproj/argo-workflows/v3/workflow/validate" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" ) -// NewWorkflowInformer returns the workflow informer used by the controller. This is actually -// a custom built UnstructuredInformer which is in actuality returning unstructured.Unstructured -// objects. We no longer return WorkflowInformer due to: -// https://github.com/kubernetes/kubernetes/issues/57705 -// https://github.com/argoproj/argo-workflows/issues/632 -func NewWorkflowInformer(dclient dynamic.Interface, ns string, resyncPeriod time.Duration, tweakListOptions internalinterfaces.TweakListOptionsFunc, indexers cache.Indexers) cache.SharedIndexInformer { - resource := schema.GroupVersionResource{ - Group: workflow.Group, - Version: "v1alpha1", - Resource: workflow.WorkflowPlural, - } - informer := unstructutil.NewFilteredUnstructuredInformer( - resource, - dclient, - ns, - resyncPeriod, - indexers, - tweakListOptions, - ) - return informer -} - -// InstanceIDRequirement returns the label requirement to filter against a controller instance (or not) -func InstanceIDRequirement(instanceID string) labels.Requirement { - var instanceIDReq *labels.Requirement - var err error - if instanceID != "" { - instanceIDReq, err = labels.NewRequirement(common.LabelKeyControllerInstanceID, selection.Equals, []string{instanceID}) - } else { - instanceIDReq, err = labels.NewRequirement(common.LabelKeyControllerInstanceID, selection.DoesNotExist, nil) - } - if err != nil { - panic(err) - } - return *instanceIDReq -} - -// WorkflowLister implements the List() method of v1alpha.WorkflowLister interface but does so using -// an Unstructured informer and converting objects to workflows. Ignores objects that failed to convert. -type WorkflowLister interface { - List() ([]*wfv1.Workflow, error) -} - -type workflowLister struct { - informer cache.SharedIndexInformer -} - -func (l *workflowLister) List() ([]*wfv1.Workflow, error) { - workflows := make([]*wfv1.Workflow, 0) - for _, m := range l.informer.GetStore().List() { - wf, err := FromUnstructured(m.(*unstructured.Unstructured)) - if err != nil { - log.Warnf("Failed to unmarshal workflow %v object: %v", m, err) - continue - } - workflows = append(workflows, wf) - } - return workflows, nil -} - -// NewWorkflowLister returns a new workflow lister -func NewWorkflowLister(informer cache.SharedIndexInformer) WorkflowLister { - return &workflowLister{ - informer: informer, - } -} - -// FromUnstructured converts an unstructured object to a workflow. -// This function performs a lot of allocations and con resulting in a lot of memory -// being used. Users should avoid invoking this function if the data they need is -// available from `unstructured.Unstructured`. especially if they're looping. -// Available values include: `GetLabels()`, `GetName()`, `GetNamespace()` etc. -// Single values can be accessed using `unstructured.Nested*`, e.g. -// `unstructured.NestedString(un.Object, "spec", "phase")`. 
-func FromUnstructured(un *unstructured.Unstructured) (*wfv1.Workflow, error) { - var wf wfv1.Workflow - err := FromUnstructuredObj(un, &wf) - return &wf, err -} - -func FromUnstructuredObj(un *unstructured.Unstructured, v interface{}) error { - err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, v) - if err != nil { - if err.Error() == "cannot convert int64 to v1alpha1.AnyString" { - data, err := json.Marshal(un) - if err != nil { - return err - } - return json.Unmarshal(data, v) - } - return err - } - return nil -} - -// ToUnstructured converts an workflow to an Unstructured object -func ToUnstructured(wf *wfv1.Workflow) (*unstructured.Unstructured, error) { - obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(wf) - if err != nil { - return nil, err - } - un := &unstructured.Unstructured{Object: obj} - // we need to add these values so that the `EventRecorder` does not error - un.SetKind(workflow.WorkflowKind) - un.SetAPIVersion(workflow.APIVersion) - return un, nil -} - -// IsWorkflowCompleted returns whether or not a workflow is considered completed -func IsWorkflowCompleted(wf *wfv1.Workflow) bool { - if wf.ObjectMeta.Labels != nil { - return wf.ObjectMeta.Labels[common.LabelKeyCompleted] == "true" - } - return false -} - -// SubmitWorkflow validates and submits a single workflow and overrides some of the fields of the workflow -func SubmitWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, wfClientset wfclientset.Interface, namespace string, wf *wfv1.Workflow, opts *wfv1.SubmitOpts) (*wfv1.Workflow, error) { - err := ApplySubmitOpts(wf, opts) - if err != nil { - return nil, err - } - wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - cwftmplGetter := templateresolution.WrapClusterWorkflowTemplateInterface(wfClientset.ArgoprojV1alpha1().ClusterWorkflowTemplates()) - - err = validate.ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, validate.ValidateOpts{Submit: true}) - if err != nil { - return nil, err - } - if opts.DryRun { - return wf, nil - } else if opts.ServerDryRun { - wf, err := CreateServerDryRun(ctx, wf, wfClientset) - if err != nil { - return nil, err - } - return wf, err - } else { - return wfIf.Create(ctx, wf, metav1.CreateOptions{}) - } -} - -// CreateServerDryRun fills the workflow struct with the server's representation without creating it and returns an error, if there is any -func CreateServerDryRun(ctx context.Context, wf *wfv1.Workflow, wfClientset wfclientset.Interface) (*wfv1.Workflow, error) { - // Keep the workflow metadata because it will be overwritten by the Post request - workflowTypeMeta := wf.TypeMeta - err := wfClientset.ArgoprojV1alpha1().RESTClient().Post(). - Namespace(wf.Namespace). - Resource("workflows"). - Body(wf). - Param("dryRun", "All"). - Do(ctx). 
- Into(wf) - wf.TypeMeta = workflowTypeMeta - return wf, err -} - -func PopulateSubmitOpts(command *cobra.Command, submitOpts *wfv1.SubmitOpts, parameterFile *string, includeDryRun bool) { - command.Flags().StringVar(&submitOpts.Name, "name", "", "override metadata.name") - command.Flags().StringVar(&submitOpts.GenerateName, "generate-name", "", "override metadata.generateName") - command.Flags().StringVar(&submitOpts.Entrypoint, "entrypoint", "", "override entrypoint") - command.Flags().StringArrayVarP(&submitOpts.Parameters, "parameter", "p", []string{}, "pass an input parameter") - command.Flags().StringVar(&submitOpts.ServiceAccount, "serviceaccount", "", "run all pods in the workflow using specified serviceaccount") - command.Flags().StringVarP(parameterFile, "parameter-file", "f", "", "pass a file containing all input parameters") - command.Flags().StringVarP(&submitOpts.Labels, "labels", "l", "", "Comma separated labels to apply to the workflow. Will override previous values.") - - if includeDryRun { - command.Flags().BoolVar(&submitOpts.DryRun, "dry-run", false, "modify the workflow on the client-side without creating it") - command.Flags().BoolVar(&submitOpts.ServerDryRun, "server-dry-run", false, "send request to server with dry-run flag which will modify the workflow without creating it") - } -} - -// ApplySubmitOpts applies the submit options to the workflow object -func ApplySubmitOpts(wf *wfv1.Workflow, opts *wfv1.SubmitOpts) error { - if wf == nil { - return fmt.Errorf("workflow cannot be nil") - } - if opts == nil { - opts = &wfv1.SubmitOpts{} - } - if opts.Entrypoint != "" { - wf.Spec.Entrypoint = opts.Entrypoint - } - if opts.ServiceAccount != "" { - wf.Spec.ServiceAccountName = opts.ServiceAccount - } - if opts.PodPriorityClassName != "" { - wf.Spec.PodPriorityClassName = opts.PodPriorityClassName - } - - if opts.Priority != nil { - wf.Spec.Priority = opts.Priority - } - - wfLabels := wf.GetLabels() - if wfLabels == nil { - wfLabels = make(map[string]string) - } - if opts.Labels != "" { - passedLabels, err := cmdutil.ParseLabels(opts.Labels) - if err != nil { - return fmt.Errorf("expected labels of the form: NAME1=VALUE1,NAME2=VALUE2. Received: %s: %w", opts.Labels, err) - } - for k, v := range passedLabels { - wfLabels[k] = v - } - } - wf.SetLabels(wfLabels) - wfAnnotations := wf.GetAnnotations() - if wfAnnotations == nil { - wfAnnotations = make(map[string]string) - } - if opts.Annotations != "" { - fmt.Println(opts.Annotations) - passedAnnotations, err := cmdutil.ParseLabels(opts.Annotations) - if err != nil { - return fmt.Errorf("expected Annotations of the form: NAME1=VALUE1,NAME2=VALUE2. Received: %s: %w", opts.Annotations, err) - } - for k, v := range passedAnnotations { - wfAnnotations[k] = v - } - } - wf.SetAnnotations(wfAnnotations) - err := overrideParameters(wf, opts.Parameters) - if err != nil { - return err - } - if opts.GenerateName != "" { - wf.ObjectMeta.GenerateName = opts.GenerateName - } - if opts.Name != "" { - wf.ObjectMeta.Name = opts.Name - } - if opts.OwnerReference != nil { - wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *opts.OwnerReference)) - } - return nil -} - -func overrideParameters(wf *wfv1.Workflow, parameters []string) error { - if len(parameters) > 0 { - newParams := make([]wfv1.Parameter, 0) - passedParams := make(map[string]bool) - for _, paramStr := range parameters { - parts := strings.SplitN(paramStr, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("expected parameter of the form: NAME=VALUE. 
Received: %s", paramStr) - } - param := wfv1.Parameter{Name: parts[0], Value: wfv1.AnyStringPtr(parts[1])} - newParams = append(newParams, param) - passedParams[param.Name] = true - } - for _, param := range wf.Spec.Arguments.Parameters { - if _, ok := passedParams[param.Name]; ok { - // this parameter was overridden via command line - continue - } - newParams = append(newParams, param) - } - wf.Spec.Arguments.Parameters = newParams - } - return nil -} - -func ReadParametersFile(file string, opts *wfv1.SubmitOpts) error { - var body []byte - var err error - if cmdutil.IsURL(file) { - body, err = ReadFromUrl(file) - if err != nil { - return err - } - } else { - body, err = ioutil.ReadFile(file) - if err != nil { - return err - } - } - - yamlParams := map[string]json.RawMessage{} - err = yaml.Unmarshal(body, &yamlParams) - if err != nil { - return err - } - - for k, v := range yamlParams { - // We get quoted strings from the yaml file. - value, err := strconv.Unquote(string(v)) - if err != nil { - // the string is already clean. - value = string(v) - } - opts.Parameters = append(opts.Parameters, fmt.Sprintf("%s=%s", k, value)) - } - return nil -} - -// SuspendWorkflow suspends a workflow by setting spec.suspend to true. Retries conflict errors -func SuspendWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, workflowName string) error { - err := waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { - wf, err := wfIf.Get(ctx, workflowName, metav1.GetOptions{}) - if err != nil { - return !errorsutil.IsTransientErr(err), err - } - if IsWorkflowCompleted(wf) { - return false, errSuspendedCompletedWorkflow - } - if wf.Spec.Suspend == nil || !*wf.Spec.Suspend { - wf.Spec.Suspend = pointer.BoolPtr(true) - _, err := wfIf.Update(ctx, wf, metav1.UpdateOptions{}) - if apierr.IsConflict(err) { - return false, nil - } - return !errorsutil.IsTransientErr(err), err - } - return true, nil - }) - return err -} - -// ResumeWorkflow resumes a workflow by setting spec.suspend to nil and any suspended nodes to Successful. 
-// Retries conflict errors -func ResumeWorkflow(ctx context.Context, wfIf v1alpha1.WorkflowInterface, hydrator hydrator.Interface, workflowName string, nodeFieldSelector string) error { - if len(nodeFieldSelector) > 0 { - return updateSuspendedNode(ctx, wfIf, hydrator, workflowName, nodeFieldSelector, SetOperationValues{Phase: wfv1.NodeSucceeded}) - } else { - err := waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { - wf, err := wfIf.Get(ctx, workflowName, metav1.GetOptions{}) - if err != nil { - return !errorsutil.IsTransientErr(err), err - } - - err = hydrator.Hydrate(wf) - if err != nil { - return true, err - } - - workflowUpdated := false - if wf.Spec.Suspend != nil && *wf.Spec.Suspend { - wf.Spec.Suspend = nil - workflowUpdated = true - } - - // To resume a workflow with a suspended node we simply mark the node as Successful - for nodeID, node := range wf.Status.Nodes { - if node.IsActiveSuspendNode() { - if node.Outputs != nil { - for i, param := range node.Outputs.Parameters { - if param.ValueFrom != nil && param.ValueFrom.Supplied != nil { - if param.ValueFrom.Default != nil { - node.Outputs.Parameters[i].Value = param.ValueFrom.Default - node.Outputs.Parameters[i].ValueFrom = nil - } else { - return false, fmt.Errorf("raw output parameter '%s' has not been set and does not have a default value", param.Name) - } - } - } - } - node.Phase = wfv1.NodeSucceeded - node.FinishedAt = metav1.Time{Time: time.Now().UTC()} - wf.Status.Nodes[nodeID] = node - workflowUpdated = true - } - } - - if workflowUpdated { - err := hydrator.Dehydrate(wf) - if err != nil { - return false, fmt.Errorf("unable to compress or offload workflow nodes: %s", err) - } - - _, err = wfIf.Update(ctx, wf, metav1.UpdateOptions{}) - if err != nil { - if apierr.IsConflict(err) { - return false, nil - } - return false, err - } - } - return true, nil - }) - return err - } -} - -func SelectorMatchesNode(selector fields.Selector, node wfv1.NodeStatus) bool { - nodeFields := fields.Set{ - "displayName": node.DisplayName, - "templateName": node.TemplateName, - "phase": string(node.Phase), - "name": node.Name, - "id": node.ID, - } - if node.TemplateRef != nil { - nodeFields["templateRef.name"] = node.TemplateRef.Name - nodeFields["templateRef.template"] = node.TemplateRef.Template - } - if node.Inputs != nil { - for _, inParam := range node.Inputs.Parameters { - nodeFields[fmt.Sprintf("inputs.parameters.%s.value", inParam.Name)] = inParam.Value.String() - } - } - - return selector.Matches(nodeFields) -} - -type SetOperationValues struct { - Phase wfv1.NodePhase - Message string - OutputParameters map[string]string -} - -func AddParamToGlobalScope(wf *wfv1.Workflow, log *log.Entry, param wfv1.Parameter) bool { - wfUpdated := false - if param.GlobalName == "" { - return wfUpdated - } - index := -1 - if wf.Status.Outputs != nil { - for i, gParam := range wf.Status.Outputs.Parameters { - if gParam.Name == param.GlobalName { - index = i - break - } - } - } else { - wf.Status.Outputs = &wfv1.Outputs{} - } - paramName := fmt.Sprintf("workflow.outputs.parameters.%s", param.GlobalName) - if index == -1 { - log.Infof("setting %s: '%s'", paramName, param.Value) - gParam := wfv1.Parameter{Name: param.GlobalName, Value: param.Value} - wf.Status.Outputs.Parameters = append(wf.Status.Outputs.Parameters, gParam) - wfUpdated = true - } else { - prevVal := wf.Status.Outputs.Parameters[index].Value - if prevVal == nil || (param.Value != nil && *prevVal != *param.Value) { - log.Infof("overwriting %s: '%s' -> '%s'", paramName, 
wf.Status.Outputs.Parameters[index].Value, param.Value) - wf.Status.Outputs.Parameters[index].Value = param.Value - wfUpdated = true - } - } - return wfUpdated -} - -func updateSuspendedNode(ctx context.Context, wfIf v1alpha1.WorkflowInterface, hydrator hydrator.Interface, workflowName string, nodeFieldSelector string, values SetOperationValues) error { - selector, err := fields.ParseSelector(nodeFieldSelector) - if err != nil { - return err - } - err = waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { - wf, err := wfIf.Get(ctx, workflowName, metav1.GetOptions{}) - if err != nil { - return !errorsutil.IsTransientErr(err), err - } - - err = hydrator.Hydrate(wf) - if err != nil { - return false, err - } - - nodeUpdated := false - for nodeID, node := range wf.Status.Nodes { - if node.IsActiveSuspendNode() { - if SelectorMatchesNode(selector, node) { - - // Update phase - if values.Phase != "" { - node.Phase = values.Phase - if values.Phase.Fulfilled() { - node.FinishedAt = metav1.Time{Time: time.Now().UTC()} - } - nodeUpdated = true - } - - // Update message - if values.Message != "" { - node.Message = values.Message - nodeUpdated = true - } - - // Update output parameters - if len(values.OutputParameters) > 0 { - if node.Outputs == nil { - return true, fmt.Errorf("cannot set output parameters because node is not expecting any raw parameters") - } - for name, val := range values.OutputParameters { - hit := false - for i, param := range node.Outputs.Parameters { - if param.Name == name { - if param.ValueFrom == nil || param.ValueFrom.Supplied == nil { - return true, fmt.Errorf("cannot set output parameter '%s' because it does not use valueFrom.raw or it was already set", param.Name) - } - node.Outputs.Parameters[i].Value = wfv1.AnyStringPtr(val) - node.Outputs.Parameters[i].ValueFrom = nil - nodeUpdated = true - hit = true - AddParamToGlobalScope(wf, log.NewEntry(log.StandardLogger()), node.Outputs.Parameters[i]) - break - } - } - if !hit { - return true, fmt.Errorf("node is not expecting output parameter '%s'", name) - } - } - } - - wf.Status.Nodes[nodeID] = node - } - } - } - - if !nodeUpdated { - return true, fmt.Errorf("currently, set only targets suspend nodes: no suspend nodes matching nodeFieldSelector: %s", nodeFieldSelector) - } - - err = hydrator.Dehydrate(wf) - if err != nil { - return true, fmt.Errorf("unable to compress or offload workflow nodes: %s", err) - } - - _, err = wfIf.Update(ctx, wf, metav1.UpdateOptions{}) - if err != nil { - if apierr.IsConflict(err) { - // Try again if we have a conflict - return false, nil - } - return true, err - } - - return true, nil - }) - return err -} - -const letters = "abcdefghijklmnopqrstuvwxyz0123456789" - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// generates an insecure random string -func randString(n int) string { - b := make([]byte, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] //nolint:gosec - } - return string(b) -} - -// RandSuffix generates a random suffix suitable for suffixing resource name. 
-func RandSuffix() string { - return randString(5) -} - -// FormulateResubmitWorkflow formulate a new workflow from a previous workflow, optionally re-using successful nodes -func FormulateResubmitWorkflow(wf *wfv1.Workflow, memoized bool, parameters []string) (*wfv1.Workflow, error) { - newWF := wfv1.Workflow{} - newWF.TypeMeta = wf.TypeMeta - - // Resubmitted workflow will use generated names - if wf.ObjectMeta.GenerateName != "" { - newWF.ObjectMeta.GenerateName = wf.ObjectMeta.GenerateName - } else { - newWF.ObjectMeta.GenerateName = wf.ObjectMeta.Name + "-" - } - // When resubmitting workflow with memoized nodes, we need to use a predetermined workflow name - // in order to formulate the node statuses. Which means we cannot reuse metadata.generateName - // The following simulates the behavior of generateName - if memoized { - switch wf.Status.Phase { - case wfv1.WorkflowFailed, wfv1.WorkflowError: - default: - return nil, errors.Errorf(errors.CodeBadRequest, "workflow must be Failed/Error to resubmit in memoized mode") - } - newWF.ObjectMeta.Name = newWF.ObjectMeta.GenerateName + RandSuffix() - } - - // carry over the unmodified spec - newWF.Spec = wf.Spec - - if newWF.Spec.ActiveDeadlineSeconds != nil && *newWF.Spec.ActiveDeadlineSeconds == 0 { - // if it was terminated, unset the deadline - newWF.Spec.ActiveDeadlineSeconds = nil - } - - newWF.Spec.Shutdown = "" - - // carry over user labels and annotations from previous workflow. - if newWF.ObjectMeta.Labels == nil { - newWF.ObjectMeta.Labels = make(map[string]string) - } - for key, val := range wf.ObjectMeta.Labels { - switch key { - case common.LabelKeyCreator, common.LabelKeyPhase, common.LabelKeyCompleted, common.LabelKeyWorkflowArchivingStatus: - // ignore - default: - newWF.ObjectMeta.Labels[key] = val - } - } - // Append an additional label so it's easy for user to see the - // name of the original workflow that has been resubmitted. - newWF.ObjectMeta.Labels[common.LabelKeyPreviousWorkflowName] = wf.ObjectMeta.Name - if newWF.ObjectMeta.Annotations == nil { - newWF.ObjectMeta.Annotations = make(map[string]string) - } - for key, val := range wf.ObjectMeta.Annotations { - newWF.ObjectMeta.Annotations[key] = val - } - - // Setting OwnerReference from original Workflow - newWF.OwnerReferences = append(newWF.OwnerReferences, wf.OwnerReferences...) - - // Override parameters - if parameters != nil { - if _, ok := wf.ObjectMeta.Labels[common.LabelKeyPreviousWorkflowName]; ok || memoized { - log.Warnln("Overriding parameters on memoized or resubmitted workflows may have unexpected results") - } - err := overrideParameters(&newWF, parameters) - if err != nil { - return nil, err - } - } - - if !memoized { - return &newWF, nil - } - - // Iterate the previous nodes. 
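// Node names and IDs are rewritten from the old workflow name to the new one.
// Nodes under the old onExit node are dropped; successful pod nodes are carried
// over as Skipped (recording the original pod), while failed or errored pod
// nodes are reset to Pending so they run again.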
- replaceRegexp := regexp.MustCompile("^" + wf.ObjectMeta.Name) - newWF.Status.Nodes = make(map[string]wfv1.NodeStatus) - onExitNodeName := wf.ObjectMeta.Name + ".onExit" - err := packer.DecompressWorkflow(wf) - if err != nil { - log.Fatal(err) - } - for _, node := range wf.Status.Nodes { - newNode := node.DeepCopy() - if strings.HasPrefix(node.Name, onExitNodeName) { - continue - } - originalID := node.ID - newNode.Name = replaceRegexp.ReplaceAllString(node.Name, newWF.ObjectMeta.Name) - newNode.ID = newWF.NodeID(newNode.Name) - if node.BoundaryID != "" { - newNode.BoundaryID = convertNodeID(&newWF, replaceRegexp, node.BoundaryID, wf.Status.Nodes) - } - if newNode.FailedOrError() && newNode.Type == wfv1.NodeTypePod { - newNode.StartedAt = metav1.Time{} - newNode.FinishedAt = metav1.Time{} - } else { - newNode.StartedAt = metav1.Time{Time: time.Now().UTC()} - newNode.FinishedAt = newNode.StartedAt - } - newChildren := make([]string, len(node.Children)) - for i, childID := range node.Children { - newChildren[i] = convertNodeID(&newWF, replaceRegexp, childID, wf.Status.Nodes) - } - newNode.Children = newChildren - newOutboundNodes := make([]string, len(node.OutboundNodes)) - for i, outboundID := range node.OutboundNodes { - newOutboundNodes[i] = convertNodeID(&newWF, replaceRegexp, outboundID, wf.Status.Nodes) - } - newNode.OutboundNodes = newOutboundNodes - if !newNode.FailedOrError() && newNode.Type == wfv1.NodeTypePod { - newNode.Phase = wfv1.NodeSkipped - newNode.Type = wfv1.NodeTypeSkipped - newNode.Message = fmt.Sprintf("original pod: %s", originalID) - } else { - newNode.Phase = wfv1.NodePending - newNode.Message = "" - } - newWF.Status.Nodes[newNode.ID] = *newNode - } - - newWF.Status.StoredTemplates = make(map[string]wfv1.Template) - for id, tmpl := range wf.Status.StoredTemplates { - newWF.Status.StoredTemplates[id] = tmpl - } - - newWF.Status.Conditions = wfv1.Conditions{{Status: metav1.ConditionFalse, Type: wfv1.ConditionTypeCompleted}} - newWF.Status.Phase = wfv1.WorkflowUnknown - - return &newWF, nil -} - -// convertNodeID converts an old nodeID to a new nodeID -func convertNodeID(newWf *wfv1.Workflow, regex *regexp.Regexp, oldNodeID string, oldNodes map[string]wfv1.NodeStatus) string { - node := oldNodes[oldNodeID] - newNodeName := regex.ReplaceAllString(node.Name, newWf.ObjectMeta.Name) - return newWf.NodeID(newNodeName) -} - -func getDescendantNodeIDs(wf *wfv1.Workflow, node wfv1.NodeStatus) []string { - var descendantNodeIDs []string - descendantNodeIDs = append(descendantNodeIDs, node.Children...) - for _, child := range node.Children { - descendantNodeIDs = append(descendantNodeIDs, getDescendantNodeIDs(wf, wf.Status.Nodes[child])...) 
- } - return descendantNodeIDs -} - -func deletePodNodeDuringRetryWorkflow(wf *wfv1.Workflow, node wfv1.NodeStatus, deletedPods map[string]bool, podsToDelete []string) (map[string]bool, []string) { - templateName := GetTemplateFromNode(node) - version := GetWorkflowPodNameVersion(wf) - podName := PodName(wf.Name, node.Name, templateName, node.ID, version) - if _, ok := deletedPods[podName]; !ok { - deletedPods[podName] = true - podsToDelete = append(podsToDelete, podName) - } - return deletedPods, podsToDelete -} - -func containsNode(nodes []string, node string) bool { - for _, e := range nodes { - if e == node { - return true - } - } - return false -} - -func isGroupNode(node wfv1.NodeStatus) bool { - return node.Type == wfv1.NodeTypeDAG || node.Type == wfv1.NodeTypeTaskGroup || node.Type == wfv1.NodeTypeStepGroup || node.Type == wfv1.NodeTypeSteps -} - -func resetConnectedParentGroupNodes(oldWF *wfv1.Workflow, newWF *wfv1.Workflow, currentNode wfv1.NodeStatus, resetParentGroupNodes []string) (*wfv1.Workflow, []string) { - currentNodeID := currentNode.ID - for { - currentNode := oldWF.Status.Nodes[currentNodeID] - if !containsNode(resetParentGroupNodes, currentNodeID) { - newWF.Status.Nodes[currentNodeID] = resetNode(*currentNode.DeepCopy()) - resetParentGroupNodes = append(resetParentGroupNodes, currentNodeID) - log.Debugf("Reset connected group node %s", currentNode.Name) - } - if currentNode.BoundaryID != "" && currentNode.BoundaryID != oldWF.ObjectMeta.Name { - parentNode := oldWF.Status.Nodes[currentNode.BoundaryID] - if isGroupNode(parentNode) { - currentNodeID = parentNode.ID - } else { - break - } - } else { - break - } - } - return newWF, resetParentGroupNodes -} - -// FormulateRetryWorkflow formulates a previous workflow to be retried, deleting all failed steps as well as the onExit node (and children) -func FormulateRetryWorkflow(ctx context.Context, wf *wfv1.Workflow, restartSuccessful bool, nodeFieldSelector string, parameters []string) (*wfv1.Workflow, []string, error) { - - switch wf.Status.Phase { - case wfv1.WorkflowFailed, wfv1.WorkflowError: - default: - return nil, nil, errors.Errorf(errors.CodeBadRequest, "workflow must be Failed/Error to retry") - } - - newWF := wf.DeepCopy() - - // Delete/reset fields which indicate workflow completed - delete(newWF.Labels, common.LabelKeyCompleted) - delete(newWF.Labels, common.LabelKeyWorkflowArchivingStatus) - newWF.Status.Conditions.UpsertCondition(wfv1.Condition{Status: metav1.ConditionFalse, Type: wfv1.ConditionTypeCompleted}) - newWF.ObjectMeta.Labels[common.LabelKeyPhase] = string(wfv1.NodeRunning) - newWF.Status.Phase = wfv1.WorkflowRunning - newWF.Status.Nodes = make(wfv1.Nodes) - newWF.Status.Message = "" - newWF.Status.StartedAt = metav1.Time{Time: time.Now().UTC()} - newWF.Status.FinishedAt = metav1.Time{} - newWF.Spec.Shutdown = "" - if newWF.Spec.ActiveDeadlineSeconds != nil && *newWF.Spec.ActiveDeadlineSeconds == 0 { - // if it was terminated, unset the deadline - newWF.Spec.ActiveDeadlineSeconds = nil - } - // Override parameters - if parameters != nil { - if _, ok := wf.ObjectMeta.Labels[common.LabelKeyPreviousWorkflowName]; ok { - log.Warnln("Overriding parameters on resubmitted workflows may have unexpected results") - } - err := overrideParameters(newWF, parameters) - if err != nil { - return nil, nil, err - } - } - - onExitNodeName := wf.ObjectMeta.Name + ".onExit" - // Get all children of nodes that match filter - nodeIDsToReset, err := getNodeIDsToReset(restartSuccessful, nodeFieldSelector, wf.Status.Nodes) - 
if err != nil { - return nil, nil, err - } - - // Iterate the previous nodes. If it was successful Pod carry it forward - deletedNodes := make(map[string]bool) - deletedPods := make(map[string]bool) - var podsToDelete []string - var resetParentGroupNodes []string - for _, node := range wf.Status.Nodes { - doForceResetNode := false - if _, present := nodeIDsToReset[node.ID]; present { - // if we are resetting this node then don't carry it across regardless of its phase - doForceResetNode = true - } - switch node.Phase { - case wfv1.NodeSucceeded, wfv1.NodeSkipped: - if doForceResetNode { - log.Debugf("Force reset for node: %s", node.Name) - // Reset parent node if this node is a step/task group or DAG. - if isGroupNode(node) && node.BoundaryID != "" { - if node.ID != wf.ObjectMeta.Name { // Skip root node - descendantNodeIDs := getDescendantNodeIDs(wf, node) - var nodeGroupNeedsReset bool - // Only reset DAG that's in the same branch as the nodeIDsToReset - for _, child := range descendantNodeIDs { - childNode := wf.Status.Nodes[child] - if _, present := nodeIDsToReset[child]; present { - log.Debugf("Group node %s needs to reset since its child %s is in the force reset path", node.Name, childNode.Name) - nodeGroupNeedsReset = true - break - } - } - if nodeGroupNeedsReset { - newWF, resetParentGroupNodes = resetConnectedParentGroupNodes(wf, newWF, node, resetParentGroupNodes) - } - } - } else { - if node.Type == wfv1.NodeTypePod || node.Type == wfv1.NodeTypeSuspend { - newWF, resetParentGroupNodes = resetConnectedParentGroupNodes(wf, newWF, node, resetParentGroupNodes) - // Only remove the descendants of a suspended node but not the suspended node itself. The descendants - // of a suspended node need to be removed since the conditions should be re-evaluated based on - // the modified supplied parameter values. 
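The descendant bookkeeping used above is a plain breadth-first walk over node children; a reduced, self-contained sketch over a hypothetical node graph, before the deletion logic continues below:

    package main

    import "fmt"

    // descendants mirrors the queue-based child traversal that
    // getDescendantNodeIDs and getNodeIDsToReset perform, reduced to maps.
    func descendants(children map[string][]string, root string) []string {
        var out []string
        seen := map[string]bool{root: true}
        queue := []string{root}
        for len(queue) > 0 {
            id := queue[0]
            queue = queue[1:]
            for _, child := range children[id] {
                if !seen[child] {
                    seen[child] = true
                    out = append(out, child)
                    queue = append(queue, child)
                }
            }
        }
        return out
    }

    func main() {
        children := map[string][]string{"dag": {"a", "b"}, "a": {"a.0"}}
        fmt.Println(descendants(children, "dag")) // [a b a.0]
    }
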
- if node.Type != wfv1.NodeTypeSuspend { - deletedNodes[node.ID] = true - deletedPods, podsToDelete = deletePodNodeDuringRetryWorkflow(wf, node, deletedPods, podsToDelete) - log.Debugf("Deleted pod node: %s", node.Name) - } - - descendantNodeIDs := getDescendantNodeIDs(wf, node) - for _, descendantNodeID := range descendantNodeIDs { - deletedNodes[descendantNodeID] = true - descendantNode := wf.Status.Nodes[descendantNodeID] - if descendantNode.Type == wfv1.NodeTypePod { - newWF, resetParentGroupNodes = resetConnectedParentGroupNodes(wf, newWF, node, resetParentGroupNodes) - deletedPods, podsToDelete = deletePodNodeDuringRetryWorkflow(wf, descendantNode, deletedPods, podsToDelete) - log.Debugf("Deleted pod node %s since it belongs to node %s", descendantNode.Name, node.Name) - } - } - } else { - log.Debugf("Reset non-pod/suspend node %s", node.Name) - newNode := node.DeepCopy() - newWF.Status.Nodes[newNode.ID] = resetNode(*newNode) - } - } - } else { - if !containsNode(resetParentGroupNodes, node.ID) { - log.Debugf("Node %s remains as is", node.Name) - newWF.Status.Nodes[node.ID] = node - } - } - case wfv1.NodeError, wfv1.NodeFailed, wfv1.NodeOmitted: - if !strings.HasPrefix(node.Name, onExitNodeName) && isGroupNode(node) { - newNode := node.DeepCopy() - newWF.Status.Nodes[newNode.ID] = resetNode(*newNode) - log.Debugf("Reset %s node %s since it's a group node", node.Name, string(node.Phase)) - continue - } else { - log.Debugf("Deleted %s node %s since it's not a group node", node.Name, string(node.Phase)) - deletedPods, podsToDelete = deletePodNodeDuringRetryWorkflow(wf, node, deletedPods, podsToDelete) - log.Debugf("Deleted pod node: %s", node.Name) - deletedNodes[node.ID] = true - } - // do not add this status to the node. pretend as if this node never existed. - default: - // Do not allow retry of workflows with pods in Running/Pending phase - return nil, nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node.Name, node.Phase) - } - - if node.Name == wf.ObjectMeta.Name { - log.Debugf("Reset root node: %s", node.Name) - newNode := node.DeepCopy() - newWF.Status.Nodes[newNode.ID] = resetNode(*newNode) - continue - } - } - - if len(deletedNodes) > 0 { - for _, node := range newWF.Status.Nodes { - if deletedNodes[node.ID] { - log.Debugf("Removed node: %s", node.Name) - delete(newWF.Status.Nodes, node.ID) - continue - } - - var newChildren []string - for _, child := range node.Children { - if !deletedNodes[child] { - newChildren = append(newChildren, child) - } - } - node.Children = newChildren - - var outboundNodes []string - for _, outboundNode := range node.OutboundNodes { - if !deletedNodes[outboundNode] { - outboundNodes = append(outboundNodes, outboundNode) - } - } - node.OutboundNodes = outboundNodes - - newWF.Status.Nodes[node.ID] = node - } - } - - newWF.Status.StoredTemplates = make(map[string]wfv1.Template) - for id, tmpl := range wf.Status.StoredTemplates { - newWF.Status.StoredTemplates[id] = tmpl - } - - return newWF, podsToDelete, nil -} - -func resetNode(node wfv1.NodeStatus) wfv1.NodeStatus { - // The previously supplied parameters needed to be reset. Otherwise, `argo node reset` would not work as expected. 
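A sketch of how a caller might drive FormulateRetryWorkflow end to end: formulate the retried object, delete the pods of the steps being re-run, then update the stored workflow. The clientset wiring, function name, and error handling are assumptions for illustration, not code from this repository; the resetNode body continues below.

    package retryexample

    import (
        "context"
        "log"

        "github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned"
        "github.com/argoproj/argo-workflows/v3/workflow/util"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // retryWorkflow is a hypothetical caller; only Failed/Error workflows pass
    // the phase check inside FormulateRetryWorkflow.
    func retryWorkflow(ctx context.Context, wfClient versioned.Interface, kubeClient kubernetes.Interface, namespace, name string) error {
        wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return err
        }
        // restartSuccessful=false, no node selector, no parameter overrides.
        newWF, podsToDelete, err := util.FormulateRetryWorkflow(ctx, wf, false, "", nil)
        if err != nil {
            return err
        }
        for _, podName := range podsToDelete {
            if err := kubeClient.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{}); err != nil {
                log.Printf("failed to delete pod %s: %v", podName, err)
            }
        }
        _, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Update(ctx, newWF, metav1.UpdateOptions{})
        return err
    }
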
- if node.Type == wfv1.NodeTypeSuspend { - if node.Outputs != nil { - for i, param := range node.Outputs.Parameters { - node.Outputs.Parameters[i] = wfv1.Parameter{ - Name: param.Name, - Value: nil, - ValueFrom: &wfv1.ValueFrom{Supplied: &wfv1.SuppliedValueFrom{}}, - } - } - } - } - if node.Phase == wfv1.NodeSkipped { - // The skipped nodes need to be kept as skipped. Otherwise, the workflow will be stuck on running. - node.Phase = wfv1.NodeSkipped - } else { - node.Phase = wfv1.NodeRunning - } - node.Message = "" - node.StartedAt = metav1.Time{Time: time.Now().UTC()} - node.FinishedAt = metav1.Time{} - return node -} - -func GetTemplateFromNode(node wfv1.NodeStatus) string { - if node.TemplateRef != nil { - return node.TemplateRef.Template - } - return node.TemplateName -} - -func getNodeIDsToReset(restartSuccessful bool, nodeFieldSelector string, nodes wfv1.Nodes) (map[string]bool, error) { - nodeIDsToReset := make(map[string]bool) - if !restartSuccessful || len(nodeFieldSelector) == 0 { - return nodeIDsToReset, nil - } - - selector, err := fields.ParseSelector(nodeFieldSelector) - if err != nil { - return nil, err - } else { - for _, node := range nodes { - if SelectorMatchesNode(selector, node) { - // traverse all children of the node - var queue []string - queue = append(queue, node.ID) - - for len(queue) > 0 { - childNode := queue[0] - // if the child isn't already in nodeIDsToReset then we add it and traverse its children - if _, present := nodeIDsToReset[childNode]; !present { - nodeIDsToReset[childNode] = true - queue = append(queue, nodes[childNode].Children...) - } - queue = queue[1:] - } - } - } - } - return nodeIDsToReset, nil -} - -var errSuspendedCompletedWorkflow = errors.Errorf(errors.CodeBadRequest, "cannot suspend completed workflows") - -// IsWorkflowSuspended returns whether or not a workflow is considered suspended -func IsWorkflowSuspended(wf *wfv1.Workflow) bool { - if wf.Spec.Suspend != nil && *wf.Spec.Suspend { - return true - } - for _, node := range wf.Status.Nodes { - if node.IsActiveSuspendNode() { - return true - } - } - return false -} - // TerminateWorkflow terminates a workflow by setting its spec.shutdown to ShutdownStrategyTerminate func TerminateWorkflow(ctx context.Context, wfClient v1alpha1.WorkflowInterface, name string) error { return patchShutdownStrategy(ctx, wfClient, name, wfv1.ShutdownStrategyTerminate) } -// StopWorkflow terminates a workflow by setting its spec.shutdown to ShutdownStrategyStop -// Or terminates a single resume step referenced by nodeFieldSelector -func StopWorkflow(ctx context.Context, wfClient v1alpha1.WorkflowInterface, hydrator hydrator.Interface, name string, nodeFieldSelector string, message string) error { - if len(nodeFieldSelector) > 0 { - return updateSuspendedNode(ctx, wfClient, hydrator, name, nodeFieldSelector, SetOperationValues{Phase: wfv1.NodeFailed, Message: message}) - } - return patchShutdownStrategy(ctx, wfClient, name, wfv1.ShutdownStrategyStop) +type AlreadyShutdownError struct { + workflowName string + namespace string +} + +func (e AlreadyShutdownError) Error() string { + return fmt.Sprintf("cannot shutdown a completed workflow: workflow: %q, namespace: %q", e.workflowName, e.namespace) } // patchShutdownStrategy patches the shutdown strategy to a workflow. 
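The AlreadyShutdownError added above gives callers a typed signal that the workflow already finished. A self-contained sketch of detecting it with errors.As; the type is re-declared locally so the example compiles on its own:

    package main

    import (
        "errors"
        "fmt"
    )

    // Re-declaration of the type introduced in the hunk above, for a
    // standalone example.
    type AlreadyShutdownError struct {
        workflowName string
        namespace    string
    }

    func (e AlreadyShutdownError) Error() string {
        return fmt.Sprintf("cannot shutdown a completed workflow: workflow: %q, namespace: %q", e.workflowName, e.namespace)
    }

    func main() {
        err := fmt.Errorf("stopping failed: %w", AlreadyShutdownError{"my-wf", "devtron-cd"})
        var shutdownErr AlreadyShutdownError
        if errors.As(err, &shutdownErr) {
            // Callers can treat an already-completed workflow as a no-op
            // instead of surfacing a hard failure.
            fmt.Println("workflow already finished; nothing to stop")
        }
    }
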
@@ -1079,7 +42,14 @@ func patchShutdownStrategy(ctx context.Context, wfClient v1alpha1.WorkflowInterf return errors.InternalWrapError(err) } err = waitutil.Backoff(retry.DefaultRetry, func() (bool, error) { - _, err := wfClient.Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{}) + wf, err := wfClient.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return !errorsutil.IsTransientErr(err), err + } + if wf.Status.Fulfilled() { + return true, AlreadyShutdownError{wf.Name, wf.Namespace} + } + _, err = wfClient.Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{}) if apierr.IsConflict(err) { return false, nil } @@ -1087,150 +57,3 @@ func patchShutdownStrategy(ctx context.Context, wfClient v1alpha1.WorkflowInterf }) return err } - -func SetWorkflow(ctx context.Context, wfClient v1alpha1.WorkflowInterface, hydrator hydrator.Interface, name string, nodeFieldSelector string, values SetOperationValues) error { - if nodeFieldSelector != "" { - return updateSuspendedNode(ctx, wfClient, hydrator, name, nodeFieldSelector, values) - } - return fmt.Errorf("'set' currently only targets suspend nodes, use a node field selector to target them") -} - -// Reads from stdin -func ReadFromStdin() ([]byte, error) { - reader := bufio.NewReader(os.Stdin) - body, err := ioutil.ReadAll(reader) - if err != nil { - return []byte{}, err - } - return body, err -} - -// Reads the content of a url -func ReadFromUrl(url string) ([]byte, error) { - response, err := http.Get(url) //nolint:gosec - if err != nil { - return nil, err - } - body, err := ioutil.ReadAll(response.Body) - _ = response.Body.Close() - if err != nil { - return nil, err - } - return body, err -} - -// ReadFromFilePathsOrUrls reads the content of a single or a list of file paths and/or urls -func ReadFromFilePathsOrUrls(filePathsOrUrls ...string) ([][]byte, error) { - var fileContents [][]byte - var body []byte - var err error - for _, filePathOrUrl := range filePathsOrUrls { - if cmdutil.IsURL(filePathOrUrl) { - body, err = ReadFromUrl(filePathOrUrl) - if err != nil { - return [][]byte{}, err - } - } else { - body, err = ioutil.ReadFile(filepath.Clean(filePathOrUrl)) - if err != nil { - return [][]byte{}, err - } - } - fileContents = append(fileContents, body) - } - return fileContents, err -} - -// ReadManifest reads from stdin, a single file/url, or a list of files and/or urls -func ReadManifest(manifestPaths ...string) ([][]byte, error) { - var manifestContents [][]byte - var err error - if len(manifestPaths) == 1 && manifestPaths[0] == "-" { - body, err := ReadFromStdin() - if err != nil { - return [][]byte{}, err - } - manifestContents = append(manifestContents, body) - } else { - manifestContents, err = ReadFromFilePathsOrUrls(manifestPaths...) 
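On the shutdown hunk at the top of this block: the patch body is constructed above the visible context, so its exact shape is an assumption here, but a JSON merge patch over spec.shutdown is consistent with the types.MergePatchType call in patchShutdownStrategy:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // Assumed merge-patch payload for StopWorkflow; TerminateWorkflow
        // would set "Terminate" instead.
        patch, err := json.Marshal(map[string]interface{}{
            "spec": map[string]interface{}{
                "shutdown": "Stop",
            },
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(string(patch)) // {"spec":{"shutdown":"Stop"}}
    }
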
- if err != nil { - return [][]byte{}, err - } - } - return manifestContents, err -} - -func IsJSONStr(str string) bool { - str = strings.TrimSpace(str) - return len(str) > 0 && str[0] == '{' -} - -func ConvertYAMLToJSON(str string) (string, error) { - if !IsJSONStr(str) { - jsonStr, err := yaml.YAMLToJSON([]byte(str)) - if err != nil { - return str, err - } - return string(jsonStr), nil - } - return str, nil -} - -// PodSpecPatchMerge will do strategic merge the workflow level PodSpecPatch and template level PodSpecPatch -func PodSpecPatchMerge(wf *wfv1.Workflow, tmpl *wfv1.Template) (string, error) { - wfPatch, err := ConvertYAMLToJSON(wf.Spec.PodSpecPatch) - if err != nil { - return "", err - } - tmplPatch, err := ConvertYAMLToJSON(tmpl.PodSpecPatch) - if err != nil { - return "", err - } - data, err := strategicpatch.StrategicMergePatch([]byte(wfPatch), []byte(tmplPatch), apiv1.PodSpec{}) - return string(data), err -} - -func GetNodeType(tmpl *wfv1.Template) wfv1.NodeType { - return tmpl.GetNodeType() -} - -// IsWindowsUNCPath checks if path is prefixed with \\ -// This can be used to skip any processing of paths -// that point to SMB shares, local named pipes and local UNC path -func IsWindowsUNCPath(path string, tmpl *wfv1.Template) bool { - if !HasWindowsOSNodeSelector(tmpl.NodeSelector) && nruntime.GOOS != "windows" { - return false - } - // Check for UNC prefix \\ - if strings.HasPrefix(path, `\\`) { - return true - } - return false -} - -func HasWindowsOSNodeSelector(nodeSelector map[string]string) bool { - if nodeSelector == nil { - return false - } - - if platform, keyExists := nodeSelector["kubernetes.io/os"]; keyExists && platform == "windows" { - return true - } - - return false -} - -func FindWaitCtrIndex(pod *apiv1.Pod) (int, error) { - waitCtrIndex := -1 - for i, ctr := range pod.Spec.Containers { - switch ctr.Name { - case common.WaitContainerName: - waitCtrIndex = i - } - } - if waitCtrIndex == -1 { - err := errors.Errorf("-1", "Could not find wait container in pod spec") - return -1, err - } - return waitCtrIndex, nil -} diff --git a/vendor/github.com/argoproj/argo-workflows/v3/workflow/validate/validate.go b/vendor/github.com/argoproj/argo-workflows/v3/workflow/validate/validate.go deleted file mode 100644 index e41c4307009..00000000000 --- a/vendor/github.com/argoproj/argo-workflows/v3/workflow/validate/validate.go +++ /dev/null @@ -1,1461 +0,0 @@ -package validate - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "golang.org/x/exp/maps" - - "github.com/robfig/cron/v3" - "github.com/sirupsen/logrus" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apivalidation "k8s.io/apimachinery/pkg/util/validation" - "sigs.k8s.io/yaml" - - "github.com/argoproj/argo-workflows/v3/errors" - wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo-workflows/v3/util" - "github.com/argoproj/argo-workflows/v3/util/intstr" - "github.com/argoproj/argo-workflows/v3/util/sorting" - "github.com/argoproj/argo-workflows/v3/util/template" - "github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs" - "github.com/argoproj/argo-workflows/v3/workflow/common" - "github.com/argoproj/argo-workflows/v3/workflow/metrics" - "github.com/argoproj/argo-workflows/v3/workflow/templateresolution" -) - -// ValidateOpts provides options when linting -type ValidateOpts struct { - // Lint indicates if this is performing validation in the context of linting. 
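PodSpecPatchMerge above layers the template-level podSpecPatch over the workflow-level one using a strategic merge keyed on container name. A runnable sketch with hypothetical patch values:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/strategicpatch"
        "sigs.k8s.io/yaml"
    )

    func main() {
        wfPatch, err := yaml.YAMLToJSON([]byte("containers:\n- name: main\n  resources:\n    limits:\n      memory: 256Mi"))
        if err != nil {
            panic(err)
        }
        tmplPatch, err := yaml.YAMLToJSON([]byte("containers:\n- name: main\n  resources:\n    limits:\n      memory: 512Mi"))
        if err != nil {
            panic(err)
        }
        // The template-level patch wins where the two overlap.
        merged, err := strategicpatch.StrategicMergePatch(wfPatch, tmplPatch, apiv1.PodSpec{})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(merged)) // memory limit resolves to 512Mi
    }
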
If true, will - // skip some validations which is permissible during linting but not submission (e.g. missing - // input parameters to the workflow) - Lint bool - - // IgnoreEntrypoint indicates to skip/ignore the EntryPoint validation on workflow spec. - // Entrypoint is optional for WorkflowTemplate and ClusterWorkflowTemplate - IgnoreEntrypoint bool - - // WorkflowTemplateValidation indicates that the current context is validating a WorkflowTemplate or ClusterWorkflowTemplate - WorkflowTemplateValidation bool - - // Submit indicates that the current operation is a workflow submission. This will impose - // more stringent requirements (e.g. require input values for all spec arguments) - Submit bool -} - -// templateValidationCtx is the context for validating a workflow spec -type templateValidationCtx struct { - ValidateOpts - - // globalParams keeps track of variables which are available the global - // scope and can be referenced from anywhere. - globalParams map[string]string - // results tracks if validation has already been run on a template - results map[string]bool - // wf is the Workflow resource which is used to validate templates. - // It will be omitted in WorkflowTemplate validation. - wf *wfv1.Workflow -} - -func newTemplateValidationCtx(wf *wfv1.Workflow, opts ValidateOpts) *templateValidationCtx { - globalParams := make(map[string]string) - globalParams[common.GlobalVarWorkflowName] = placeholderGenerator.NextPlaceholder() - globalParams[common.GlobalVarWorkflowNamespace] = placeholderGenerator.NextPlaceholder() - globalParams[common.GlobalVarWorkflowServiceAccountName] = placeholderGenerator.NextPlaceholder() - globalParams[common.GlobalVarWorkflowUID] = placeholderGenerator.NextPlaceholder() - return &templateValidationCtx{ - ValidateOpts: opts, - globalParams: globalParams, - results: make(map[string]bool), - wf: wf, - } -} - -const ( - // anyItemMagicValue is a magic value set in addItemsToScope() and checked in - // resolveAllVariables() to determine if any {{item.name}} can be accepted during - // variable resolution (to support withParam) - anyItemMagicValue = "item.*" - anyWorkflowOutputParameterMagicValue = "workflow.outputs.parameters.*" - anyWorkflowOutputArtifactMagicValue = "workflow.outputs.artifacts.*" - // The maximum length of maxCharsInObjectName is 63 characters because of the limitation of Kubernetes label - // For details, please refer to: https://stackoverflow.com/questions/50412837/kubernetes-label-name-63-character-limit - maxCharsInObjectName = 63 - // CronWorkflows have fewer max chars allowed in their name because when workflows are created from them, they - // are appended with the unix timestamp (`-1615836720`). This lower character allowance allows for that timestamp - // to still fit within the 63 character maximum. - maxCharsInCronWorkflowName = 52 -) - -var placeholderGenerator = common.NewPlaceholderGenerator() - -type FakeArguments struct{} - -func (args *FakeArguments) GetParameterByName(name string) *wfv1.Parameter { - s := placeholderGenerator.NextPlaceholder() - return &wfv1.Parameter{Name: name, Value: wfv1.AnyStringPtr(s)} -} - -func (args *FakeArguments) GetArtifactByName(name string) *wfv1.Artifact { - return &wfv1.Artifact{Name: name} -} - -var _ wfv1.ArgumentsProvider = &FakeArguments{} - -// ValidateWorkflow accepts a workflow and performs validation against it. 
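For orientation, a sketch of how the ValidateOpts flags above are typically combined; the two call sites shown are illustrative assumptions, not code from this diff, and ValidateWorkflow itself follows below:

    package main

    import "github.com/argoproj/argo-workflows/v3/workflow/validate"

    func main() {
        // Linting: tolerate spec.arguments.parameters without explicit values.
        lintOpts := validate.ValidateOpts{Lint: true}
        // Submission: require values for all spec arguments.
        submitOpts := validate.ValidateOpts{Submit: true}
        _, _ = lintOpts, submitOpts
    }
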
-func ValidateWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wf *wfv1.Workflow, opts ValidateOpts) error { - ctx := newTemplateValidationCtx(wf, opts) - tmplCtx := templateresolution.NewContext(wftmplGetter, cwftmplGetter, wf, wf) - var wfSpecHolder wfv1.WorkflowSpecHolder - var wfTmplRef *wfv1.TemplateRef - var err error - - if len(wf.Name) > maxCharsInObjectName { - return fmt.Errorf("workflow name %q must not be more than 63 characters long (currently %d)", wf.Name, len(wf.Name)) - } - - entrypoint := wf.Spec.Entrypoint - - hasWorkflowTemplateRef := wf.Spec.WorkflowTemplateRef != nil - - if hasWorkflowTemplateRef { - err := ValidateWorkflowTemplateRefFields(wf.Spec) - if err != nil { - return err - } - if wf.Spec.WorkflowTemplateRef.ClusterScope { - wfSpecHolder, err = cwftmplGetter.Get(wf.Spec.WorkflowTemplateRef.Name) - } else { - wfSpecHolder, err = wftmplGetter.Get(wf.Spec.WorkflowTemplateRef.Name) - } - if err != nil { - return err - } - if entrypoint == "" { - entrypoint = wfSpecHolder.GetWorkflowSpec().Entrypoint - } - wfTmplRef = wf.Spec.WorkflowTemplateRef.ToTemplateRef(entrypoint) - } - err = validateWorkflowFieldNames(wf.Spec.Templates) - - wfArgs := wf.Spec.Arguments - - if wf.Spec.WorkflowTemplateRef != nil { - wfArgs.Parameters = util.MergeParameters(wfArgs.Parameters, wfSpecHolder.GetWorkflowSpec().Arguments.Parameters) - wfArgs.Artifacts = util.MergeArtifacts(wfArgs.Artifacts, wfSpecHolder.GetWorkflowSpec().Arguments.Artifacts) - } - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "spec.templates%s", err.Error()) - } - - // if we are linting, we don't care if spec.arguments.parameters.XXX doesn't have an - // explicit value. Workflow templates without a default value are also a desired use - // case, since values will be provided during workflow submission. - allowEmptyValues := ctx.Lint || (ctx.WorkflowTemplateValidation && !ctx.Submit) - err = validateArguments("spec.arguments.", wfArgs, allowEmptyValues) - if err != nil { - return err - } - if len(wfArgs.Parameters) > 0 { - ctx.globalParams[common.GlobalVarWorkflowParameters] = placeholderGenerator.NextPlaceholder() - ctx.globalParams[common.GlobalVarWorkflowParametersJSON] = placeholderGenerator.NextPlaceholder() - } - - for _, param := range wfArgs.Parameters { - if param.Name != "" { - if param.Value != nil { - ctx.globalParams["workflow.parameters."+param.Name] = param.Value.String() - } else { - ctx.globalParams["workflow.parameters."+param.Name] = placeholderGenerator.NextPlaceholder() - } - } - } - - annotationSources := [][]string{maps.Keys(wf.ObjectMeta.Annotations)} - labelSources := [][]string{maps.Keys(wf.ObjectMeta.Labels)} - if wf.Spec.WorkflowMetadata != nil { - annotationSources = append(annotationSources, maps.Keys(wf.Spec.WorkflowMetadata.Annotations)) - labelSources = append(labelSources, maps.Keys(wf.Spec.WorkflowMetadata.Labels), maps.Keys(wf.Spec.WorkflowMetadata.LabelsFrom)) - } - if wf.Spec.WorkflowTemplateRef != nil && wfSpecHolder.GetWorkflowSpec().WorkflowMetadata != nil { - annotationSources = append(annotationSources, maps.Keys(wfSpecHolder.GetWorkflowSpec().WorkflowMetadata.Annotations)) - labelSources = append(labelSources, maps.Keys(wfSpecHolder.GetWorkflowSpec().WorkflowMetadata.Labels), maps.Keys(wfSpecHolder.GetWorkflowSpec().WorkflowMetadata.LabelsFrom)) - } - mergedAnnotations := getUniqueKeys(annotationSources...) - mergedLabels := getUniqueKeys(labelSources...) 
- - for k := range mergedAnnotations { - ctx.globalParams["workflow.annotations."+k] = placeholderGenerator.NextPlaceholder() - } - ctx.globalParams[common.GlobalVarWorkflowAnnotations] = placeholderGenerator.NextPlaceholder() - ctx.globalParams[common.GlobalVarWorkflowAnnotationsJSON] = placeholderGenerator.NextPlaceholder() - - for k := range mergedLabels { - ctx.globalParams["workflow.labels."+k] = placeholderGenerator.NextPlaceholder() - } - ctx.globalParams[common.GlobalVarWorkflowLabels] = placeholderGenerator.NextPlaceholder() - ctx.globalParams[common.GlobalVarWorkflowLabelsJSON] = placeholderGenerator.NextPlaceholder() - - if wf.Spec.Priority != nil { - ctx.globalParams[common.GlobalVarWorkflowPriority] = strconv.Itoa(int(*wf.Spec.Priority)) - } - ctx.globalParams[common.GlobalVarWorkflowStatus] = placeholderGenerator.NextPlaceholder() - - if !opts.IgnoreEntrypoint && entrypoint == "" { - return errors.New(errors.CodeBadRequest, "spec.entrypoint is required") - } - - if !opts.IgnoreEntrypoint { - var args wfv1.ArgumentsProvider - args = &wfArgs - if opts.WorkflowTemplateValidation { - args = &FakeArguments{} - } - tmpl := &wfv1.WorkflowStep{Template: entrypoint} - if hasWorkflowTemplateRef { - tmpl = &wfv1.WorkflowStep{TemplateRef: wfTmplRef} - } - _, err = ctx.validateTemplateHolder(tmpl, tmplCtx, args, opts.WorkflowTemplateValidation) - if err != nil { - return err - } - } - if wf.Spec.OnExit != "" { - ctx.globalParams[common.GlobalVarWorkflowFailures] = placeholderGenerator.NextPlaceholder() - _, err = ctx.validateTemplateHolder(&wfv1.WorkflowStep{Template: wf.Spec.OnExit}, tmplCtx, &wf.Spec.Arguments, opts.WorkflowTemplateValidation) - if err != nil { - return err - } - } - - if !wf.Spec.PodGC.GetStrategy().IsValid() { - return errors.Errorf(errors.CodeBadRequest, "podGC.strategy unknown strategy '%s'", wf.Spec.PodGC.Strategy) - } - if _, err := wf.Spec.PodGC.GetLabelSelector(); err != nil { - return errors.Errorf(errors.CodeBadRequest, "podGC.labelSelector invalid: %v", err) - } - - // Check if all templates can be resolved. - for _, template := range wf.Spec.Templates { - _, err := ctx.validateTemplateHolder(&wfv1.WorkflowStep{Template: template.Name}, tmplCtx, &FakeArguments{}, opts.WorkflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s %s", template.Name, err.Error()) - } - } - return nil -} - -// construct a Set of unique keys -func getUniqueKeys(labelSources ...[]string) map[string]struct{} { - uniqueKeys := make(map[string]struct{}) - for _, labelSource := range labelSources { - for _, label := range labelSource { - uniqueKeys[label] = struct{}{} // dummy value - } - } - return uniqueKeys -} - -func ValidateWorkflowTemplateRefFields(wfSpec wfv1.WorkflowSpec) error { - if len(wfSpec.Templates) > 0 { - return errors.Errorf(errors.CodeBadRequest, "Templates is invalid field in spec if workflow referred WorkflowTemplate reference") - } - return nil -} - -// ValidateWorkflowTemplate accepts a workflow template and performs validation against it. 
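getUniqueKeys above is a plain set union over slices of keys. A usage sketch with hypothetical label keys; the helper is re-declared so the snippet stands alone, and ValidateWorkflowTemplate follows below:

    package main

    import "fmt"

    // Same shape as the getUniqueKeys helper above.
    func getUniqueKeys(labelSources ...[]string) map[string]struct{} {
        uniqueKeys := make(map[string]struct{})
        for _, labelSource := range labelSources {
            for _, label := range labelSource {
                uniqueKeys[label] = struct{}{}
            }
        }
        return uniqueKeys
    }

    func main() {
        merged := getUniqueKeys([]string{"team", "env"}, []string{"env", "release"})
        fmt.Println(len(merged)) // 3: team, env, release
    }
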
-func ValidateWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, wftmpl *wfv1.WorkflowTemplate, opts ValidateOpts) error { - if len(wftmpl.Name) > maxCharsInObjectName { - return fmt.Errorf("workflow template name %q must not be more than 63 characters long (currently %d)", wftmpl.Name, len(wftmpl.Name)) - } - - wf := &wfv1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Labels: wftmpl.ObjectMeta.Labels, - Annotations: wftmpl.ObjectMeta.Annotations, - }, - Spec: wftmpl.Spec, - } - opts.IgnoreEntrypoint = wf.Spec.Entrypoint == "" - opts.WorkflowTemplateValidation = true - return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, opts) -} - -// ValidateClusterWorkflowTemplate accepts a cluster workflow template and performs validation against it. -func ValidateClusterWorkflowTemplate(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, cwftmpl *wfv1.ClusterWorkflowTemplate, opts ValidateOpts) error { - if len(cwftmpl.Name) > maxCharsInObjectName { - return fmt.Errorf("cluster workflow template name %q must not be more than 63 characters long (currently %d)", cwftmpl.Name, len(cwftmpl.Name)) - } - - wf := &wfv1.Workflow{ - ObjectMeta: v1.ObjectMeta{ - Labels: cwftmpl.ObjectMeta.Labels, - Annotations: cwftmpl.ObjectMeta.Annotations, - }, - Spec: cwftmpl.Spec, - } - opts.IgnoreEntrypoint = wf.Spec.Entrypoint == "" - opts.WorkflowTemplateValidation = true - return ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, opts) -} - -// ValidateCronWorkflow validates a CronWorkflow -func ValidateCronWorkflow(wftmplGetter templateresolution.WorkflowTemplateNamespacedGetter, cwftmplGetter templateresolution.ClusterWorkflowTemplateGetter, cronWf *wfv1.CronWorkflow) error { - // CronWorkflows have fewer max chars allowed in their name because when workflows are created from them, they - // are appended with the unix timestamp (`-1615836720`). This lower character allowance allows for that timestamp - // to still fit within the 63 character maximum. 
- if len(cronWf.Name) > maxCharsInCronWorkflowName { - return fmt.Errorf("cron workflow name %q must not be more than 52 characters long (currently %d)", cronWf.Name, len(cronWf.Name)) - } - - if _, err := cron.ParseStandard(cronWf.Spec.Schedule); err != nil { - return errors.Errorf(errors.CodeBadRequest, "cron schedule is malformed: %s", err) - } - - switch cronWf.Spec.ConcurrencyPolicy { - case wfv1.AllowConcurrent, wfv1.ForbidConcurrent, wfv1.ReplaceConcurrent, "": - // Do nothing - default: - return errors.Errorf(errors.CodeBadRequest, "'%s' is not a valid concurrencyPolicy", cronWf.Spec.ConcurrencyPolicy) - } - - if cronWf.Spec.StartingDeadlineSeconds != nil && *cronWf.Spec.StartingDeadlineSeconds < 0 { - return errors.Errorf(errors.CodeBadRequest, "startingDeadlineSeconds must be positive") - } - - wf := common.ConvertCronWorkflowToWorkflow(cronWf) - - err := ValidateWorkflow(wftmplGetter, cwftmplGetter, wf, ValidateOpts{}) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "cannot validate Workflow: %s", err) - } - return nil -} - -func (ctx *templateValidationCtx) validateInitContainers(containers []wfv1.UserContainer) error { - for _, container := range containers { - if len(container.Container.Name) == 0 { - return errors.Errorf(errors.CodeBadRequest, "initContainers must all have container name") - } - } - return nil -} - -func (ctx *templateValidationCtx) validateTemplate(tmpl *wfv1.Template, tmplCtx *templateresolution.Context, args wfv1.ArgumentsProvider, workflowTemplateValidation bool) error { - if err := validateTemplateType(tmpl); err != nil { - return err - } - - scope, err := validateInputs(tmpl) - if err != nil { - return err - } - - if err := ctx.validateInitContainers(tmpl.InitContainers); err != nil { - return err - } - - localParams := make(map[string]string) - if tmpl.IsPodType() { - localParams[common.LocalVarPodName] = placeholderGenerator.NextPlaceholder() - scope[common.LocalVarPodName] = placeholderGenerator.NextPlaceholder() - } - if tmpl.RetryStrategy != nil { - localParams[common.LocalVarRetries] = placeholderGenerator.NextPlaceholder() - localParams[common.LocalVarRetriesLastExitCode] = placeholderGenerator.NextPlaceholder() - localParams[common.LocalVarRetriesLastStatus] = placeholderGenerator.NextPlaceholder() - localParams[common.LocalVarRetriesLastDuration] = placeholderGenerator.NextPlaceholder() - scope[common.LocalVarRetries] = placeholderGenerator.NextPlaceholder() - scope[common.LocalVarRetriesLastExitCode] = placeholderGenerator.NextPlaceholder() - scope[common.LocalVarRetriesLastStatus] = placeholderGenerator.NextPlaceholder() - scope[common.LocalVarRetriesLastDuration] = placeholderGenerator.NextPlaceholder() - } - if tmpl.IsLeaf() { - for _, art := range tmpl.Outputs.Artifacts { - if art.Path != "" { - scope[fmt.Sprintf("outputs.artifacts.%s.path", art.Name)] = true - } - } - for _, param := range tmpl.Outputs.Parameters { - if param.ValueFrom != nil && param.ValueFrom.Path != "" { - scope[fmt.Sprintf("outputs.parameters.%s.path", param.Name)] = true - } - } - } - - newTmpl, err := common.ProcessArgs(tmpl, args, ctx.globalParams, localParams, true, "", nil) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s %s", tmpl.Name, err) - } - - if newTmpl.Timeout != "" { - if !newTmpl.IsLeaf() { - return fmt.Errorf("%s template doesn't support timeout field.", newTmpl.GetType()) - } - // Check timeout should not be a whole number - _, err := strconv.Atoi(newTmpl.Timeout) - if err == nil { - return fmt.Errorf("%s 
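On the schedule check near the top of this block: cron.ParseStandard accepts the standard five-field syntax and rejects out-of-range values, as this runnable sketch shows (the schedules are hypothetical):

    package main

    import (
        "fmt"

        "github.com/robfig/cron/v3"
    )

    func main() {
        // Accepted: every five minutes.
        if _, err := cron.ParseStandard("*/5 * * * *"); err != nil {
            panic(err)
        }
        // Rejected: minute field out of range.
        if _, err := cron.ParseStandard("61 * * * *"); err != nil {
            fmt.Println("rejected:", err)
        }
    }
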
has invalid duration format in timeout.", newTmpl.Name) - } - - } - - tmplID := getTemplateID(tmpl) - _, ok := ctx.results[tmplID] - if ok { - // we can skip the rest since it has been validated. - return nil - } - ctx.results[tmplID] = true - - for globalVar, val := range ctx.globalParams { - scope[globalVar] = val - } - switch newTmpl.GetType() { - case wfv1.TemplateTypeSteps: - err = ctx.validateSteps(scope, tmplCtx, newTmpl, workflowTemplateValidation) - case wfv1.TemplateTypeDAG: - err = ctx.validateDAG(scope, tmplCtx, newTmpl, workflowTemplateValidation) - default: - err = ctx.validateLeaf(scope, newTmpl, workflowTemplateValidation) - } - if err != nil { - return err - } - err = validateOutputs(scope, ctx.globalParams, newTmpl, workflowTemplateValidation) - if err != nil { - return err - } - if newTmpl.ArchiveLocation != nil { - errPrefix := fmt.Sprintf("templates.%s.archiveLocation", newTmpl.Name) - err = validateArtifactLocation(errPrefix, *newTmpl.ArchiveLocation) - if err != nil { - return err - } - } - if newTmpl.Metrics != nil { - for _, metric := range newTmpl.Metrics.Prometheus { - if !metrics.IsValidMetricName(metric.Name) { - return errors.Errorf(errors.CodeBadRequest, "templates.%s metric name '%s' is invalid. Metric names must contain alphanumeric characters, '_', or ':'", tmpl.Name, metric.Name) - } - if err := metrics.ValidateMetricLabels(metric.GetMetricLabels()); err != nil { - return err - } - if metric.Help == "" { - return errors.Errorf(errors.CodeBadRequest, "templates.%s metric '%s' must contain a help string under 'help: ' field", tmpl.Name, metric.Name) - } - if err := metrics.ValidateMetricValues(metric); err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s metric '%s' error: %s", tmpl.Name, metric.Name, err) - } - } - } - return nil -} - -// validateTemplateHolder validates a template holder and returns the validated template. -func (ctx *templateValidationCtx) validateTemplateHolder(tmplHolder wfv1.TemplateReferenceHolder, tmplCtx *templateresolution.Context, args wfv1.ArgumentsProvider, workflowTemplateValidation bool) (*wfv1.Template, error) { - tmplRef := tmplHolder.GetTemplateRef() - tmplName := tmplHolder.GetTemplateName() - if tmplRef != nil { - if tmplName != "" { - return nil, errors.New(errors.CodeBadRequest, "template name cannot be specified with templateRef.") - } - if tmplRef.Name == "" { - return nil, errors.New(errors.CodeBadRequest, "resource name is required") - } - if tmplRef.Template == "" { - return nil, errors.New(errors.CodeBadRequest, "template name is required") - } - } else if tmplName != "" { - _, err := tmplCtx.GetTemplateByName(tmplName) - if err != nil { - if argoerr, ok := err.(errors.ArgoError); ok && argoerr.Code() == errors.CodeNotFound { - return nil, errors.Errorf(errors.CodeBadRequest, "template name '%s' undefined", tmplName) - } - return nil, err - } - } - - tmplCtx, resolvedTmpl, _, err := tmplCtx.ResolveTemplate(tmplHolder) - if err != nil { - if argoerr, ok := err.(errors.ArgoError); ok && argoerr.Code() == errors.CodeNotFound { - if tmplRef != nil { - return nil, errors.Errorf(errors.CodeBadRequest, "template reference %s.%s not found", tmplRef.Name, tmplRef.Template) - } - // this error should not occur. 
- return nil, errors.InternalWrapError(err) - } - return nil, err - } - - // Validate retryStrategy - if resolvedTmpl.RetryStrategy != nil { - switch resolvedTmpl.RetryStrategy.RetryPolicy { - case wfv1.RetryPolicyAlways, wfv1.RetryPolicyOnError, wfv1.RetryPolicyOnFailure, wfv1.RetryPolicyOnTransientError, "": - // Passes validation - default: - return nil, fmt.Errorf("%s is not a valid RetryPolicy", resolvedTmpl.RetryStrategy.RetryPolicy) - } - } - - return resolvedTmpl, ctx.validateTemplate(resolvedTmpl, tmplCtx, args, workflowTemplateValidation) -} - -// validateTemplateType validates that only one template type is defined -func validateTemplateType(tmpl *wfv1.Template) error { - numTypes := 0 - for _, tmplType := range []interface{}{tmpl.Container, tmpl.ContainerSet, tmpl.Steps, tmpl.Script, tmpl.Resource, tmpl.DAG, tmpl.Suspend, tmpl.Data, tmpl.HTTP, tmpl.Plugin} { - if !reflect.ValueOf(tmplType).IsNil() { - numTypes++ - } - } - switch numTypes { - case 0: - return errors.Errorf(errors.CodeBadRequest, "templates.%s template type unspecified. choose one of: container, containerSet, steps, script, resource, dag, suspend, template, template ref", tmpl.Name) - case 1: - // Do nothing - default: - return errors.Errorf(errors.CodeBadRequest, "templates.%s multiple template types specified. choose one of: container, containerSet, steps, script, resource, dag, suspend, template, template ref", tmpl.Name) - } - return nil -} - -func validateInputs(tmpl *wfv1.Template) (map[string]interface{}, error) { - err := validateWorkflowFieldNames(tmpl.Inputs.Parameters) - if err != nil { - return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.inputs.parameters%s", tmpl.Name, err.Error()) - } - err = validateWorkflowFieldNames(tmpl.Inputs.Artifacts) - if err != nil { - return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.inputs.artifacts%s", tmpl.Name, err.Error()) - } - scope := make(map[string]interface{}) - for _, param := range tmpl.Inputs.Parameters { - scope[fmt.Sprintf("inputs.parameters.%s", param.Name)] = true - } - if len(tmpl.Inputs.Parameters) > 0 { - scope["inputs.parameters"] = true - } - - for _, art := range tmpl.Inputs.Artifacts { - artRef := fmt.Sprintf("inputs.artifacts.%s", art.Name) - scope[artRef] = true - if tmpl.IsLeaf() { - err = art.CleanPath() - if err != nil { - return nil, errors.Errorf(errors.CodeBadRequest, "error in templates.%s.%s: %s", tmpl.Name, artRef, err.Error()) - } - scope[fmt.Sprintf("inputs.artifacts.%s.path", art.Name)] = true - } else { - if art.Path != "" { - return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.path only valid in container/script templates", tmpl.Name, artRef) - } - } - if art.From != "" { - return nil, errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.from not valid in inputs", tmpl.Name, artRef) - } - errPrefix := fmt.Sprintf("templates.%s.%s", tmpl.Name, artRef) - err = validateArtifactLocation(errPrefix, art.ArtifactLocation) - if err != nil { - return nil, err - } - } - return scope, nil -} - -func validateArtifactLocation(errPrefix string, art wfv1.ArtifactLocation) error { - if art.Git != nil { - if art.Git.Repo == "" { - return errors.Errorf(errors.CodeBadRequest, "%s.git.repo is required", errPrefix) - } - } - if art.HDFS != nil { - err := hdfs.ValidateArtifact(fmt.Sprintf("%s.hdfs", errPrefix), art.HDFS) - if err != nil { - return err - } - } - // TODO: validate other artifact locations - return nil -} - -// resolveAllVariables is a helper to ensure all {{variables}} are resolvable from current 
scope -func resolveAllVariables(scope map[string]interface{}, globalParams map[string]string, tmplStr string, workflowTemplateValidation bool) error { - _, allowAllItemRefs := scope[anyItemMagicValue] // 'item.*' is a magic placeholder value set by addItemsToScope - _, allowAllWorkflowOutputParameterRefs := scope[anyWorkflowOutputParameterMagicValue] - _, allowAllWorkflowOutputArtifactRefs := scope[anyWorkflowOutputArtifactMagicValue] - return template.Validate(tmplStr, func(tag string) error { - // Skip the custom variable references - if !checkValidWorkflowVariablePrefix(tag) { - return nil - } - _, ok := scope[tag] - _, isGlobal := globalParams[tag] - if !ok && !isGlobal { - if (tag == "item" || strings.HasPrefix(tag, "item.")) && allowAllItemRefs { - // we are *probably* referencing a undetermined item using withParam - // NOTE: this is far from foolproof. - } else if strings.HasPrefix(tag, "workflow.outputs.parameters.") && allowAllWorkflowOutputParameterRefs { - // Allow runtime resolution of workflow output parameter names - } else if strings.HasPrefix(tag, "workflow.outputs.artifacts.") && allowAllWorkflowOutputArtifactRefs { - // Allow runtime resolution of workflow output artifact names - } else if strings.HasPrefix(tag, "outputs.") { - // We are self referencing for metric emission, allow it. - } else if strings.HasPrefix(tag, common.GlobalVarWorkflowCreationTimestamp) { - } else if strings.HasPrefix(tag, common.GlobalVarWorkflowCronScheduleTime) { - // Allow runtime resolution for "scheduledTime" which will pass from CronWorkflow - } else if strings.HasPrefix(tag, common.GlobalVarWorkflowDuration) { - } else if strings.HasPrefix(tag, "tasks.name") { - } else if strings.HasPrefix(tag, "steps.name") { - } else if strings.HasPrefix(tag, "node.name") { - } else if strings.HasPrefix(tag, "workflow.parameters") && workflowTemplateValidation { - // If we are simply validating a WorkflowTemplate in isolation, some of the parameters may come from the Workflow that uses it - } else { - return fmt.Errorf("failed to resolve {{%s}}", tag) - } - } - return nil - }) -} - -// checkValidWorkflowVariablePrefix is a helper methood check variable starts workflow root elements -func checkValidWorkflowVariablePrefix(tag string) bool { - for _, rootTag := range common.GlobalVarValidWorkflowVariablePrefix { - if strings.HasPrefix(tag, rootTag) { - return true - } - } - return false -} - -func validateNonLeaf(tmpl *wfv1.Template) error { - if tmpl.ActiveDeadlineSeconds != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.activeDeadlineSeconds is only valid for leaf templates", tmpl.Name) - } - return nil -} - -func (ctx *templateValidationCtx) validateLeaf(scope map[string]interface{}, tmpl *wfv1.Template, workflowTemplateValidation bool) error { - tmplBytes, err := json.Marshal(tmpl) - if err != nil { - return errors.InternalWrapError(err) - } - err = resolveAllVariables(scope, ctx.globalParams, string(tmplBytes), workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s: %s", tmpl.Name, err.Error()) - } - if tmpl.Container != nil { - // Ensure there are no collisions with volume mountPaths and artifact load paths - mountPaths := make(map[string]string) - for i, volMount := range tmpl.Container.VolumeMounts { - if prev, ok := mountPaths[volMount.MountPath]; ok { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.container.volumeMounts[%d].mountPath '%s' already mounted in %s", tmpl.Name, i, volMount.MountPath, prev) - } - 
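A reduced model of the scope lookup that resolveAllVariables above performs, including the anyItemMagicValue wildcard for withParam; the tags and scope entries are hypothetical, and validateLeaf continues below:

    package main

    import (
        "fmt"
        "strings"
    )

    // resolves models the callback inside resolveAllVariables: every {{tag}}
    // must be present in scope unless a magic wildcard admits it.
    func resolves(scope map[string]interface{}, tag string) error {
        if _, ok := scope[tag]; ok {
            return nil
        }
        if strings.HasPrefix(tag, "item.") {
            if _, ok := scope["item.*"]; ok { // anyItemMagicValue
                return nil
            }
        }
        return fmt.Errorf("failed to resolve {{%s}}", tag)
    }

    func main() {
        scope := map[string]interface{}{"inputs.parameters.msg": true, "item.*": true}
        fmt.Println(resolves(scope, "inputs.parameters.msg")) // <nil>
        fmt.Println(resolves(scope, "item.city"))             // <nil>, via the withParam wildcard
        fmt.Println(resolves(scope, "steps.missing.id"))      // resolution error
    }
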
mountPaths[volMount.MountPath] = fmt.Sprintf("container.volumeMounts.%s", volMount.Name) - } - for i, art := range tmpl.Inputs.Artifacts { - if prev, ok := mountPaths[art.Path]; ok { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.inputs.artifacts[%d].path '%s' already mounted in %s", tmpl.Name, i, art.Path, prev) - } - mountPaths[art.Path] = fmt.Sprintf("inputs.artifacts.%s", art.Name) - } - if tmpl.Container.Image == "" { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.container.image may not be empty", tmpl.Name) - } - } - if tmpl.ContainerSet != nil { - err = tmpl.ContainerSet.Validate() - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.containerSet.%s", tmpl.Name, err.Error()) - } - if len(tmpl.Inputs.Artifacts) > 0 || len(tmpl.Outputs.Parameters) > 0 || len(tmpl.Outputs.Artifacts) > 0 { - if !tmpl.ContainerSet.HasContainerNamed("main") { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.containerSet.containers must have a container named \"main\" for input or output", tmpl.Name) - } - } - - } - if tmpl.Resource != nil { - if !placeholderGenerator.IsPlaceholder(tmpl.Resource.Action) { - switch tmpl.Resource.Action { - case "get", "create", "apply", "delete", "replace", "patch": - // OK - default: - return errors.Errorf(errors.CodeBadRequest, "templates.%s.resource.action must be one of: get, create, apply, delete, replace, patch", tmpl.Name) - } - } - if tmpl.Resource.Action != "delete" && tmpl.Resource.Action != "get" { - if tmpl.Resource.Manifest == "" && tmpl.Resource.ManifestFrom == nil { - return errors.Errorf(errors.CodeBadRequest, "either templates.%s.resource.manifest or templates.%s.resource.manifestFrom must be specified", tmpl.Name, tmpl.Name) - } - if tmpl.Resource.Manifest != "" && tmpl.Resource.ManifestFrom != nil { - return errors.Errorf(errors.CodeBadRequest, "shouldn't have both `manifest` and `manifestFrom` specified in `Manifest` for resource template") - } - if tmpl.Resource.ManifestFrom != nil && tmpl.Resource.ManifestFrom.Artifact != nil { - var found bool - for _, art := range tmpl.Inputs.Artifacts { - if tmpl.Resource.ManifestFrom.Artifact.Name == art.Name { - found = true - break - } - } - if !found { - return errors.Errorf(errors.CodeBadRequest, "artifact %s in `manifestFrom` refer to a non-exist artifact", tmpl.Resource.ManifestFrom.Artifact.Name) - } - } - if tmpl.Resource.Manifest != "" && !placeholderGenerator.IsPlaceholder(tmpl.Resource.Manifest) { - // Try to unmarshal the given manifest, just ensuring it's a valid YAML. 
- var obj interface{} - err := yaml.Unmarshal([]byte(tmpl.Resource.Manifest), &obj) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.resource.manifest must be a valid yaml", tmpl.Name) - } - } - } - } - if tmpl.Script != nil { - if tmpl.Script.Image == "" { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.script.image may not be empty", tmpl.Name) - } - } - // we don't validate tmpl.Plugin, because this is done by Plugin.UnmarshallJSON - if tmpl.ActiveDeadlineSeconds != nil { - if !intstr.IsValidIntOrArgoVariable(tmpl.ActiveDeadlineSeconds) && !placeholderGenerator.IsPlaceholder(tmpl.ActiveDeadlineSeconds.StrVal) { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.activeDeadlineSeconds must be a positive integer > 0 or an argo variable", tmpl.Name) - } - if i, err := intstr.Int(tmpl.ActiveDeadlineSeconds); err == nil && i != nil && *i < 0 { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.activeDeadlineSeconds must be a positive integer > 0 or an argo variable", tmpl.Name) - } - } - if tmpl.Parallelism != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.parallelism is only valid for steps and dag templates", tmpl.Name) - } - return nil -} - -func validateArguments(prefix string, arguments wfv1.Arguments, allowEmptyValues bool) error { - err := validateArgumentsFieldNames(prefix, arguments) - if err != nil { - return err - } - return validateArgumentsValues(prefix, arguments, allowEmptyValues) -} - -func validateArgumentsFieldNames(prefix string, arguments wfv1.Arguments) error { - fieldToSlices := map[string]interface{}{ - "parameters": arguments.Parameters, - "artifacts": arguments.Artifacts, - } - for fieldName, lst := range fieldToSlices { - err := validateWorkflowFieldNames(lst) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "%s%s%s", prefix, fieldName, err.Error()) - } - } - return nil -} - -// validateArgumentsValues ensures that all arguments have parameter values or artifact locations -func validateArgumentsValues(prefix string, arguments wfv1.Arguments, allowEmptyValues bool) error { - for _, param := range arguments.Parameters { - if param.ValueFrom == nil && param.Value == nil { - if !allowEmptyValues { - return errors.Errorf(errors.CodeBadRequest, "%s%s.value is required", prefix, param.Name) - } - } - if param.Enum != nil { - if len(param.Enum) == 0 { - return errors.Errorf(errors.CodeBadRequest, "%s%s.enum should contain at least one value", prefix, param.Name) - } - if param.Value == nil { - return errors.Errorf(errors.CodeBadRequest, "%s%s.value is required", prefix, param.Name) - } - valueSpecifiedInEnumList := false - for _, enum := range param.Enum { - if enum == *param.Value { - valueSpecifiedInEnumList = true - break - } - } - if !valueSpecifiedInEnumList { - return errors.Errorf(errors.CodeBadRequest, "%s%s.value should be present in %s%s.enum list", prefix, param.Name, prefix, param.Name) - } - } - } - for _, art := range arguments.Artifacts { - if art.From == "" && !art.HasLocationOrKey() { - return errors.Errorf(errors.CodeBadRequest, "%s%s.from, artifact location, or key is required", prefix, art.Name) - } - if art.From != "" && art.FromExpression != "" { - return errors.Errorf(errors.CodeBadRequest, "%s%s shouldn't have both `from` and `fromExpression` in Artifact", prefix, art.Name) - } - } - return nil -} - -func (ctx *templateValidationCtx) validateSteps(scope map[string]interface{}, tmplCtx *templateresolution.Context, tmpl *wfv1.Template, workflowTemplateValidation 
bool) error { - err := validateNonLeaf(tmpl) - if err != nil { - return err - } - stepNames := make(map[string]bool) - resolvedTemplates := make(map[string]*wfv1.Template) - for i, stepGroup := range tmpl.Steps { - for _, step := range stepGroup.Steps { - if step.Name == "" { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].name is required", tmpl.Name, i) - } - _, ok := stepNames[step.Name] - if ok { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].name '%s' is not unique", tmpl.Name, i, step.Name) - } - if errs := isValidWorkflowFieldName(step.Name); len(errs) != 0 { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].name '%s' is invalid: %s", tmpl.Name, i, step.Name, strings.Join(errs, ";")) - } - stepNames[step.Name] = true - prefix := fmt.Sprintf("steps.%s", step.Name) - scope[fmt.Sprintf("%s.status", prefix)] = true - err := addItemsToScope(step.WithItems, step.WithParam, step.WithSequence, scope) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].%s %s", tmpl.Name, i, step.Name, err.Error()) - } - err = validateArguments(fmt.Sprintf("templates.%s.steps[%d].%s.arguments.", tmpl.Name, i, step.Name), step.Arguments, false) - if err != nil { - return err - } - resolvedTmpl, err := ctx.validateTemplateHolder(&step, tmplCtx, &FakeArguments{}, workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].%s %s", tmpl.Name, i, step.Name, err.Error()) - } - - if step.HasExitHook() { - ctx.addOutputsToScope(resolvedTmpl, fmt.Sprintf("steps.%s", step.Name), scope, false, false) - } - resolvedTemplates[step.Name] = resolvedTmpl - } - - stepBytes, err := json.Marshal(stepGroup) - if err != nil { - return errors.InternalWrapError(err) - } - err = resolveAllVariables(scope, ctx.globalParams, string(stepBytes), workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps %s", tmpl.Name, err.Error()) - } - - for _, step := range stepGroup.Steps { - aggregate := len(step.WithItems) > 0 || step.WithParam != "" - resolvedTmpl := resolvedTemplates[step.Name] - ctx.addOutputsToScope(resolvedTmpl, fmt.Sprintf("steps.%s", step.Name), scope, aggregate, false) - - // Validate the template again with actual arguments. 
- _, err = ctx.validateTemplateHolder(&step, tmplCtx, &step.Arguments, workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].%s %s", tmpl.Name, i, step.Name, err.Error()) - } - } - } - return nil -} - -func addItemsToScope(withItems []wfv1.Item, withParam string, withSequence *wfv1.Sequence, scope map[string]interface{}) error { - defined := 0 - if len(withItems) > 0 { - defined++ - } - if withParam != "" { - defined++ - } - if withSequence != nil { - defined++ - } - if defined > 1 { - return fmt.Errorf("only one of withItems, withParam, withSequence can be specified") - } - if len(withItems) > 0 { - for i := range withItems { - val := withItems[i] - switch val.GetType() { - case wfv1.String, wfv1.Number, wfv1.Bool: - scope["item"] = true - case wfv1.List: - for i := range val.GetListVal() { - scope[fmt.Sprintf("item.[%v]", i)] = true - } - case wfv1.Map: - for itemKey := range val.GetMapVal() { - scope[fmt.Sprintf("item.%s", itemKey)] = true - } - default: - return fmt.Errorf("unsupported withItems type: %v", val) - } - } - } else if withParam != "" { - scope["item"] = true - // 'item.*' is magic placeholder value which resolveAllVariables() will look for - // when considering if all variables are resolveable. - scope[anyItemMagicValue] = true - } else if withSequence != nil { - if withSequence.Count != nil && withSequence.End != nil { - return errors.New(errors.CodeBadRequest, "only one of count or end can be defined in withSequence") - } - scope["item"] = true - } - return nil -} - -func (ctx *templateValidationCtx) addOutputsToScope(tmpl *wfv1.Template, prefix string, scope map[string]interface{}, aggregate bool, isAncestor bool) { - scope[fmt.Sprintf("%s.id", prefix)] = true - scope[fmt.Sprintf("%s.startedAt", prefix)] = true - scope[fmt.Sprintf("%s.finishedAt", prefix)] = true - if tmpl.Daemon != nil && *tmpl.Daemon { - scope[fmt.Sprintf("%s.ip", prefix)] = true - } - if tmpl.HasOutput() { - scope[fmt.Sprintf("%s.outputs.result", prefix)] = true - scope[fmt.Sprintf("%s.exitCode", prefix)] = true - } - for _, param := range tmpl.Outputs.Parameters { - scope[fmt.Sprintf("%s.outputs.parameters.%s", prefix, param.Name)] = true - if param.GlobalName != "" { - if !isParameter(param.GlobalName) { - globalParamName := fmt.Sprintf("workflow.outputs.parameters.%s", param.GlobalName) - scope[globalParamName] = true - ctx.globalParams[globalParamName] = placeholderGenerator.NextPlaceholder() - } else { - logrus.Warnf("GlobalName '%s' is a parameter and won't be validated until runtime", param.GlobalName) - scope[anyWorkflowOutputParameterMagicValue] = true - } - } - } - for _, art := range tmpl.Outputs.Artifacts { - scope[fmt.Sprintf("%s.outputs.artifacts.%s", prefix, art.Name)] = true - if art.GlobalName != "" { - if !isParameter(art.GlobalName) { - globalArtName := fmt.Sprintf("workflow.outputs.artifacts.%s", art.GlobalName) - scope[globalArtName] = true - ctx.globalParams[globalArtName] = placeholderGenerator.NextPlaceholder() - } else { - logrus.Warnf("GlobalName '%s' is a parameter and won't be validated until runtime", art.GlobalName) - scope[anyWorkflowOutputArtifactMagicValue] = true - } - } - } - if aggregate { - switch tmpl.GetType() { - // Not that we don't also include TemplateTypeContainer here, even though it uses `outputs.result` it uses - // `outputs.parameters` as its aggregator. 
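A reduced model of what addItemsToScope above contributes to the scope for the different withItems item types; the items here are hypothetical, and the addOutputsToScope switch resumes below:

    package main

    import "fmt"

    func main() {
        scope := map[string]interface{}{}
        // Scalar items, e.g. withItems: ["a", "b"], expose {{item}}.
        scope["item"] = true
        // List items, e.g. withItems: [["x", "y"]], expose {{item.[i]}}.
        for i := 0; i < 2; i++ {
            scope[fmt.Sprintf("item.[%v]", i)] = true
        }
        // Map items, e.g. withItems: [{os: ubuntu}], expose {{item.key}}.
        for key := range map[string]string{"os": "ubuntu"} {
            scope[fmt.Sprintf("item.%s", key)] = true
        }
        fmt.Println(len(scope)) // 4: item, item.[0], item.[1], item.os
    }
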
- case wfv1.TemplateTypeScript, wfv1.TemplateTypeContainerSet: - scope[fmt.Sprintf("%s.outputs.result", prefix)] = true - scope[fmt.Sprintf("%s.exitCode", prefix)] = true - scope[fmt.Sprintf("%s.outputs.parameters", prefix)] = true - default: - scope[fmt.Sprintf("%s.outputs.parameters", prefix)] = true - } - } - if isAncestor { - scope[fmt.Sprintf("%s.status", prefix)] = true - } -} - -func validateOutputs(scope map[string]interface{}, globalParams map[string]string, tmpl *wfv1.Template, workflowTemplateValidation bool) error { - err := validateWorkflowFieldNames(tmpl.Outputs.Parameters) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.parameters %s", tmpl.Name, err.Error()) - } - err = validateWorkflowFieldNames(tmpl.Outputs.Artifacts) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs.artifacts %s", tmpl.Name, err.Error()) - } - outputBytes, err := json.Marshal(tmpl.Outputs) - if err != nil { - return errors.InternalWrapError(err) - } - err = resolveAllVariables(scope, globalParams, string(outputBytes), workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.outputs %s", tmpl.Name, err.Error()) - } - - for _, art := range tmpl.Outputs.Artifacts { - artRef := fmt.Sprintf("outputs.artifacts.%s", art.Name) - if tmpl.IsLeaf() { - err = art.CleanPath() - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "error in templates.%s.%s: %s", tmpl.Name, artRef, err.Error()) - } - } else { - if art.Path != "" { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.path only valid in container/script templates", tmpl.Name, artRef) - } - } - if art.GlobalName != "" && !isParameter(art.GlobalName) { - errs := isValidParamOrArtifactName(art.GlobalName) - if len(errs) > 0 { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.%s.globalName: %s", tmpl.Name, artRef, errs[0]) - } - } - } - for _, param := range tmpl.Outputs.Parameters { - paramRef := fmt.Sprintf("templates.%s.outputs.parameters.%s", tmpl.Name, param.Name) - err = validateOutputParameter(paramRef, &param) - if err != nil { - return err - } - if param.ValueFrom != nil { - tmplType := tmpl.GetType() - switch tmplType { - case wfv1.TemplateTypeContainer, wfv1.TemplateTypeContainerSet, wfv1.TemplateTypeScript: - if param.ValueFrom.Path == "" { - return errors.Errorf(errors.CodeBadRequest, "%s.path must be specified for %s templates", paramRef, tmplType) - } - case wfv1.TemplateTypeResource: - if param.ValueFrom.JQFilter == "" && param.ValueFrom.JSONPath == "" { - return errors.Errorf(errors.CodeBadRequest, "%s .jqFilter or jsonPath must be specified for %s templates", paramRef, tmplType) - } - case wfv1.TemplateTypeDAG, wfv1.TemplateTypeSteps: - if param.ValueFrom.Parameter == "" && param.ValueFrom.Expression == "" { - return errors.Errorf(errors.CodeBadRequest, "%s.parameter or expression must be specified for %s templates", paramRef, tmplType) - } - if param.ValueFrom.Expression != "" && param.ValueFrom.Parameter != "" { - return errors.Errorf(errors.CodeBadRequest, "%s shouldn't have both `from` and `expression` specified in `ValueFrom` for %s templates", paramRef, tmplType) - } - } - } - if param.GlobalName != "" && !isParameter(param.GlobalName) { - errs := isValidParamOrArtifactName(param.GlobalName) - if len(errs) > 0 { - return errors.Errorf(errors.CodeBadRequest, "%s.globalName: %s", paramRef, errs[0]) - } - } - } - return nil -} - -// validateOutputParameter verifies that only one of valueFrom
is defined in an output -func validateOutputParameter(paramRef string, param *wfv1.Parameter) error { - if param.ValueFrom != nil && param.Value != nil { - return errors.Errorf(errors.CodeBadRequest, "%s has both valueFrom and value specified. Choose one.", paramRef) - } - if param.Value != nil { - return nil - } - if param.ValueFrom == nil { - return errors.Errorf(errors.CodeBadRequest, "%s does not have valueFrom or value specified", paramRef) - } - paramTypes := 0 - for _, value := range []string{param.ValueFrom.Path, param.ValueFrom.JQFilter, param.ValueFrom.JSONPath, param.ValueFrom.Parameter, param.ValueFrom.Expression} { - if value != "" { - paramTypes++ - } - } - if param.ValueFrom.Supplied != nil { - paramTypes++ - } - switch paramTypes { - case 0: - return errors.New(errors.CodeBadRequest, "valueFrom type unspecified. choose one of: path, jqFilter, jsonPath, parameter, raw, expression") - case 1: - default: - return errors.New(errors.CodeBadRequest, "multiple valueFrom types specified. choose one of: path, jqFilter, jsonPath, parameter, raw") - } - return nil -} - -// validateWorkflowFieldNames accepts a slice of structs and -// verifies that the Name field of the structs are: -// * unique -// * non-empty -// * matches matches our regex requirements -func validateWorkflowFieldNames(slice interface{}) error { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - return errors.InternalErrorf("validateWorkflowFieldNames given a non-slice type") - } - items := make([]interface{}, s.Len()) - for i := 0; i < s.Len(); i++ { - items[i] = s.Index(i).Interface() - } - names := make(map[string]bool) - getNameFieldValue := func(val interface{}) (string, error) { - s := reflect.ValueOf(val) - for i := 0; i < s.NumField(); i++ { - typeField := s.Type().Field(i) - if typeField.Name == "Name" { - return s.Field(i).String(), nil - } - } - return "", errors.InternalError("No 'Name' field in struct") - } - - for i, item := range items { - name, err := getNameFieldValue(item) - if err != nil { - return err - } - if name == "" { - return errors.Errorf(errors.CodeBadRequest, "[%d].name is required", i) - } - var errs []string - t := reflect.TypeOf(item) - if t == reflect.TypeOf(wfv1.Parameter{}) || t == reflect.TypeOf(wfv1.Artifact{}) { - errs = isValidParamOrArtifactName(name) - } else { - errs = isValidWorkflowFieldName(name) - } - if len(errs) != 0 { - return errors.Errorf(errors.CodeBadRequest, "[%d].name: '%s' is invalid: %s", i, name, strings.Join(errs, ";")) - } - _, ok := names[name] - if ok { - return errors.Errorf(errors.CodeBadRequest, "[%d].name '%s' is not unique", i, name) - } - names[name] = true - } - return nil -} - -type dagValidationContext struct { - tasks map[string]wfv1.DAGTask - dependencies map[string]map[string]common.DependencyType // map of DAG tasks, each one containing a map of [task it's dependent on] -> [dependency type] -} - -func (d *dagValidationContext) GetTask(taskName string) *wfv1.DAGTask { - task := d.tasks[taskName] - return &task -} - -func (d *dagValidationContext) GetTaskDependencies(taskName string) []string { - dependencies := d.GetTaskDependenciesWithDependencyTypes(taskName) - - var dependencyTasks []string - for task := range dependencies { - dependencyTasks = append(dependencyTasks, task) - } - - return dependencyTasks -} - -func (d *dagValidationContext) GetTaskDependenciesWithDependencyTypes(taskName string) map[string]common.DependencyType { - if dependencies, ok := d.dependencies[taskName]; ok { - return dependencies - } - task := 
d.GetTask(taskName) - dependencies, _ := common.GetTaskDependencies(task, d) - d.dependencies[taskName] = dependencies - return d.dependencies[taskName] -} - -func (d *dagValidationContext) GetTaskFinishedAtTime(taskName string) time.Time { - return time.Now() -} - -func (ctx *templateValidationCtx) validateDAG(scope map[string]interface{}, tmplCtx *templateresolution.Context, tmpl *wfv1.Template, workflowTemplateValidation bool) error { - err := validateNonLeaf(tmpl) - if err != nil { - return err - } - if len(tmpl.DAG.Tasks) == 0 { - return errors.Errorf(errors.CodeBadRequest, "templates.%s must have at least one task", tmpl.Name) - } - usingDepends := false - nameToTask := make(map[string]wfv1.DAGTask) - for _, task := range tmpl.DAG.Tasks { - if task.Depends != "" { - usingDepends = true - } - nameToTask[task.Name] = task - } - - dagValidationCtx := &dagValidationContext{ - tasks: nameToTask, - dependencies: make(map[string]map[string]common.DependencyType), - } - err = sortDAGTasks(tmpl, dagValidationCtx) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s sorting failed: %s", tmpl.Name, err.Error()) - } - - err = validateWorkflowFieldNames(tmpl.DAG.Tasks) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks%s", tmpl.Name, err.Error()) - } - - resolvedTemplates := make(map[string]*wfv1.Template) - - // Verify dependencies for all tasks can be resolved as well as template names - for _, task := range tmpl.DAG.Tasks { - - if (usingDepends || len(task.Dependencies) > 0) && '0' <= task.Name[0] && task.Name[0] <= '9' { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s name cannot begin with a digit when using either 'depends' or 'dependencies'", tmpl.Name, task.Name) - } - - if usingDepends && len(task.Dependencies) > 0 { - return errors.Errorf(errors.CodeBadRequest, "templates.%s cannot use both 'depends' and 'dependencies' in the same DAG template", tmpl.Name) - } - - if usingDepends && task.ContinueOn != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s cannot use 'continueOn' when using 'depends'. 
Instead use 'dep-task.Failed'/'dep-task.Errored'", tmpl.Name) - } - - resolvedTmpl, err := ctx.validateTemplateHolder(&task, tmplCtx, &FakeArguments{}, workflowTemplateValidation) - - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - - resolvedTemplates[task.Name] = resolvedTmpl - - prefix := fmt.Sprintf("tasks.%s", task.Name) - aggregate := len(task.WithItems) > 0 || task.WithParam != "" - ctx.addOutputsToScope(resolvedTmpl, prefix, scope, aggregate, false) - - err = common.ValidateTaskResults(&task) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - - for depName, depType := range dagValidationCtx.GetTaskDependenciesWithDependencyTypes(task.Name) { - task, ok := dagValidationCtx.tasks[depName] - if !ok { - return errors.Errorf(errors.CodeBadRequest, - "templates.%s.tasks.%s dependency '%s' not defined", - tmpl.Name, task.Name, depName) - } else if depType == common.DependencyTypeItems && len(task.WithItems) == 0 && task.WithParam == "" && task.WithSequence == nil { - return errors.Errorf(errors.CodeBadRequest, - "templates.%s.tasks.%s dependency '%s' uses an items-based condition such as .AnySucceeded or .AllFailed but does not contain any items", - tmpl.Name, task.Name, depName) - } - } - } - - if err = verifyNoCycles(tmpl, dagValidationCtx); err != nil { - return err - } - err = resolveAllVariables(scope, ctx.globalParams, tmpl.DAG.Target, workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.targets %s", tmpl.Name, err.Error()) - } - if err = validateDAGTargets(tmpl, dagValidationCtx.tasks); err != nil { - return err - } - - for _, task := range tmpl.DAG.Tasks { - resolvedTmpl := resolvedTemplates[task.Name] - // add all tasks outputs to scope so that a nested DAGs can have outputs - prefix := fmt.Sprintf("tasks.%s", task.Name) - // add self status reference for hooks - if task.Hooks != nil { - scope[fmt.Sprintf("%s.status", prefix)] = true - } - ctx.addOutputsToScope(resolvedTmpl, prefix, scope, false, false) - if task.HasExitHook() { - ctx.addOutputsToScope(resolvedTmpl, prefix, scope, false, false) - } - taskBytes, err := json.Marshal(task) - if err != nil { - return errors.InternalWrapError(err) - } - taskScope := make(map[string]interface{}) - for k, v := range scope { - taskScope[k] = v - } - ancestry := common.GetTaskAncestry(dagValidationCtx, task.Name) - for _, ancestor := range ancestry { - ancestorTask := dagValidationCtx.GetTask(ancestor) - resolvedTmpl := resolvedTemplates[ancestor] - ancestorPrefix := fmt.Sprintf("tasks.%s", ancestor) - aggregate := len(ancestorTask.WithItems) > 0 || ancestorTask.WithParam != "" - ctx.addOutputsToScope(resolvedTmpl, ancestorPrefix, taskScope, aggregate, true) - } - if i := task.Inline; i != nil { - for _, p := range i.Inputs.Parameters { - taskScope["inputs.parameters."+p.Name] = placeholderGenerator.NextPlaceholder() - } - } - - err = addItemsToScope(task.WithItems, task.WithParam, task.WithSequence, taskScope) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - err = resolveAllVariables(taskScope, ctx.globalParams, string(taskBytes), workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - err = 
validateArguments(fmt.Sprintf("templates.%s.tasks.%s.arguments.", tmpl.Name, task.Name), task.Arguments, false) - if err != nil { - return err - } - err = validateDAGTaskArgumentDependency(task.Arguments, ancestry) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - // Validate the template again with actual arguments. - _, err = ctx.validateTemplateHolder(&task, tmplCtx, &task.Arguments, workflowTemplateValidation) - if err != nil { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.tasks.%s %s", tmpl.Name, task.Name, err.Error()) - } - } - - return nil -} - -func validateDAGTaskArgumentDependency(arguments wfv1.Arguments, ancestry []string) error { - ancestryMap := make(map[string]struct{}, len(ancestry)) - for _, a := range ancestry { - ancestryMap[a] = struct{}{} - } - - for _, param := range arguments.Parameters { - if param.Value == nil { - return errors.Errorf(errors.CodeBadRequest, "missing value for parameter '%s'", param.Name) - } - if strings.HasPrefix(param.Value.String(), "{{tasks.") { - // All parameter values should have been validated, so - // index 1 should exist. - refTaskName := strings.Split(param.Value.String(), ".")[1] - - if _, dependencyExists := ancestryMap[refTaskName]; !dependencyExists { - return errors.Errorf(errors.CodeBadRequest, "missing dependency '%s' for parameter '%s'", refTaskName, param.Name) - } - } - } - - for _, artifact := range arguments.Artifacts { - if strings.HasPrefix(artifact.From, "{{tasks.") { - // All parameter values should have been validated, so - // index 1 should exist. - refTaskName := strings.Split(artifact.From, ".")[1] - - if _, dependencyExists := ancestryMap[refTaskName]; !dependencyExists { - return errors.Errorf(errors.CodeBadRequest, "missing dependency '%s' for artifact '%s'", refTaskName, artifact.Name) - } - } - } - return nil -} - -func validateDAGTargets(tmpl *wfv1.Template, nameToTask map[string]wfv1.DAGTask) error { - if tmpl.DAG.Target == "" { - return nil - } - for _, targetName := range strings.Split(tmpl.DAG.Target, " ") { - if isParameter(targetName) { - continue - } - if _, ok := nameToTask[targetName]; !ok { - return errors.Errorf(errors.CodeBadRequest, "templates.%s.targets: target '%s' is not defined", tmpl.Name, targetName) - } - } - return nil -} - -// verifyNoCycles verifies there are no cycles in the DAG graph -func verifyNoCycles(tmpl *wfv1.Template, ctx *dagValidationContext) error { - visited := make(map[string]bool) - var noCyclesHelper func(taskName string, cycle []string) error - noCyclesHelper = func(taskName string, cycle []string) error { - if _, ok := visited[taskName]; ok { - return nil - } - task := ctx.GetTask(taskName) - for _, depName := range ctx.GetTaskDependencies(task.Name) { - for _, name := range cycle { - if name == depName { - return errors.Errorf(errors.CodeBadRequest, - "templates.%s.tasks dependency cycle detected: %s->%s", - tmpl.Name, strings.Join(cycle, "->"), name) - } - } - cycle = append(cycle, depName) - err := noCyclesHelper(depName, cycle) - if err != nil { - return err - } - cycle = cycle[0 : len(cycle)-1] - } - visited[taskName] = true - return nil - } - - for _, task := range tmpl.DAG.Tasks { - err := noCyclesHelper(task.Name, []string{}) - if err != nil { - return err - } - } - return nil -} - -func sortDAGTasks(tmpl *wfv1.Template, ctx *dagValidationContext) error { - taskMap := make(map[string]*wfv1.DAGTask, len(tmpl.DAG.Tasks)) - sortingGraph := 
make([]*sorting.TopologicalSortingNode, len(tmpl.DAG.Tasks)) - for index := range tmpl.DAG.Tasks { - task := tmpl.DAG.Tasks[index] - taskMap[task.Name] = &task - dependenciesMap, _ := common.GetTaskDependencies(&task, ctx) - var dependencies []string - for taskName := range dependenciesMap { - dependencies = append(dependencies, taskName) - } - sortingGraph[index] = &sorting.TopologicalSortingNode{ - NodeName: task.Name, - Dependencies: dependencies, - } - } - sortingResult, err := sorting.TopologicalSorting(sortingGraph) - if err != nil { - return err - } - tmpl.DAG.Tasks = make([]wfv1.DAGTask, len(tmpl.DAG.Tasks)) - for index, node := range sortingResult { - tmpl.DAG.Tasks[index] = *taskMap[node.NodeName] - } - return nil -} - -var ( - // paramRegex matches a parameter. e.g. {{inputs.parameters.blah}} - paramRegex = regexp.MustCompile(`{{[-a-zA-Z0-9]+(\.[-a-zA-Z0-9_]+)*}}`) - paramOrArtifactNameRegex = regexp.MustCompile(`^[-a-zA-Z0-9_]+[-a-zA-Z0-9_]*$`) - workflowFieldNameRegex = regexp.MustCompile("^" + workflowFieldNameFmt + "$") -) - -func isParameter(p string) bool { - return paramRegex.MatchString(p) -} - -func isValidParamOrArtifactName(p string) []string { - var errs []string - if !paramOrArtifactNameRegex.MatchString(p) { - return append(errs, "Parameter/Artifact name must consist of alpha-numeric characters, '_' or '-' e.g. my_param_1, MY-PARAM-1") - } - return errs -} - -const ( - workflowFieldNameFmt string = "[a-zA-Z0-9][-a-zA-Z0-9]*" - workflowFieldNameErrMsg string = "name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character" - workflowFieldMaxLength int = 128 -) - -// isValidWorkflowFieldName : workflow field name must consist of alpha-numeric characters or '-', and must start with an alpha-numeric character -func isValidWorkflowFieldName(name string) []string { - var errs []string - if len(name) > workflowFieldMaxLength { - errs = append(errs, apivalidation.MaxLenError(workflowFieldMaxLength)) - } - if !workflowFieldNameRegex.MatchString(name) { - msg := workflowFieldNameErrMsg + " (e.g. 
My-name1-2, 123-NAME)" - errs = append(errs, msg) - } - return errs -} - -func getTemplateID(tmpl *wfv1.Template) string { - return tmpl.Name -} diff --git a/vendor/github.com/argoproj/pkg/expr/function.go b/vendor/github.com/argoproj/pkg/expr/function.go deleted file mode 100644 index cce363710b4..00000000000 --- a/vendor/github.com/argoproj/pkg/expr/function.go +++ /dev/null @@ -1,108 +0,0 @@ -package expr - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/oliveagle/jsonpath" -) - -func GetExprEnvFunctionMap() map[string]interface{} { - return map[string]interface{}{ - "asInt": AsInt, - "asFloat": AsFloat, - "string": AsStr, - "jsonpath": JsonPath, - } -} - -func AsStr(val interface{}) interface{} { - return fmt.Sprintf("%v", val) -} - -func JsonPath(jsonStr string, path string) interface{} { - var jsonMap interface{} - err := json.Unmarshal([]byte(jsonStr), &jsonMap) - if err != nil { - panic(err) - } - value, err := jsonpath.JsonPathLookup(jsonMap, path) - if err != nil { - panic(err) - } - return value -} - -func AsInt(in interface{}) int64 { - switch i := in.(type) { - case float64: - return int64(i) - case float32: - return int64(i) - case int64: - return i - case int32: - return int64(i) - case int16: - return int64(i) - case int8: - return int64(i) - case int: - return int64(i) - case uint64: - return int64(i) - case uint32: - return int64(i) - case uint16: - return int64(i) - case uint8: - return int64(i) - case uint: - return int64(i) - case string: - inAsInt, err := strconv.ParseInt(i, 10, 64) - if err == nil { - return inAsInt - } - panic(err) - } - panic(fmt.Sprintf("asInt() not supported on %v %v", reflect.TypeOf(in), in)) -} - -func AsFloat(in interface{}) float64 { - switch i := in.(type) { - case float64: - return i - case float32: - return float64(i) - case int64: - return float64(i) - case int32: - return float64(i) - case int16: - return float64(i) - case int8: - return float64(i) - case int: - return float64(i) - case uint64: - return float64(i) - case uint32: - return float64(i) - case uint16: - return float64(i) - case uint8: - return float64(i) - case uint: - return float64(i) - case string: - inAsFloat, err := strconv.ParseFloat(i, 64) - if err == nil { - return inAsFloat - } - panic(err) - } - panic(fmt.Sprintf("asFloat() not supported on %v %v", reflect.TypeOf(in), in)) -} diff --git a/vendor/github.com/argoproj/pkg/file/file.go b/vendor/github.com/argoproj/pkg/file/file.go deleted file mode 100644 index b35c7ed0a14..00000000000 --- a/vendor/github.com/argoproj/pkg/file/file.go +++ /dev/null @@ -1,27 +0,0 @@ -package file - -import ( - "os" - - "github.com/pkg/errors" -) - -// IsDirectory returns whether or not the given file is a directory -func IsDirectory(path string) (bool, error) { - fileOrDir, err := os.Open(path) - if err != nil { - return false, errors.WithStack(err) - } - defer func() { _ = fileOrDir.Close() }() - stat, err := fileOrDir.Stat() - if err != nil { - return false, errors.WithStack(err) - } - return stat.IsDir(), nil -} - -// Exists returns whether or not a path exists -func Exists(path string) bool { - _, err := os.Stat(path) - return !os.IsNotExist(err) -} diff --git a/vendor/github.com/argoproj/pkg/json/json.go b/vendor/github.com/argoproj/pkg/json/json.go deleted file mode 100644 index 3cfe1c88848..00000000000 --- a/vendor/github.com/argoproj/pkg/json/json.go +++ /dev/null @@ -1,36 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" -) - -// DisallowUnknownFields configures the JSON decoder to error 
out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} - -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal is a convenience wrapper around json.Unmarshal to support json decode options -func Unmarshal(j []byte, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(bytes.NewReader(j)) - for _, opt := range opts { - d = opt(d) - } - return d.Decode(&o) -} - -// UnmarshalStrict is a convenience wrapper around json.Unmarshal with strict unmarshal options -func UnmarshalStrict(j []byte, o interface{}) error { - return Unmarshal(j, o, DisallowUnknownFields) -} - -// IsJSON tests whether or not the suppied byte array is valid JSON -func IsJSON(j []byte) bool { - var js json.RawMessage - return json.Unmarshal(j, &js) == nil -} diff --git a/vendor/github.com/colinmarc/hdfs/.gitignore b/vendor/github.com/colinmarc/hdfs/.gitignore deleted file mode 100644 index cb7391a9d27..00000000000 --- a/vendor/github.com/colinmarc/hdfs/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -build -hdfs -!hdfs/ -minicluster.log diff --git a/vendor/github.com/colinmarc/hdfs/.travis.yml b/vendor/github.com/colinmarc/hdfs/.travis.yml deleted file mode 100644 index de65cb28118..00000000000 --- a/vendor/github.com/colinmarc/hdfs/.travis.yml +++ /dev/null @@ -1,33 +0,0 @@ -sudo: required -language: go -go_import_path: github.com/colinmarc/hdfs -go: 1.11beta2 -env: -- PLATFORM=cdh5 -- PLATFORM=cdh5 KERBEROS=true -- PLATFORM=hdp2 -install: -- git clone https://github.com/sstephenson/bats $HOME/bats -- mkdir -p $HOME/bats/build -- "$HOME/bats/install.sh $HOME/bats/build" -- export PATH="$PATH:$HOME/bats/build/bin" -- ./travis-setup.sh $PLATFORM -before_script: -- export HADOOP_CONF_DIR="/etc/hadoop/conf" -- find protocol -name *.pb.go | xargs touch # so make doesn't try to regen protobuf files -script: make test -before_deploy: make release -deploy: - skip_cleanup: true - provider: releases - api_key: - secure: HgyYfxoZfsZhDNeeL4Myi85aeyei80hQL29VhQKqkFrcoKL4V4+fJo7uG5XfKLCU0nQrRA98EtQO6w8AD+ULn/Ez8DA/RHey3Ny5GzX2ZaQ35KiuM71jPcvggxh8e2EJ14txxm7TAnqCxP7p5sJggiU0xj2w3vDUUJp5Q+vP3WE= - file: gohdfs-*.tar.gz - file_glob: true - on: - repo: colinmarc/hdfs - tags: true - all_branches: true - condition: $PLATFORM = hdp2 -cache: -- "$HOME/bats" diff --git a/vendor/github.com/colinmarc/hdfs/CODE_OF_CONDUCT.md b/vendor/github.com/colinmarc/hdfs/CODE_OF_CONDUCT.md deleted file mode 100644 index e58da74292f..00000000000 --- a/vendor/github.com/colinmarc/hdfs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at colinmarc@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/colinmarc/hdfs/LICENSE.txt b/vendor/github.com/colinmarc/hdfs/LICENSE.txt deleted file mode 100644 index 457ab51eace..00000000000 --- a/vendor/github.com/colinmarc/hdfs/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2014 Colin Marc (colinmarc@gmail.com) - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/colinmarc/hdfs/Makefile b/vendor/github.com/colinmarc/hdfs/Makefile deleted file mode 100644 index 4ab8ad3bade..00000000000 --- a/vendor/github.com/colinmarc/hdfs/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -HADOOP_COMMON_PROTOS = $(shell find protocol/hadoop_common -name '*.proto') -HADOOP_HDFS_PROTOS = $(shell find protocol/hadoop_hdfs -name '*.proto') -GENERATED_PROTOS = $(shell echo "$(HADOOP_HDFS_PROTOS) $(HADOOP_COMMON_PROTOS)" | sed 's/\.proto/\.pb\.go/g') -SOURCES = $(shell find . -name '*.go') $(GENERATED_PROTOS) - -# Protobuf needs one of these for every 'import "foo.proto"' in .protoc files. -PROTO_MAPPING = MSecurity.proto=github.com/colinmarc/hdfs/protocol/hadoop_common - -TRAVIS_TAG ?= $(shell git rev-parse HEAD) -ARCH = $(shell go env GOOS)-$(shell go env GOARCH) -RELEASE_NAME = gohdfs-$(TRAVIS_TAG)-$(ARCH) - -all: hdfs - -%.pb.go: $(HADOOP_HDFS_PROTOS) $(HADOOP_COMMON_PROTOS) - protoc --go_out='$(PROTO_MAPPING):protocol/hadoop_common' -Iprotocol/hadoop_common -Iprotocol/hadoop_hdfs $(HADOOP_COMMON_PROTOS) - protoc --go_out='$(PROTO_MAPPING):protocol/hadoop_hdfs' -Iprotocol/hadoop_common -Iprotocol/hadoop_hdfs $(HADOOP_HDFS_PROTOS) - -clean-protos: - find . -name *.pb.go | xargs rm - -hdfs: clean $(SOURCES) - go build -ldflags "-X main.version=$(TRAVIS_TAG)" ./cmd/hdfs - -test: hdfs - go test -v -race ./... 
- bats ./cmd/hdfs/test/*.bats - -clean: - rm -f ./hdfs - rm -rf gohdfs-* - -release: hdfs - mkdir -p $(RELEASE_NAME) - cp hdfs README.md LICENSE.txt cmd/hdfs/bash_completion $(RELEASE_NAME)/ - tar -cvzf $(RELEASE_NAME).tar.gz $(RELEASE_NAME) - -.PHONY: clean clean-protos install test release diff --git a/vendor/github.com/colinmarc/hdfs/README.md b/vendor/github.com/colinmarc/hdfs/README.md deleted file mode 100644 index 11b6464723f..00000000000 --- a/vendor/github.com/colinmarc/hdfs/README.md +++ /dev/null @@ -1,131 +0,0 @@ -HDFS for Go -=========== - -[![GoDoc](https://godoc.org/github.com/colinmarc/hdfs/web?status.svg)](https://godoc.org/github.com/colinmarc/hdfs) [![build](https://travis-ci.org/colinmarc/hdfs.svg?branch=master)](https://travis-ci.org/colinmarc/hdfs) - -This is a native golang client for hdfs. It connects directly to the namenode using -the protocol buffers API. - -It tries to be idiomatic by aping the stdlib `os` package, where possible, and -implements the interfaces from it, including `os.FileInfo` and `os.PathError`. - -Here's what it looks like in action: - -```go -client, _ := hdfs.New("namenode:8020") - -file, _ := client.Open("/mobydick.txt") - -buf := make([]byte, 59) -file.ReadAt(buf, 48847) - -fmt.Println(string(buf)) -// => Abominable are the tumblers into which he pours his poison. -``` - -For complete documentation, check out the [Godoc][1]. - -The `hdfs` Binary ------------------ - -Along with the library, this repo contains a commandline client for HDFS. Like -the library, its primary aim is to be idiomatic, by enabling your favorite unix -verbs: - - - $ hdfs --help - Usage: hdfs COMMAND - The flags available are a subset of the POSIX ones, but should behave similarly. - - Valid commands: - ls [-lah] [FILE]... - rm [-rf] FILE... - mv [-fT] SOURCE... DEST - mkdir [-p] FILE... - touch [-amc] FILE... - chmod [-R] OCTAL-MODE FILE... - chown [-R] OWNER[:GROUP] FILE... - cat SOURCE... - head [-n LINES | -c BYTES] SOURCE... - tail [-n LINES | -c BYTES] SOURCE... - du [-sh] FILE... - checksum FILE... - get SOURCE [DEST] - getmerge SOURCE DEST - put SOURCE DEST - -Since it doesn't have to wait for the JVM to start up, it's also a lot faster -`hadoop -fs`: - - $ time hadoop fs -ls / > /dev/null - - real 0m2.218s - user 0m2.500s - sys 0m0.376s - - $ time hdfs ls / > /dev/null - - real 0m0.015s - user 0m0.004s - sys 0m0.004s - -Best of all, it comes with bash tab completion for paths! - -Installing the library ----------------------- - -To install the library, once you have Go [all set up][2]: - - $ go get -u github.com/colinmarc/hdfs - -Installing the commandline client ---------------------------------- - -Grab a tarball from the [releases page](https://github.com/colinmarc/hdfs/releases) -and unzip it wherever you like. - -To configure the client, make sure one or both of these environment variables -point to your Hadoop configuration (`core-site.xml` and `hdfs-site.xml`). On -systems with Hadoop installed, they should already be set. - - $ export HADOOP_HOME="/etc/hadoop" - $ export HADOOP_CONF_DIR="/etc/hadoop/conf" - -To install tab completion globally on linux, copy or link the `bash_completion` -file which comes with the tarball into the right place: - - $ ln -sT bash_completion /etc/bash_completion.d/gohdfs - -By default on non-kerberized clusters, the HDFS user is set to the -currently-logged-in user. 
You can override this with another environment -variable: - - $ export HADOOP_USER_NAME=username - -Using the commandline client with Kerberos authentication ---------------------------------------------------------- - -Like `hadoop fs`, the commandline client expects a `ccache` file in the default -location: `/tmp/krb5cc_`. That means it should 'just work' to use `kinit`: - - $ kinit bob@EXAMPLE.com - $ hdfs ls / - -If that doesn't work, try setting the `KRB5CCNAME` environment variable to -wherever you have the `ccache` saved. - -Compatibility -------------- - -This library uses "Version 9" of the HDFS protocol, which means it should work -with hadoop distributions based on 2.2.x and above. The tests run against CDH -5.x and HDP 2.x. - -Acknowledgements ----------------- - -This library is heavily indebted to [snakebite][3]. - -[1]: https://godoc.org/github.com/colinmarc/hdfs -[2]: https://golang.org/doc/install -[3]: https://github.com/spotify/snakebite diff --git a/vendor/github.com/colinmarc/hdfs/client.go b/vendor/github.com/colinmarc/hdfs/client.go deleted file mode 100644 index 690648b3666..00000000000 --- a/vendor/github.com/colinmarc/hdfs/client.go +++ /dev/null @@ -1,293 +0,0 @@ -package hdfs - -import ( - "context" - "errors" - "io" - "io/ioutil" - "net" - "os" - "os/user" - "strings" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - krb "gopkg.in/jcmturner/gokrb5.v5/client" -) - -// A Client represents a connection to an HDFS cluster -type Client struct { - namenode *rpc.NamenodeConnection - defaults *hdfs.FsServerDefaultsProto - options ClientOptions -} - -// ClientOptions represents the configurable options for a client. -// The NamenodeDialFunc and DatanodeDialFunc options can be used to set -// connection timeouts: -// -// dialFunc := (&net.Dialer{ -// Timeout: 30 * time.Second, -// KeepAlive: 30 * time.Second, -// DualStack: true, -// }).DialContext -// -// options := ClientOptions{ -// Addresses: []string{"nn1:9000"}, -// NamenodeDialFunc: dialFunc, -// DatanodeDialFunc: dialFunc, -// } -type ClientOptions struct { - // Addresses specifies the namenode(s) to connect to. - Addresses []string - // User specifies which HDFS user the client will act as. It is required - // unless kerberos authentication is enabled, in which case it will be - // determined from the provided credentials if empty. - User string - // UseDatanodeHostname specifies whether the client should connect to the - // datanodes via hostname (which is useful in multi-homed setups) or IP - // address, which may be required if DNS isn't available. - UseDatanodeHostname bool - // NamenodeDialFunc is used to connect to the datanodes. If nil, then - // (&net.Dialer{}).DialContext is used. - NamenodeDialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - // DatanodeDialFunc is used to connect to the datanodes. If nil, then - // (&net.Dialer{}).DialContext is used. - DatanodeDialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - // KerberosClient is used to connect to kerberized HDFS clusters. If provided, - // the client will always mutually athenticate when connecting to the - // namenode(s). - KerberosClient *krb.Client - // KerberosServicePrincipleName specifies the Service Principle Name - // (/) for the namenode(s). 
Like in the - // dfs.namenode.kerberos.principal property of core-site.xml, the special - // string '_HOST' can be substituted for the address of the namenode in a - // multi-namenode setup (for example: 'nn/_HOST'). It is required if - // KerberosClient is provided. - KerberosServicePrincipleName string - // Namenode optionally specifies an existing NamenodeConnection to wrap. This - // is useful if you needed to create the namenode net.Conn manually for - // whatever reason. - // - // Deprecated: use NamenodeDialFunc instead. - Namenode *rpc.NamenodeConnection -} - -// ClientOptionsFromConf attempts to load any relevant configuration options -// from the given Hadoop configuration and create a ClientOptions struct -// suitable for creating a Client. Currently this sets the following fields -// on the resulting ClientOptions: -// -// // Determined by fs.defaultFS (or the deprecated fs.default.name), or -// // fields beginning with dfs.namenode.rpc-address. -// Addresses []string -// -// // Determined by dfs.client.use.datanode.hostname. -// UseDatanodeHostname bool -// -// // Set to a non-nil but empty client (without credentials) if the value of -// // hadoop.security.authentication is 'kerberos'. It must then be replaced -// // with a credentialed Kerberos client. -// KerberosClient *krb.Client -// -// // Determined by dfs.namenode.kerberos.principal, with the realm -// // (everything after the first '@') chopped off. -// KerberosServicePrincipleName string -// -// Because of the way Kerberos can be forced by the Hadoop configuration but not -// actually configured, you should check for whether KerberosClient is set in -// the resulting ClientOptions before proceeding: -// -// options, _ := ClientOptionsFromConf(conf) -// if options.KerberosClient != nil { -// // Replace with a valid credentialed client. -// options.KerberosClient = getKerberosClient() -// } -func ClientOptionsFromConf(conf HadoopConf) (ClientOptions, error) { - namenodes, err := conf.Namenodes() - options := ClientOptions{Addresses: namenodes} - - options.UseDatanodeHostname = (conf["dfs.client.use.datanode.hostname"] == "true") - - if strings.ToLower(conf["hadoop.security.authentication"]) == "kerberos" { - // Set an empty KerberosClient here so that the user is forced to either - // unset it (disabling kerberos altogether) or replace it with a valid - // client. If the user does neither, NewClient will return an error. - options.KerberosClient = &krb.Client{} - } - - if conf["dfs.namenode.kerberos.principal"] != "" { - options.KerberosServicePrincipleName = strings.Split(conf["dfs.namenode.kerberos.principal"], "@")[0] - } - - return options, err -} - -// NewClient returns a connected Client for the given options, or an error if -// the client could not be created. 
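[editor's note] A minimal usage sketch, not part of the patch, of the conf-driven construction that the ClientOptionsFromConf comment above describes, assuming a non-kerberized cluster is acceptable; the fallback to the current OS user mirrors what New() does. Everything beyond the documented API here is a placeholder.

```go
package main

import (
	"log"
	"os/user"

	"github.com/colinmarc/hdfs"
)

func main() {
	conf := hdfs.LoadHadoopConf("") // resolves HADOOP_CONF_DIR, then ${HADOOP_HOME}/conf
	options, err := hdfs.ClientOptionsFromConf(conf)
	if err != nil {
		log.Fatal(err) // e.g. no namenode address(es) in configuration
	}

	if options.KerberosClient != nil {
		// hadoop.security.authentication was 'kerberos': the placeholder
		// client has no credentials and must be replaced with a
		// credentialed *krb.Client before NewClient will succeed.
		log.Fatal("cluster requires kerberos; supply a credentialed KerberosClient")
	}

	// Without Kerberos, NewClient requires an explicit user.
	u, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	options.User = u.Username

	client, err := hdfs.NewClient(options)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```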
-func NewClient(options ClientOptions) (*Client, error) { - var err error - if options.Namenode == nil { - if options.KerberosClient != nil && options.KerberosClient.Credentials == nil { - return nil, errors.New("kerberos enabled, but kerberos client is missing credentials") - } - - if options.KerberosClient != nil && options.KerberosServicePrincipleName == "" { - return nil, errors.New("kerberos enabled, but kerberos namenode SPN is not provided") - } - - if options.User == "" { - if options.KerberosClient != nil { - creds := options.KerberosClient.Credentials - options.User = creds.Username + "@" + creds.Realm - } else { - return nil, errors.New("user not specified") - } - } - - options.Namenode, err = rpc.NewNamenodeConnectionWithOptions( - rpc.NamenodeConnectionOptions{ - Addresses: options.Addresses, - User: options.User, - DialFunc: options.NamenodeDialFunc, - KerberosClient: options.KerberosClient, - KerberosServicePrincipleName: options.KerberosServicePrincipleName, - }, - ) - - if err != nil { - return nil, err - } - } - - return &Client{namenode: options.Namenode, options: options}, nil -} - -// New returns a connected Client, or an error if it can't connect. The user -// will be the current system user. Any relevantoptions (including the -// address(es) of the namenode(s), if an empty string is passed) will be loaded -// from the Hadoop configuration present at HADOOP_CONF_DIR. Note, however, -// that New will not attempt any Kerberos authentication; use NewClient if you -// need that. -func New(address string) (*Client, error) { - conf := LoadHadoopConf("") - options, err := ClientOptionsFromConf(conf) - if err != nil { - options = ClientOptions{} - } - - if address != "" { - options.Addresses = strings.Split(address, ",") - } - - u, err := user.Current() - if err != nil { - return nil, err - } - - options.User = u.Username - return NewClient(options) -} - -// NewForUser returns a connected Client with the user specified, or an error if -// it can't connect. -// -// Deprecated: Use NewClient with ClientOptions instead. -func NewForUser(address string, user string) (*Client, error) { - return NewClient(ClientOptions{ - Addresses: []string{address}, - User: user, - }) -} - -// NewForConnection returns Client with the specified, underlying rpc.NamenodeConnection. -// You can use rpc.WrapNamenodeConnection to wrap your own net.Conn. -// -// Deprecated: Use NewClient with ClientOptions instead. -func NewForConnection(namenode *rpc.NamenodeConnection) *Client { - client, _ := NewClient(ClientOptions{Namenode: namenode}) - return client -} - -// ReadFile reads the file named by filename and returns the contents. -func (c *Client) ReadFile(filename string) ([]byte, error) { - f, err := c.Open(filename) - if err != nil { - return nil, err - } - - defer f.Close() - return ioutil.ReadAll(f) -} - -// CopyToLocal copies the HDFS file specified by src to the local file at dst. -// If dst already exists, it will be overwritten. -func (c *Client) CopyToLocal(src string, dst string) error { - remote, err := c.Open(src) - if err != nil { - return err - } - defer remote.Close() - - local, err := os.Create(dst) - if err != nil { - return err - } - defer local.Close() - - _, err = io.Copy(local, remote) - return err -} - -// CopyToRemote copies the local file specified by src to the HDFS file at dst. 
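[editor's note] For reference, a minimal sketch of the convenience helpers defined above (ReadFile, CopyToLocal, CopyToRemote); the namenode address and all paths are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Read a whole HDFS file into memory.
	data, err := client.ReadFile("/mobydick.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(data), "bytes read")

	// Round-trip between HDFS and the local filesystem.
	if err := client.CopyToLocal("/mobydick.txt", "/tmp/mobydick.txt"); err != nil {
		log.Fatal(err)
	}
	if err := client.CopyToRemote("/tmp/mobydick.txt", "/mobydick-copy.txt"); err != nil {
		log.Fatal(err)
	}
}
```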
-func (c *Client) CopyToRemote(src string, dst string) error { - local, err := os.Open(src) - if err != nil { - return err - } - defer local.Close() - - remote, err := c.Create(dst) - if err != nil { - return err - } - defer remote.Close() - - _, err = io.Copy(remote, local) - return err -} - -func (c *Client) fetchDefaults() (*hdfs.FsServerDefaultsProto, error) { - if c.defaults != nil { - return c.defaults, nil - } - - req := &hdfs.GetServerDefaultsRequestProto{} - resp := &hdfs.GetServerDefaultsResponseProto{} - - err := c.namenode.Execute("getServerDefaults", req, resp) - if err != nil { - return nil, err - } - - c.defaults = resp.GetServerDefaults() - return c.defaults, nil -} - -// Close terminates all underlying socket connections to remote server. -func (c *Client) Close() error { - return c.namenode.Close() -} - -// Username returns the current system user if it is not set. -// -// Deprecated: just use user.Current. Previous versions of this function would -// check the env variable HADOOP_USER_NAME; this functionality was removed. -func Username() (string, error) { - currentUser, err := user.Current() - if err != nil { - return "", err - } - - return currentUser.Username, nil -} diff --git a/vendor/github.com/colinmarc/hdfs/conf.go b/vendor/github.com/colinmarc/hdfs/conf.go deleted file mode 100644 index 8eb59ffbf11..00000000000 --- a/vendor/github.com/colinmarc/hdfs/conf.go +++ /dev/null @@ -1,91 +0,0 @@ -package hdfs - -import ( - "encoding/xml" - "errors" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "sort" - "strings" -) - -// Property is the struct representation of hadoop configuration -// key value pair. -type Property struct { - Name string `xml:"name"` - Value string `xml:"value"` -} - -type propertyList struct { - Property []Property `xml:"property"` -} - -// HadoopConf represents a map of all the key value configutation -// pairs found in a user's hadoop configuration files. -type HadoopConf map[string]string - -var errNoNamenodesInConf = errors.New("no namenode address(es) in configuration") - -// LoadHadoopConf returns a HadoopConf object representing configuration from -// the specified path, or finds the correct path in the environment. If -// path or the env variable HADOOP_CONF_DIR is specified, it should point -// directly to the directory where the xml files are. If neither is specified, -// ${HADOOP_HOME}/conf will be used. -func LoadHadoopConf(path string) HadoopConf { - if path == "" { - path = os.Getenv("HADOOP_CONF_DIR") - if path == "" { - path = filepath.Join(os.Getenv("HADOOP_HOME"), "conf") - } - } - - hadoopConf := make(HadoopConf) - for _, file := range []string{"core-site.xml", "hdfs-site.xml"} { - pList := propertyList{} - f, err := ioutil.ReadFile(filepath.Join(path, file)) - if err != nil { - continue - } - - err = xml.Unmarshal(f, &pList) - if err != nil { - continue - } - - for _, prop := range pList.Property { - hadoopConf[prop.Name] = prop.Value - } - } - - return hadoopConf -} - -// Namenodes returns the namenode hosts present in the configuration. The -// returned slice will be sorted and deduped. The values are loaded from -// fs.defaultFS (or the deprecated fs.default.name), or fields beginning with -// dfs.namenode.rpc-address. 
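[editor's note] A short sketch of the namenode lookup documented above; the configuration directory is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	conf := hdfs.LoadHadoopConf("/etc/hadoop/conf") // placeholder path
	namenodes, err := conf.Namenodes()
	if err != nil {
		// Neither fs.defaultFS / fs.default.name nor any
		// dfs.namenode.rpc-address.* entry was found.
		log.Fatal(err)
	}
	fmt.Println(namenodes) // sorted and deduped, e.g. [nn1:8020 nn2:8020]
}
```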
-func (conf HadoopConf) Namenodes() ([]string, error) { - nns := make(map[string]bool) - for key, value := range conf { - if strings.Contains(key, "fs.default") { - nnUrl, _ := url.Parse(value) - nns[nnUrl.Host] = true - } else if strings.HasPrefix(key, "dfs.namenode.rpc-address") { - nns[value] = true - } - } - - if len(nns) == 0 { - return nil, errNoNamenodesInConf - } - - keys := make([]string, 0, len(nns)) - for k, _ := range nns { - keys = append(keys, k) - } - - sort.Strings(keys) - return keys, nil -} diff --git a/vendor/github.com/colinmarc/hdfs/content_summary.go b/vendor/github.com/colinmarc/hdfs/content_summary.go deleted file mode 100644 index dd436d737f5..00000000000 --- a/vendor/github.com/colinmarc/hdfs/content_summary.go +++ /dev/null @@ -1,84 +0,0 @@ -package hdfs - -import ( - "os" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// ContentSummary represents a set of information about a file or directory in -// HDFS. It's provided directly by the namenode, and has no unix filesystem -// analogue. -type ContentSummary struct { - name string - contentSummary *hdfs.ContentSummaryProto -} - -// GetContentSummary returns a ContentSummary representing the named file or -// directory. The summary contains information about the entire tree rooted -// in the named file; for instance, it can return the total size of all -func (c *Client) GetContentSummary(name string) (*ContentSummary, error) { - cs, err := c.getContentSummary(name) - if err != nil { - err = &os.PathError{"content summary", name, err} - } - - return cs, err -} - -func (c *Client) getContentSummary(name string) (*ContentSummary, error) { - req := &hdfs.GetContentSummaryRequestProto{Path: proto.String(name)} - resp := &hdfs.GetContentSummaryResponseProto{} - - err := c.namenode.Execute("getContentSummary", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return nil, err - } - - return &ContentSummary{name, resp.GetSummary()}, nil -} - -// Size returns the total size of the named path, including any subdirectories. -func (cs *ContentSummary) Size() int64 { - return int64(cs.contentSummary.GetLength()) -} - -// SizeAfterReplication returns the total size of the named path, including any -// subdirectories. Unlike Size, it counts the total replicated size of each -// file, and represents the total on-disk footprint for a tree in HDFS. -func (cs *ContentSummary) SizeAfterReplication() int64 { - return int64(cs.contentSummary.GetSpaceConsumed()) -} - -// FileCount returns the number of files under the named path, including any -// subdirectories. If the named path is a file, FileCount returns 1. -func (cs *ContentSummary) FileCount() int { - return int(cs.contentSummary.GetFileCount()) -} - -// DirectoryCount returns the number of directories under the named one, -// including any subdirectories, and including the root directory itself. If -// the named path is a file, this returns 0. -func (cs *ContentSummary) DirectoryCount() int { - return int(cs.contentSummary.GetDirectoryCount()) -} - -// NameQuota returns the HDFS configured "name quota" for the named path. The -// name quota is a hard limit on the number of directories and files inside a -// directory; see http://goo.gl/sOSJmJ for more information. 
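[editor's note] A hedged usage sketch of the ContentSummary accessors defined in this file; the namenode address, the path, and the printUsage helper are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

// printUsage is a hypothetical helper exercising the documented accessors.
func printUsage(client *hdfs.Client, path string) {
	summary, err := client.GetContentSummary(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logical size:   ", summary.Size())
	fmt.Println("replicated size:", summary.SizeAfterReplication())
	fmt.Println("files:", summary.FileCount(), "dirs:", summary.DirectoryCount())
}

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	printUsage(client, "/user/data") // placeholder path
}
```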
-func (cs *ContentSummary) NameQuota() int { - return int(cs.contentSummary.GetQuota()) -} - -// SpaceQuota returns the HDFS configured "name quota" for the named path. The -// name quota is a hard limit on the number of directories and files inside -// a directory; see http://goo.gl/sOSJmJ for more information. -func (cs *ContentSummary) SpaceQuota() int64 { - return int64(cs.contentSummary.GetSpaceQuota()) -} diff --git a/vendor/github.com/colinmarc/hdfs/exceptions.go b/vendor/github.com/colinmarc/hdfs/exceptions.go deleted file mode 100644 index d896a17b534..00000000000 --- a/vendor/github.com/colinmarc/hdfs/exceptions.go +++ /dev/null @@ -1,19 +0,0 @@ -package hdfs - -import "os" - -const ( - fileNotFoundException = "java.io.FileNotFoundException" - permissionDeniedException = "org.apache.hadoop.security.AccessControlException" -) - -func interpretException(exception string, err error) error { - switch exception { - case fileNotFoundException: - return os.ErrNotExist - case permissionDeniedException: - return os.ErrPermission - default: - return err - } -} diff --git a/vendor/github.com/colinmarc/hdfs/file_reader.go b/vendor/github.com/colinmarc/hdfs/file_reader.go deleted file mode 100644 index 592022d2c90..00000000000 --- a/vendor/github.com/colinmarc/hdfs/file_reader.go +++ /dev/null @@ -1,417 +0,0 @@ -package hdfs - -import ( - "crypto/md5" - "errors" - "fmt" - "io" - "os" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// A FileReader represents an existing file or directory in HDFS. It implements -// io.Reader, io.ReaderAt, io.Seeker, and io.Closer, and can only be used for -// reads. For writes, see FileWriter and Client.Create. -type FileReader struct { - client *Client - name string - info os.FileInfo - - blocks []*hdfs.LocatedBlockProto - blockReader *rpc.BlockReader - deadline time.Time - offset int64 - - readdirLast string - - closed bool -} - -// Open returns an FileReader which can be used for reading. -func (c *Client) Open(name string) (*FileReader, error) { - info, err := c.getFileInfo(name) - if err != nil { - return nil, &os.PathError{"open", name, err} - } - - return &FileReader{ - client: c, - name: name, - info: info, - closed: false, - }, nil -} - -// Name returns the name of the file. -func (f *FileReader) Name() string { - return f.info.Name() -} - -// Stat returns the FileInfo structure describing file. -func (f *FileReader) Stat() os.FileInfo { - return f.info -} - -// SetDeadline sets the deadline for future Read, ReadAt, and Checksum calls. A -// zero value for t means those calls will not time out. -func (f *FileReader) SetDeadline(t time.Time) error { - f.deadline = t - if f.blockReader != nil { - return f.blockReader.SetDeadline(t) - } - - // Return the error at connection time. - return nil -} - -// Checksum returns HDFS's internal "MD5MD5CRC32C" checksum for a given file. -// -// Internally to HDFS, it works by calculating the MD5 of all the CRCs (which -// are stored alongside the data) for each block, and then calculating the MD5 -// of all of those. 
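[editor's note] A minimal sketch of retrieving the MD5MD5CRC32C checksum described above, assuming a reachable namenode at a placeholder address.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	f, err := client.Open("/mobydick.txt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// MD5 of the per-block CRC MD5s, tracking 'hadoop fs -checksum'.
	sum, err := f.Checksum()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(sum))
}
```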
-func (f *FileReader) Checksum() ([]byte, error) { - if f.info.IsDir() { - return nil, &os.PathError{ - "checksum", - f.name, - errors.New("is a directory"), - } - } - - if f.blocks == nil { - err := f.getBlocks() - if err != nil { - return nil, err - } - } - - // Hadoop calculates this by writing the checksums out to a byte array, which - // is automatically padded with zeroes out to the next power of 2 - // (with a minimum of 32)... and then takes the MD5 of that array, including - // the zeroes. This is pretty shady business, but we want to track - // the 'hadoop fs -checksum' behavior if possible. - paddedLength := 32 - totalLength := 0 - checksum := md5.New() - for _, block := range f.blocks { - cr := &rpc.ChecksumReader{ - Block: block, - UseDatanodeHostname: f.client.options.UseDatanodeHostname, - DialFunc: f.client.options.DatanodeDialFunc, - } - - err := cr.SetDeadline(f.deadline) - if err != nil { - return nil, err - } - - blockChecksum, err := cr.ReadChecksum() - if err != nil { - return nil, err - } - - checksum.Write(blockChecksum) - totalLength += len(blockChecksum) - if paddedLength < totalLength { - paddedLength *= 2 - } - } - - checksum.Write(make([]byte, paddedLength-totalLength)) - return checksum.Sum(nil), nil -} - -// Seek implements io.Seeker. -// -// The seek is virtual - it starts a new block read at the new position. -func (f *FileReader) Seek(offset int64, whence int) (int64, error) { - if f.closed { - return 0, io.ErrClosedPipe - } - - var off int64 - if whence == 0 { - off = offset - } else if whence == 1 { - off = f.offset + offset - } else if whence == 2 { - off = f.info.Size() + offset - } else { - return f.offset, fmt.Errorf("invalid whence: %d", whence) - } - - if off < 0 || off > f.info.Size() { - return f.offset, fmt.Errorf("invalid resulting offset: %d", off) - } - - if f.offset != off { - f.offset = off - if f.blockReader != nil { - f.blockReader.Close() - f.blockReader = nil - } - } - return f.offset, nil -} - -// Read implements io.Reader. -func (f *FileReader) Read(b []byte) (int, error) { - if f.closed { - return 0, io.ErrClosedPipe - } - - if f.info.IsDir() { - return 0, &os.PathError{ - "read", - f.name, - errors.New("is a directory"), - } - } - - if f.offset >= f.info.Size() { - return 0, io.EOF - } - - if len(b) == 0 { - return 0, nil - } - - if f.blocks == nil { - err := f.getBlocks() - if err != nil { - return 0, err - } - } - - if f.blockReader == nil { - err := f.getNewBlockReader() - if err != nil { - return 0, err - } - } - - for { - n, err := f.blockReader.Read(b) - f.offset += int64(n) - - if err != nil && err != io.EOF { - f.blockReader.Close() - f.blockReader = nil - return n, err - } else if n > 0 { - return n, nil - } else { - f.blockReader.Close() - f.getNewBlockReader() - } - } -} - -// ReadAt implements io.ReaderAt. -func (f *FileReader) ReadAt(b []byte, off int64) (int, error) { - if f.closed { - return 0, io.ErrClosedPipe - } - - if off < 0 { - return 0, &os.PathError{"readat", f.name, errors.New("negative offset")} - } - - _, err := f.Seek(off, 0) - if err != nil { - return 0, err - } - - n, err := io.ReadFull(f, b) - - // For some reason, os.File.ReadAt returns io.EOF in this case instead of - // io.ErrUnexpectedEOF. - if err == io.ErrUnexpectedEOF { - err = io.EOF - } - - return n, err -} - -// Readdir reads the contents of the directory associated with file and returns -// a slice of up to n os.FileInfo values, as would be returned by Stat, in -// directory order. 
Subsequent calls on the same file will yield further -// os.FileInfos. -// -// If n > 0, Readdir returns at most n os.FileInfo values. In this case, if -// Readdir returns an empty slice, it will return a non-nil error explaining -// why. At the end of a directory, the error is io.EOF. -// -// If n <= 0, Readdir returns all the os.FileInfo from the directory in a single -// slice. In this case, if Readdir succeeds (reads all the way to the end of -// the directory), it returns the slice and a nil error. If it encounters an -// error before the end of the directory, Readdir returns the os.FileInfo read -// until that point and a non-nil error. -// -// The os.FileInfo values returned will not have block location attached to -// the struct returned by Sys(). To fetch that information, make a separate -// call to Stat. -// -// Note that making multiple calls to Readdir with a smallish n (as you might do -// with the os version) is slower than just requesting everything at once. -// That's because HDFS has no mechanism for limiting the number of entries -// returned; whatever extra entries it returns are simply thrown away. -func (f *FileReader) Readdir(n int) ([]os.FileInfo, error) { - if f.closed { - return nil, io.ErrClosedPipe - } - - if !f.info.IsDir() { - return nil, &os.PathError{ - "readdir", - f.name, - errors.New("the file is not a directory"), - } - } - - if n <= 0 { - f.readdirLast = "" - } - - res := make([]os.FileInfo, 0) - for { - batch, remaining, err := f.readdir() - if err != nil { - return nil, &os.PathError{"readdir", f.name, err} - } - - if len(batch) > 0 { - f.readdirLast = batch[len(batch)-1].Name() - } - - res = append(res, batch...) - if remaining == 0 || (n > 0 && len(res) >= n) { - break - } - } - - if n > 0 { - if len(res) == 0 { - return nil, io.EOF - } - - if len(res) > n { - res = res[:n] - f.readdirLast = res[len(res)-1].Name() - } - } - - return res, nil -} - -func (f *FileReader) readdir() ([]os.FileInfo, int, error) { - req := &hdfs.GetListingRequestProto{ - Src: proto.String(f.name), - StartAfter: []byte(f.readdirLast), - NeedLocation: proto.Bool(false), - } - resp := &hdfs.GetListingResponseProto{} - - err := f.client.namenode.Execute("getListing", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return nil, 0, err - } else if resp.GetDirList() == nil { - return nil, 0, os.ErrNotExist - } - - list := resp.GetDirList().GetPartialListing() - res := make([]os.FileInfo, 0, len(list)) - for _, status := range list { - res = append(res, newFileInfo(status, "")) - } - - remaining := int(resp.GetDirList().GetRemainingEntries()) - return res, remaining, nil -} - -// Readdirnames reads and returns a slice of names from the directory f. -// -// If n > 0, Readdirnames returns at most n names. In this case, if Readdirnames -// returns an empty slice, it will return a non-nil error explaining why. At the -// end of a directory, the error is io.EOF. -// -// If n <= 0, Readdirnames returns all the names from the directory in a single -// slice. In this case, if Readdirnames succeeds (reads all the way to the end -// of the directory), it returns the slice and a nil error. If it encounters an -// error before the end of the directory, Readdirnames returns the names read -// until that point and a non-nil error. 
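[editor's note] A sketch of the paged-listing contract documented above, calling Readdirnames with n > 0 until io.EOF; the address and directory are placeholders. (Per the note above, a single n <= 0 call would be faster than many small pages.)

```go
package main

import (
	"fmt"
	"io"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	dir, err := client.Open("/user") // placeholder directory
	if err != nil {
		log.Fatal(err)
	}
	defer dir.Close()

	// With n > 0, an empty result is signalled by io.EOF.
	for {
		names, err := dir.Readdirnames(100)
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		for _, name := range names {
			fmt.Println(name)
		}
	}
}
```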
-func (f *FileReader) Readdirnames(n int) ([]string, error) { - if f.closed { - return nil, io.ErrClosedPipe - } - - fis, err := f.Readdir(n) - if err != nil { - return nil, err - } - - names := make([]string, 0, len(fis)) - for _, fi := range fis { - names = append(names, fi.Name()) - } - - return names, nil -} - -// Close implements io.Closer. -func (f *FileReader) Close() error { - f.closed = true - - if f.blockReader != nil { - f.blockReader.Close() - } - - return nil -} - -func (f *FileReader) getBlocks() error { - req := &hdfs.GetBlockLocationsRequestProto{ - Src: proto.String(f.name), - Offset: proto.Uint64(0), - Length: proto.Uint64(uint64(f.info.Size())), - } - resp := &hdfs.GetBlockLocationsResponseProto{} - - err := f.client.namenode.Execute("getBlockLocations", req, resp) - if err != nil { - return err - } - - f.blocks = resp.GetLocations().GetBlocks() - return nil -} - -func (f *FileReader) getNewBlockReader() error { - off := uint64(f.offset) - for _, block := range f.blocks { - start := block.GetOffset() - end := start + block.GetB().GetNumBytes() - - if start <= off && off < end { - f.blockReader = &rpc.BlockReader{ - ClientName: f.client.namenode.ClientName(), - Block: block, - Offset: int64(off - start), - UseDatanodeHostname: f.client.options.UseDatanodeHostname, - DialFunc: f.client.options.DatanodeDialFunc, - } - - return f.SetDeadline(f.deadline) - } - } - - return errors.New("invalid offset") -} diff --git a/vendor/github.com/colinmarc/hdfs/file_writer.go b/vendor/github.com/colinmarc/hdfs/file_writer.go deleted file mode 100644 index aa685871c72..00000000000 --- a/vendor/github.com/colinmarc/hdfs/file_writer.go +++ /dev/null @@ -1,309 +0,0 @@ -package hdfs - -import ( - "io" - "os" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// A FileWriter represents a writer for an open file in HDFS. It implements -// Writer and Closer, and can only be used for writes. For reads, see -// FileReader and Client.Open. -type FileWriter struct { - client *Client - name string - replication int - blockSize int64 - - blockWriter *rpc.BlockWriter - deadline time.Time - closed bool -} - -// Create opens a new file in HDFS with the default replication, block size, -// and permissions (0644), and returns an io.WriteCloser for writing -// to it. Because of the way that HDFS writes are buffered and acknowledged -// asynchronously, it is very important that Close is called after all data has -// been written. -func (c *Client) Create(name string) (*FileWriter, error) { - _, err := c.getFileInfo(name) - if err == nil { - return nil, &os.PathError{"create", name, os.ErrExist} - } else if !os.IsNotExist(err) { - return nil, &os.PathError{"create", name, err} - } - - defaults, err := c.fetchDefaults() - if err != nil { - return nil, err - } - - replication := int(defaults.GetReplication()) - blockSize := int64(defaults.GetBlockSize()) - return c.CreateFile(name, replication, blockSize, 0644) -} - -// CreateFile opens a new file in HDFS with the given replication, block size, -// and permissions, and returns an io.WriteCloser for writing to it. Because of -// the way that HDFS writes are buffered and acknowledged asynchronously, it is -// very important that Close is called after all data has been written. 
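[editor's note] A minimal sketch of the write path documented above, stressing that Close must be called and checked because writes are buffered and acknowledged asynchronously; the address and path are placeholders.

```go
package main

import (
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Default replication, block size, and 0644 permissions.
	w, err := client.Create("/user/output.txt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	if _, err := w.Write([]byte("hello, hdfs\n")); err != nil {
		log.Fatal(err)
	}

	// Skipping or ignoring Close can silently lose buffered data.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```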
-func (c *Client) CreateFile(name string, replication int, blockSize int64, perm os.FileMode) (*FileWriter, error) { - createReq := &hdfs.CreateRequestProto{ - Src: proto.String(name), - Masked: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))}, - ClientName: proto.String(c.namenode.ClientName()), - CreateFlag: proto.Uint32(1), - CreateParent: proto.Bool(false), - Replication: proto.Uint32(uint32(replication)), - BlockSize: proto.Uint64(uint64(blockSize)), - } - createResp := &hdfs.CreateResponseProto{} - - err := c.namenode.Execute("create", createReq, createResp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return nil, &os.PathError{"create", name, err} - } - - return &FileWriter{ - client: c, - name: name, - replication: replication, - blockSize: blockSize, - }, nil -} - -// Append opens an existing file in HDFS and returns an io.WriteCloser for -// writing to it. Because of the way that HDFS writes are buffered and -// acknowledged asynchronously, it is very important that Close is called after -// all data has been written. -func (c *Client) Append(name string) (*FileWriter, error) { - _, err := c.getFileInfo(name) - if err != nil { - return nil, &os.PathError{"append", name, err} - } - - appendReq := &hdfs.AppendRequestProto{ - Src: proto.String(name), - ClientName: proto.String(c.namenode.ClientName()), - } - appendResp := &hdfs.AppendResponseProto{} - - err = c.namenode.Execute("append", appendReq, appendResp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return nil, &os.PathError{"append", name, err} - } - - f := &FileWriter{ - client: c, - name: name, - replication: int(appendResp.Stat.GetBlockReplication()), - blockSize: int64(appendResp.Stat.GetBlocksize()), - } - - // This returns nil if there are no blocks (it's an empty file) or if the - // last block is full (so we have to start a fresh block). - block := appendResp.GetBlock() - if block == nil { - return f, nil - } - - f.blockWriter = &rpc.BlockWriter{ - ClientName: f.client.namenode.ClientName(), - Block: block, - BlockSize: f.blockSize, - Offset: int64(block.B.GetNumBytes()), - Append: true, - UseDatanodeHostname: f.client.options.UseDatanodeHostname, - DialFunc: f.client.options.DatanodeDialFunc, - } - - err = f.blockWriter.SetDeadline(f.deadline) - if err != nil { - return nil, err - } - - return f, nil -} - -// CreateEmptyFile creates a empty file at the given name, with the -// permissions 0644. -func (c *Client) CreateEmptyFile(name string) error { - f, err := c.Create(name) - if err != nil { - return err - } - - return f.Close() -} - -// SetDeadline sets the deadline for future Write, Flush, and Close calls. A -// zero value for t means those calls will not time out. -// -// Note that because of buffering, Write calls that do not result in a blocking -// network call may still succeed after the deadline. -func (f *FileWriter) SetDeadline(t time.Time) error { - f.deadline = t - if f.blockWriter != nil { - return f.blockWriter.SetDeadline(t) - } - - // Return the error at connection time. - return nil -} - -// Write implements io.Writer for writing to a file in HDFS. Internally, it -// writes data to an internal buffer first, and then later out to HDFS. Because -// of this, it is important that Close is called after all data has been -// written. 
-func (f *FileWriter) Write(b []byte) (int, error) { - if f.closed { - return 0, io.ErrClosedPipe - } - - if f.blockWriter == nil { - err := f.startNewBlock() - if err != nil { - return 0, err - } - } - - off := 0 - for off < len(b) { - n, err := f.blockWriter.Write(b[off:]) - off += n - if err == rpc.ErrEndOfBlock { - err = f.startNewBlock() - } - - if err != nil { - return off, err - } - } - - return off, nil -} - -// Flush flushes any buffered data out to the datanodes. Even immediately after -// a call to Flush, it is still necessary to call Close once all data has been -// written. -func (f *FileWriter) Flush() error { - if f.closed { - return io.ErrClosedPipe - } - - if f.blockWriter != nil { - return f.blockWriter.Flush() - } - - return nil -} - -// Close closes the file, writing any remaining data out to disk and waiting -// for acknowledgements from the datanodes. It is important that Close is called -// after all data has been written. -func (f *FileWriter) Close() error { - if f.closed { - return io.ErrClosedPipe - } - - var lastBlock *hdfs.ExtendedBlockProto - if f.blockWriter != nil { - lastBlock = f.blockWriter.Block.GetB() - - // Close the blockWriter, flushing any buffered packets. - err := f.finalizeBlock() - if err != nil { - return err - } - } - - completeReq := &hdfs.CompleteRequestProto{ - Src: proto.String(f.name), - ClientName: proto.String(f.client.namenode.ClientName()), - Last: lastBlock, - } - completeResp := &hdfs.CompleteResponseProto{} - - err := f.client.namenode.Execute("complete", completeReq, completeResp) - if err != nil { - return &os.PathError{"create", f.name, err} - } - - return nil -} - -func (f *FileWriter) startNewBlock() error { - var previous *hdfs.ExtendedBlockProto - if f.blockWriter != nil { - previous = f.blockWriter.Block.GetB() - - // TODO: We don't actually need to wait for previous blocks to ack before - // continuing. - err := f.finalizeBlock() - if err != nil { - return err - } - } - - addBlockReq := &hdfs.AddBlockRequestProto{ - Src: proto.String(f.name), - ClientName: proto.String(f.client.namenode.ClientName()), - Previous: previous, - } - addBlockResp := &hdfs.AddBlockResponseProto{} - - err := f.client.namenode.Execute("addBlock", addBlockReq, addBlockResp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"create", f.name, err} - } - - f.blockWriter = &rpc.BlockWriter{ - ClientName: f.client.namenode.ClientName(), - Block: addBlockResp.GetBlock(), - BlockSize: f.blockSize, - UseDatanodeHostname: f.client.options.UseDatanodeHostname, - DialFunc: f.client.options.DatanodeDialFunc, - } - - return f.blockWriter.SetDeadline(f.deadline) -} - -func (f *FileWriter) finalizeBlock() error { - err := f.blockWriter.Close() - if err != nil { - return err - } - - // Finalize the block on the namenode. 
- lastBlock := f.blockWriter.Block.GetB() - lastBlock.NumBytes = proto.Uint64(uint64(f.blockWriter.Offset)) - updateReq := &hdfs.UpdateBlockForPipelineRequestProto{ - Block: lastBlock, - ClientName: proto.String(f.client.namenode.ClientName()), - } - updateResp := &hdfs.UpdateBlockForPipelineResponseProto{} - - err = f.client.namenode.Execute("updateBlockForPipeline", updateReq, updateResp) - if err != nil { - return err - } - - f.blockWriter = nil - return nil -} diff --git a/vendor/github.com/colinmarc/hdfs/fixtures.sh b/vendor/github.com/colinmarc/hdfs/fixtures.sh deleted file mode 100644 index 6ff221bcfa8..00000000000 --- a/vendor/github.com/colinmarc/hdfs/fixtures.sh +++ /dev/null @@ -1,8 +0,0 @@ -set -e - -HADOOP_FS=${HADOOP_FS-"hadoop fs"} -$HADOOP_FS -mkdir -p "/_test" -$HADOOP_FS -chmod 777 "/_test" - -$HADOOP_FS -put ./testdata/foo.txt "/_test/foo.txt" -$HADOOP_FS -Ddfs.block.size=1048576 -put ./testdata/mobydick.txt "/_test/mobydick.txt" diff --git a/vendor/github.com/colinmarc/hdfs/hdfs.go b/vendor/github.com/colinmarc/hdfs/hdfs.go deleted file mode 100644 index 8e805c520b5..00000000000 --- a/vendor/github.com/colinmarc/hdfs/hdfs.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Package hdfs provides a native, idiomatic interface to HDFS. Where possible, -it mimics the functionality and signatures of the standard `os` package. - -Example: - - client, _ := hdfs.New("namenode:8020") - - file, _ := client.Open("/mobydick.txt") - - buf := make([]byte, 59) - file.ReadAt(buf, 48847) - - fmt.Println(string(buf)) - // => Abominable are the tumblers into which he pours his poison. -*/ -package hdfs diff --git a/vendor/github.com/colinmarc/hdfs/minicluster.sh b/vendor/github.com/colinmarc/hdfs/minicluster.sh deleted file mode 100644 index 5b2ca146725..00000000000 --- a/vendor/github.com/colinmarc/hdfs/minicluster.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh - -HADOOP_HOME=${HADOOP_HOME-"/tmp/hadoop"} -NN_PORT=${NN_PORT-"9000"} -HADOOP_NAMENODE="localhost:$NN_PORT" - -if [ ! -d "$HADOOP_HOME" ]; then - mkdir -p $HADOOP_HOME - - echo "Downloading latest CDH to ${HADOOP_HOME}/hadoop.tar.gz" - curl -o ${HADOOP_HOME}/hadoop.tar.gz -L http://archive.cloudera.com/cdh5/cdh/5/hadoop-latest.tar.gz - - echo "Extracting ${HADOOP_HOME}/hadoop.tar.gz into $HADOOP_HOME" - tar zxf ${HADOOP_HOME}/hadoop.tar.gz --strip-components 1 -C $HADOOP_HOME -fi - -MINICLUSTER_JAR=$(find $HADOOP_HOME -name "hadoop-mapreduce-client-jobclient*.jar" | grep -v tests | grep -v sources | head -1) -if [ ! -f "$MINICLUSTER_JAR" ]; then - echo "Couldn't find minicluster jar!" - exit 1 -fi - -echo "Starting minicluster..." -$HADOOP_HOME/bin/hadoop jar $MINICLUSTER_JAR minicluster -nnport $NN_PORT -datanodes 3 -nomr -format "$@" > minicluster.log 2>&1 & - -export HADOOP_CONF_DIR=$(mktemp -d) -cat > $HADOOP_CONF_DIR/core-site.xml <<EOF -<configuration> - <property> - <name>fs.defaultFS</name> - <value>hdfs://$HADOOP_NAMENODE</value> - </property> -</configuration> -EOF - -echo "Waiting for namenode to start up..."
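
The package comment in the deleted hdfs.go drops errors for brevity. Fleshed out with error handling — same hypothetical namenode address, reading the mobydick.txt fixture loaded by fixtures.sh above — the example becomes:

package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	client, err := hdfs.New("namenode:8020")
	if err != nil {
		log.Fatal(err)
	}

	// Path as loaded by fixtures.sh above.
	file, err := client.Open("/_test/mobydick.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	// ReadAt is positional, mirroring os.File: 59 bytes at offset 48847.
	buf := make([]byte, 59)
	if _, err := file.ReadAt(buf, 48847); err != nil {
		log.Fatal(err)
	}

	fmt.Println(string(buf))
	// => Abominable are the tumblers into which he pours his poison.
}
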
-$HADOOP_HOME/bin/hdfs dfsadmin -safemode wait - -export HADOOP_CONF_DIR=$(mktemp -d) -cat > $HADOOP_CONF_DIR/core-site.xml <<EOF -<configuration> - <property> - <name>fs.defaultFS</name> - <value>hdfs://$HADOOP_NAMENODE</value> - </property> -</configuration> -EOF - -export HADOOP_FS="$HADOOP_HOME/bin/hadoop fs" -./fixtures.sh - -echo "Please run the following commands:" -echo "export HADOOP_CONF_DIR='$HADOOP_CONF_DIR'" -echo "export HADOOP_FS='$HADOOP_HOME/bin/hadoop fs'" diff --git a/vendor/github.com/colinmarc/hdfs/mkdir.go b/vendor/github.com/colinmarc/hdfs/mkdir.go deleted file mode 100644 index 436a8b30c7b..00000000000 --- a/vendor/github.com/colinmarc/hdfs/mkdir.go +++ /dev/null @@ -1,55 +0,0 @@ -package hdfs - -import ( - "os" - "path" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// Mkdir creates a new directory with the specified name and permission bits. -func (c *Client) Mkdir(dirname string, perm os.FileMode) error { - return c.mkdir(dirname, perm, false) -} - -// MkdirAll creates a directory for dirname, along with any necessary parents, -// and returns nil, or else returns an error. The permission bits perm are used -// for all directories that MkdirAll creates. If dirname is already a directory, -// MkdirAll does nothing and returns nil. -func (c *Client) MkdirAll(dirname string, perm os.FileMode) error { - return c.mkdir(dirname, perm, true) -} - -func (c *Client) mkdir(dirname string, perm os.FileMode, createParent bool) error { - dirname = path.Clean(dirname) - - info, err := c.getFileInfo(dirname) - if err == nil { - if createParent && info.IsDir() { - return nil - } - return &os.PathError{"mkdir", dirname, os.ErrExist} - } else if !os.IsNotExist(err) { - return &os.PathError{"mkdir", dirname, err} - } - - req := &hdfs.MkdirsRequestProto{ - Src: proto.String(dirname), - Masked: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))}, - CreateParent: proto.Bool(createParent), - } - resp := &hdfs.MkdirsResponseProto{} - - err = c.namenode.Execute("mkdirs", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"mkdir", dirname, err} - } - - return nil -} diff --git a/vendor/github.com/colinmarc/hdfs/perms.go b/vendor/github.com/colinmarc/hdfs/perms.go deleted file mode 100644 index f3b36ac3c1f..00000000000 --- a/vendor/github.com/colinmarc/hdfs/perms.go +++ /dev/null @@ -1,76 +0,0 @@ -package hdfs - -import ( - "os" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// Chmod changes the mode of the named file to mode. -func (c *Client) Chmod(name string, perm os.FileMode) error { - req := &hdfs.SetPermissionRequestProto{ - Src: proto.String(name), - Permission: &hdfs.FsPermissionProto{Perm: proto.Uint32(uint32(perm))}, - } - resp := &hdfs.SetPermissionResponseProto{} - - err := c.namenode.Execute("setPermission", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"chmod", name, err} - } - - return nil -} - -// Chown changes the user and group of the file. Unlike os.Chown, this takes -// a string username and group (since that's what HDFS uses.) -// -// If an empty string is passed for user or group, that field will not be -// changed remotely.
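
The mkdir.go and perms.go files deleted here both mirror os-package signatures, so directory setup reads the same as local code, with the one HDFS twist that ownership is by user and group name rather than uid/gid. A sketch under those assumptions (the paths and principals are placeholders):

// setupIngestDir creates a directory tree and hands it to a service account.
func setupIngestDir(client *hdfs.Client) error {
	// MkdirAll, like os.MkdirAll, is a no-op if the directory exists.
	if err := client.MkdirAll("/data/ingest/2024", 0755); err != nil {
		return err
	}
	if err := client.Chmod("/data/ingest", 0750); err != nil {
		return err
	}
	// Empty strings would leave user or group unchanged; here we set both.
	return client.Chown("/data/ingest", "etl", "analytics")
}
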
-func (c *Client) Chown(name string, user, group string) error { - req := &hdfs.SetOwnerRequestProto{ - Src: proto.String(name), - Username: proto.String(user), - Groupname: proto.String(group), - } - resp := &hdfs.SetOwnerResponseProto{} - - err := c.namenode.Execute("setOwner", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"chown", name, err} - } - - return nil -} - -// Chtimes changes the access and modification times of the named file. -func (c *Client) Chtimes(name string, atime time.Time, mtime time.Time) error { - req := &hdfs.SetTimesRequestProto{ - Src: proto.String(name), - Mtime: proto.Uint64(uint64(mtime.Unix()) * 1000), - Atime: proto.Uint64(uint64(atime.Unix()) * 1000), - } - resp := &hdfs.SetTimesResponseProto{} - - err := c.namenode.Execute("setTimes", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"chtimes", name, err} - } - - return nil -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.pb.go deleted file mode 100644 index 35eadf6609e..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.pb.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: GenericRefreshProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Refresh request. -type GenericRefreshRequestProto struct { - Identifier *string `protobuf:"bytes,1,opt,name=identifier" json:"identifier,omitempty"` - Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GenericRefreshRequestProto) Reset() { *m = GenericRefreshRequestProto{} } -func (m *GenericRefreshRequestProto) String() string { return proto.CompactTextString(m) } -func (*GenericRefreshRequestProto) ProtoMessage() {} -func (*GenericRefreshRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } - -func (m *GenericRefreshRequestProto) GetIdentifier() string { - if m != nil && m.Identifier != nil { - return *m.Identifier - } - return "" -} - -func (m *GenericRefreshRequestProto) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - -// * -// A single response from a refresh handler. 
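
Every mutation method deleted in this patch — Chmod, Chown, Chtimes, and mkdir before them — follows one template: build a request proto, issue the named namenode RPC, and translate failures into *os.PathError, mapping well-known remote Java exceptions onto os sentinel errors. An in-package sketch of that template; setReplication is a hypothetical example method, not part of the vendored API, though SetReplicationRequestProto does exist in the hadoop_hdfs protocol package:

// setReplication illustrates the shared request/execute/translate pattern.
func (c *Client) setReplication(name string, replication uint32) error {
	req := &hdfs.SetReplicationRequestProto{
		Src:         proto.String(name),
		Replication: proto.Uint32(replication),
	}
	resp := &hdfs.SetReplicationResponseProto{}

	err := c.namenode.Execute("setReplication", req, resp)
	if err != nil {
		// NamenodeError carries the remote Java exception class name;
		// interpretException maps known ones to os.ErrNotExist,
		// os.ErrPermission, and friends.
		if nnErr, ok := err.(*rpc.NamenodeError); ok {
			err = interpretException(nnErr.Exception, err)
		}
		return &os.PathError{Op: "setreplication", Path: name, Err: err}
	}

	return nil
}
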
-type GenericRefreshResponseProto struct { - ExitStatus *int32 `protobuf:"varint,1,opt,name=exitStatus" json:"exitStatus,omitempty"` - UserMessage *string `protobuf:"bytes,2,opt,name=userMessage" json:"userMessage,omitempty"` - SenderName *string `protobuf:"bytes,3,opt,name=senderName" json:"senderName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GenericRefreshResponseProto) Reset() { *m = GenericRefreshResponseProto{} } -func (m *GenericRefreshResponseProto) String() string { return proto.CompactTextString(m) } -func (*GenericRefreshResponseProto) ProtoMessage() {} -func (*GenericRefreshResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } - -func (m *GenericRefreshResponseProto) GetExitStatus() int32 { - if m != nil && m.ExitStatus != nil { - return *m.ExitStatus - } - return 0 -} - -func (m *GenericRefreshResponseProto) GetUserMessage() string { - if m != nil && m.UserMessage != nil { - return *m.UserMessage - } - return "" -} - -func (m *GenericRefreshResponseProto) GetSenderName() string { - if m != nil && m.SenderName != nil { - return *m.SenderName - } - return "" -} - -// * -// Collection of responses from zero or more handlers. -type GenericRefreshResponseCollectionProto struct { - Responses []*GenericRefreshResponseProto `protobuf:"bytes,1,rep,name=responses" json:"responses,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GenericRefreshResponseCollectionProto) Reset() { *m = GenericRefreshResponseCollectionProto{} } -func (m *GenericRefreshResponseCollectionProto) String() string { return proto.CompactTextString(m) } -func (*GenericRefreshResponseCollectionProto) ProtoMessage() {} -func (*GenericRefreshResponseCollectionProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{2} -} - -func (m *GenericRefreshResponseCollectionProto) GetResponses() []*GenericRefreshResponseProto { - if m != nil { - return m.Responses - } - return nil -} - -func init() { - proto.RegisterType((*GenericRefreshRequestProto)(nil), "hadoop.common.GenericRefreshRequestProto") - proto.RegisterType((*GenericRefreshResponseProto)(nil), "hadoop.common.GenericRefreshResponseProto") - proto.RegisterType((*GenericRefreshResponseCollectionProto)(nil), "hadoop.common.GenericRefreshResponseCollectionProto") -} - -func init() { proto.RegisterFile("GenericRefreshProtocol.proto", fileDescriptor3) } - -var fileDescriptor3 = []byte{ - // 293 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x4a, 0xc3, 0x40, - 0x10, 0xc6, 0xd9, 0x56, 0x91, 0x4e, 0xf1, 0xb2, 0xa7, 0xd0, 0xaa, 0x94, 0x80, 0x10, 0x3d, 0xe4, - 0x50, 0x7c, 0x01, 0xeb, 0x41, 0x2f, 0x4a, 0xd9, 0x3e, 0xc1, 0xb2, 0x99, 0x26, 0x2b, 0xc9, 0x6e, - 0xba, 0xb3, 0x11, 0x6f, 0x5e, 0x05, 0x5f, 0xc2, 0x47, 0x95, 0x24, 0x6a, 0xd2, 0x52, 0xff, 0x9c, - 0x76, 0xf8, 0x66, 0xe7, 0x9b, 0xdf, 0x0c, 0x03, 0x27, 0xb7, 0x68, 0xd0, 0x69, 0x25, 0x70, 0xed, - 0x90, 0xb2, 0xa5, 0xb3, 0xde, 0x2a, 0x9b, 0xc7, 0x65, 0x1d, 0xf0, 0xe3, 0x4c, 0x26, 0xd6, 0x96, - 0xb1, 0xb2, 0x45, 0x61, 0x4d, 0xb8, 0x84, 0xc9, 0xf6, 0x77, 0x81, 0x9b, 0x0a, 0xc9, 0x37, 0x55, - 0xfc, 0x0c, 0x40, 0x27, 0x68, 0xbc, 0x5e, 0x6b, 0x74, 0x01, 0x9b, 0xb1, 0x68, 0x24, 0x7a, 0x0a, - 0xe7, 0x70, 0x20, 0x5d, 0x4a, 0xc1, 0x60, 0x36, 0x8c, 0x46, 0xa2, 0x89, 0xc3, 0x17, 0x98, 0xee, - 0x3a, 0x52, 0x69, 0x0d, 0xe1, 0xb7, 0x25, 0x3e, 0x6b, 0xbf, 0xf2, 0xd2, 0x57, 0xd4, 0x58, 0x1e, - 0x8a, 0x9e, 0xc2, 0x67, 0x30, 0xae, 0x08, 0xdd, 0x3d, 0x12, 0xc9, 0x14, 0x83, 0x41, 0xd3, 0xb3, - 0x2f, 
0xd5, 0x0e, 0x84, 0x26, 0x41, 0xf7, 0x20, 0x0b, 0x0c, 0x86, 0x2d, 0x54, 0xa7, 0x84, 0x1b, - 0x38, 0xdf, 0x0f, 0x70, 0x63, 0xf3, 0x1c, 0x95, 0xd7, 0xd6, 0xb4, 0x28, 0x77, 0x30, 0x72, 0x9f, - 0xa9, 0x9a, 0x64, 0x18, 0x8d, 0xe7, 0x97, 0xf1, 0xd6, 0x7a, 0xe2, 0x5f, 0x26, 0x11, 0x5d, 0xf1, - 0xfc, 0x8d, 0xc1, 0xe9, 0xfe, 0xad, 0xaf, 0xd0, 0x3d, 0x69, 0x85, 0xfc, 0x11, 0x8e, 0x5c, 0x9b, - 0xe1, 0x17, 0x7f, 0xf4, 0xe8, 0xf6, 0x3f, 0xb9, 0xfa, 0x17, 0xce, 0xce, 0x5c, 0x8b, 0x6b, 0x98, - 0x5a, 0x97, 0xc6, 0xb2, 0x94, 0x2a, 0xc3, 0x2f, 0x07, 0x5d, 0xaa, 0xf6, 0x02, 0x16, 0x3f, 0xdc, - 0x47, 0xf3, 0xd2, 0x2b, 0x63, 0xef, 0x8c, 0x7d, 0x04, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x01, 0x5c, - 0x07, 0x44, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.proto deleted file mode 100644 index fe465490b19..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GenericRefreshProtocol.proto +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.ipc.proto"; -option java_outer_classname = "GenericRefreshProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Refresh request. - */ -message GenericRefreshRequestProto { - optional string identifier = 1; - repeated string args = 2; -} - -/** - * A single response from a refresh handler. - */ -message GenericRefreshResponseProto { - optional int32 exitStatus = 1; // unix exit status to return - optional string userMessage = 2; // to be displayed to the user - optional string senderName = 3; // which handler sent this message -} - -/** - * Collection of responses from zero or more handlers. - */ -message GenericRefreshResponseCollectionProto { - repeated GenericRefreshResponseProto responses = 1; -} - -/** - * Protocol which is used to refresh a user-specified feature. - */ -service GenericRefreshProtocolService { - rpc refresh(GenericRefreshRequestProto) - returns(GenericRefreshResponseCollectionProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.pb.go deleted file mode 100644 index 5358c0e5ca6..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.pb.go +++ /dev/null @@ -1,76 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: GetUserMappingsProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Get groups for user request. -type GetGroupsForUserRequestProto struct { - User *string `protobuf:"bytes,1,req,name=user" json:"user,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetGroupsForUserRequestProto) Reset() { *m = GetGroupsForUserRequestProto{} } -func (m *GetGroupsForUserRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetGroupsForUserRequestProto) ProtoMessage() {} -func (*GetGroupsForUserRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{0} } - -func (m *GetGroupsForUserRequestProto) GetUser() string { - if m != nil && m.User != nil { - return *m.User - } - return "" -} - -// * -// Response for get groups. -type GetGroupsForUserResponseProto struct { - Groups []string `protobuf:"bytes,1,rep,name=groups" json:"groups,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetGroupsForUserResponseProto) Reset() { *m = GetGroupsForUserResponseProto{} } -func (m *GetGroupsForUserResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetGroupsForUserResponseProto) ProtoMessage() {} -func (*GetGroupsForUserResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor10, []int{1} } - -func (m *GetGroupsForUserResponseProto) GetGroups() []string { - if m != nil { - return m.Groups - } - return nil -} - -func init() { - proto.RegisterType((*GetGroupsForUserRequestProto)(nil), "hadoop.common.GetGroupsForUserRequestProto") - proto.RegisterType((*GetGroupsForUserResponseProto)(nil), "hadoop.common.GetGroupsForUserResponseProto") -} - -func init() { proto.RegisterFile("GetUserMappingsProtocol.proto", fileDescriptor10) } - -var fileDescriptor10 = []byte{ - // 213 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x75, 0x4f, 0x2d, 0x09, - 0x2d, 0x4e, 0x2d, 0xf2, 0x4d, 0x2c, 0x28, 0xc8, 0xcc, 0x4b, 0x2f, 0x0e, 0x28, 0xca, 0x2f, 0xc9, - 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, 0xf4, - 0x92, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0x94, 0x8c, 0xb8, 0x64, 0xdc, 0x53, 0x4b, 0xdc, 0x8b, 0xf2, - 0x4b, 0x0b, 0x8a, 0xdd, 0xf2, 0x8b, 0x40, 0x1a, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xc0, - 0xfa, 0x84, 0x84, 0xb8, 0x58, 0x4a, 0x8b, 0x53, 0x8b, 0x24, 0x18, 0x15, 0x98, 0x34, 0x38, 0x83, - 0xc0, 0x6c, 0x25, 0x73, 0xb0, 0x1d, 0x68, 0x7a, 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x21, 0x9a, - 0xc4, 0xb8, 0xd8, 0xd2, 0xc1, 0xb2, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0x9c, 0x41, 0x50, 0x9e, 0x51, - 0x3f, 0x23, 0x97, 0x1c, 0x0e, 0xd7, 0x05, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x0a, 0xe5, 0x72, - 0x09, 0xa4, 0xa3, 0x99, 0x2d, 0xa4, 0xad, 0x87, 0xe2, 0x66, 0x3d, 0x7c, 0x0e, 0x96, 0xd2, 0x21, - 0xa8, 0x18, 0xc9, 0xa5, 0x4e, 0x2e, 0x5c, 0xb2, 0xf9, 0x45, 0xe9, 0x7a, 0x89, 0x05, 0x89, 0xc9, - 0x19, 0xa9, 0x30, 0x9d, 0x25, 0xf9, 0xf9, 0x39, 0xc5, 0x90, 0xe0, 0x72, 0xc2, 0x15, 0x9a, 0x60, - 0xba, 0xb8, 0x83, 0x91, 0x71, 0x01, 0x23, 0x23, 0x20, 0x00, 0x00, 0xff, 0xff, 0x7c, 0x38, 0xcc, - 0x6f, 0x73, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.proto deleted file mode 
100644 index 51552b879f3..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/GetUserMappingsProtocol.proto +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.tools.proto"; -option java_outer_classname = "GetUserMappingsProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Get groups for user request. - */ -message GetGroupsForUserRequestProto { - required string user = 1; -} - -/** - * Response for get groups. - */ -message GetGroupsForUserResponseProto { - repeated string groups = 1; -} - - -/** - * Protocol which maps users to groups. - */ -service GetUserMappingsProtocolService { - /** - * Get the groups which are mapped to the given user. - */ - rpc getGroupsForUser(GetGroupsForUserRequestProto) - returns(GetGroupsForUserResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.pb.go deleted file mode 100644 index cc855ccc98c..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.pb.go +++ /dev/null @@ -1,295 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: HAServiceProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
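
In proto2-generated Go such as the files above, every scalar field is a pointer so that an unset field is distinguishable from a zero value; that is why the proto.String-style wrappers and the nil-safe Get accessors exist. A small illustration using the GetUserMappings messages defined above (the username is a placeholder):

package main

import (
	"fmt"

	hadoop "github.com/colinmarc/hdfs/protocol/hadoop_common"
	"github.com/golang/protobuf/proto"
)

func main() {
	// proto.String returns *string, marking the field as explicitly set.
	req := &hadoop.GetGroupsForUserRequestProto{
		User: proto.String("alice"),
	}
	fmt.Println(req.GetUser()) // "alice"

	// The generated getters are nil-safe: on a nil message or unset field
	// they return the zero value rather than panicking.
	var missing *hadoop.GetGroupsForUserRequestProto
	fmt.Println(missing.GetUser() == "") // true
}
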
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type HAServiceStateProto int32 - -const ( - HAServiceStateProto_INITIALIZING HAServiceStateProto = 0 - HAServiceStateProto_ACTIVE HAServiceStateProto = 1 - HAServiceStateProto_STANDBY HAServiceStateProto = 2 -) - -var HAServiceStateProto_name = map[int32]string{ - 0: "INITIALIZING", - 1: "ACTIVE", - 2: "STANDBY", -} -var HAServiceStateProto_value = map[string]int32{ - "INITIALIZING": 0, - "ACTIVE": 1, - "STANDBY": 2, -} - -func (x HAServiceStateProto) Enum() *HAServiceStateProto { - p := new(HAServiceStateProto) - *p = x - return p -} -func (x HAServiceStateProto) String() string { - return proto.EnumName(HAServiceStateProto_name, int32(x)) -} -func (x *HAServiceStateProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HAServiceStateProto_value, data, "HAServiceStateProto") - if err != nil { - return err - } - *x = HAServiceStateProto(value) - return nil -} -func (HAServiceStateProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } - -type HARequestSource int32 - -const ( - HARequestSource_REQUEST_BY_USER HARequestSource = 0 - HARequestSource_REQUEST_BY_USER_FORCED HARequestSource = 1 - HARequestSource_REQUEST_BY_ZKFC HARequestSource = 2 -) - -var HARequestSource_name = map[int32]string{ - 0: "REQUEST_BY_USER", - 1: "REQUEST_BY_USER_FORCED", - 2: "REQUEST_BY_ZKFC", -} -var HARequestSource_value = map[string]int32{ - "REQUEST_BY_USER": 0, - "REQUEST_BY_USER_FORCED": 1, - "REQUEST_BY_ZKFC": 2, -} - -func (x HARequestSource) Enum() *HARequestSource { - p := new(HARequestSource) - *p = x - return p -} -func (x HARequestSource) String() string { - return proto.EnumName(HARequestSource_name, int32(x)) -} -func (x *HARequestSource) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HARequestSource_value, data, "HARequestSource") - if err != nil { - return err - } - *x = HARequestSource(value) - return nil -} -func (HARequestSource) EnumDescriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } - -type HAStateChangeRequestInfoProto struct { - ReqSource *HARequestSource `protobuf:"varint,1,req,name=reqSource,enum=hadoop.common.HARequestSource" json:"reqSource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HAStateChangeRequestInfoProto) Reset() { *m = HAStateChangeRequestInfoProto{} } -func (m *HAStateChangeRequestInfoProto) String() string { return proto.CompactTextString(m) } -func (*HAStateChangeRequestInfoProto) ProtoMessage() {} -func (*HAStateChangeRequestInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{0} } - -func (m *HAStateChangeRequestInfoProto) GetReqSource() HARequestSource { - if m != nil && m.ReqSource != nil { - return *m.ReqSource - } - return HARequestSource_REQUEST_BY_USER -} - -// * -// void request -type MonitorHealthRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *MonitorHealthRequestProto) Reset() { *m = MonitorHealthRequestProto{} } -func (m *MonitorHealthRequestProto) String() string { return proto.CompactTextString(m) } -func (*MonitorHealthRequestProto) ProtoMessage() {} -func (*MonitorHealthRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{1} } - -// * -// void response -type MonitorHealthResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *MonitorHealthResponseProto) Reset() { *m = MonitorHealthResponseProto{} } -func (m *MonitorHealthResponseProto) String() string { return proto.CompactTextString(m) } -func 
(*MonitorHealthResponseProto) ProtoMessage() {} -func (*MonitorHealthResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{2} } - -// * -// void request -type TransitionToActiveRequestProto struct { - ReqInfo *HAStateChangeRequestInfoProto `protobuf:"bytes,1,req,name=reqInfo" json:"reqInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TransitionToActiveRequestProto) Reset() { *m = TransitionToActiveRequestProto{} } -func (m *TransitionToActiveRequestProto) String() string { return proto.CompactTextString(m) } -func (*TransitionToActiveRequestProto) ProtoMessage() {} -func (*TransitionToActiveRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{3} } - -func (m *TransitionToActiveRequestProto) GetReqInfo() *HAStateChangeRequestInfoProto { - if m != nil { - return m.ReqInfo - } - return nil -} - -// * -// void response -type TransitionToActiveResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *TransitionToActiveResponseProto) Reset() { *m = TransitionToActiveResponseProto{} } -func (m *TransitionToActiveResponseProto) String() string { return proto.CompactTextString(m) } -func (*TransitionToActiveResponseProto) ProtoMessage() {} -func (*TransitionToActiveResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor11, []int{4} -} - -// * -// void request -type TransitionToStandbyRequestProto struct { - ReqInfo *HAStateChangeRequestInfoProto `protobuf:"bytes,1,req,name=reqInfo" json:"reqInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TransitionToStandbyRequestProto) Reset() { *m = TransitionToStandbyRequestProto{} } -func (m *TransitionToStandbyRequestProto) String() string { return proto.CompactTextString(m) } -func (*TransitionToStandbyRequestProto) ProtoMessage() {} -func (*TransitionToStandbyRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor11, []int{5} -} - -func (m *TransitionToStandbyRequestProto) GetReqInfo() *HAStateChangeRequestInfoProto { - if m != nil { - return m.ReqInfo - } - return nil -} - -// * -// void response -type TransitionToStandbyResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *TransitionToStandbyResponseProto) Reset() { *m = TransitionToStandbyResponseProto{} } -func (m *TransitionToStandbyResponseProto) String() string { return proto.CompactTextString(m) } -func (*TransitionToStandbyResponseProto) ProtoMessage() {} -func (*TransitionToStandbyResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor11, []int{6} -} - -// * -// void request -type GetServiceStatusRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceStatusRequestProto) Reset() { *m = GetServiceStatusRequestProto{} } -func (m *GetServiceStatusRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetServiceStatusRequestProto) ProtoMessage() {} -func (*GetServiceStatusRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{7} } - -// * -// Returns the state of the service -type GetServiceStatusResponseProto struct { - State *HAServiceStateProto `protobuf:"varint,1,req,name=state,enum=hadoop.common.HAServiceStateProto" json:"state,omitempty"` - // If state is STANDBY, indicate whether it is - // ready to become active. 
- ReadyToBecomeActive *bool `protobuf:"varint,2,opt,name=readyToBecomeActive" json:"readyToBecomeActive,omitempty"` - // If not ready to become active, a textual explanation of why not - NotReadyReason *string `protobuf:"bytes,3,opt,name=notReadyReason" json:"notReadyReason,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServiceStatusResponseProto) Reset() { *m = GetServiceStatusResponseProto{} } -func (m *GetServiceStatusResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetServiceStatusResponseProto) ProtoMessage() {} -func (*GetServiceStatusResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor11, []int{8} } - -func (m *GetServiceStatusResponseProto) GetState() HAServiceStateProto { - if m != nil && m.State != nil { - return *m.State - } - return HAServiceStateProto_INITIALIZING -} - -func (m *GetServiceStatusResponseProto) GetReadyToBecomeActive() bool { - if m != nil && m.ReadyToBecomeActive != nil { - return *m.ReadyToBecomeActive - } - return false -} - -func (m *GetServiceStatusResponseProto) GetNotReadyReason() string { - if m != nil && m.NotReadyReason != nil { - return *m.NotReadyReason - } - return "" -} - -func init() { - proto.RegisterType((*HAStateChangeRequestInfoProto)(nil), "hadoop.common.HAStateChangeRequestInfoProto") - proto.RegisterType((*MonitorHealthRequestProto)(nil), "hadoop.common.MonitorHealthRequestProto") - proto.RegisterType((*MonitorHealthResponseProto)(nil), "hadoop.common.MonitorHealthResponseProto") - proto.RegisterType((*TransitionToActiveRequestProto)(nil), "hadoop.common.TransitionToActiveRequestProto") - proto.RegisterType((*TransitionToActiveResponseProto)(nil), "hadoop.common.TransitionToActiveResponseProto") - proto.RegisterType((*TransitionToStandbyRequestProto)(nil), "hadoop.common.TransitionToStandbyRequestProto") - proto.RegisterType((*TransitionToStandbyResponseProto)(nil), "hadoop.common.TransitionToStandbyResponseProto") - proto.RegisterType((*GetServiceStatusRequestProto)(nil), "hadoop.common.GetServiceStatusRequestProto") - proto.RegisterType((*GetServiceStatusResponseProto)(nil), "hadoop.common.GetServiceStatusResponseProto") - proto.RegisterEnum("hadoop.common.HAServiceStateProto", HAServiceStateProto_name, HAServiceStateProto_value) - proto.RegisterEnum("hadoop.common.HARequestSource", HARequestSource_name, HARequestSource_value) -} - -func init() { proto.RegisterFile("HAServiceProtocol.proto", fileDescriptor11) } - -var fileDescriptor11 = []byte{ - // 529 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xdd, 0x6e, 0xd3, 0x30, - 0x14, 0x9e, 0x3b, 0xc1, 0xd8, 0x19, 0xdb, 0x22, 0x57, 0x82, 0x12, 0xb6, 0x52, 0x72, 0x81, 0xca, - 0x18, 0x01, 0xf5, 0x0a, 0x09, 0x2e, 0x48, 0xbb, 0x74, 0x8d, 0x80, 0x02, 0x4e, 0x06, 0xda, 0x24, - 0x54, 0x99, 0xd4, 0x34, 0x91, 0x16, 0xbb, 0x4d, 0xdc, 0x4a, 0x7b, 0x03, 0x1e, 0x83, 0x77, 0xe0, - 0x75, 0x78, 0x18, 0x94, 0x26, 0xd3, 0xd2, 0x24, 0x84, 0xdd, 0xec, 0x2a, 0x89, 0xfd, 0xfd, 0xc5, - 0xe7, 0x1c, 0xc3, 0xfd, 0x81, 0x61, 0xb3, 0x70, 0xe1, 0xbb, 0xec, 0x53, 0x28, 0xa4, 0x70, 0xc5, - 0xb9, 0x3e, 0x8d, 0x5f, 0xf0, 0xb6, 0x47, 0xc7, 0x42, 0x4c, 0x75, 0x57, 0x04, 0x81, 0xe0, 0xda, - 0x37, 0xd8, 0x1f, 0x18, 0xb6, 0xa4, 0x92, 0xf5, 0x3c, 0xca, 0x27, 0x8c, 0xb0, 0xd9, 0x9c, 0x45, - 0xd2, 0xe2, 0x3f, 0xc4, 0x92, 0x88, 0xdf, 0xc0, 0x66, 0xc8, 0x66, 0xb6, 0x98, 0x87, 0x2e, 0x6b, - 0xa0, 0x56, 0xad, 0xbd, 0xd3, 0x69, 0xea, 0x2b, 0x1a, 0xfa, 0xc0, 0x48, 0x59, 0x09, 0x8a, 0x5c, - 0x11, 0xb4, 0x87, 
0xf0, 0xe0, 0x83, 0xe0, 0xbe, 0x14, 0xe1, 0x80, 0xd1, 0x73, 0xe9, 0xa5, 0xc0, - 0xa5, 0xb4, 0xb6, 0x07, 0x6a, 0x6e, 0x33, 0x9a, 0x0a, 0x1e, 0x25, 0x89, 0x35, 0x0f, 0x9a, 0x4e, - 0x48, 0x79, 0xe4, 0x4b, 0x5f, 0x70, 0x47, 0x18, 0xae, 0xf4, 0x17, 0x2c, 0xcb, 0xc7, 0x7d, 0xd8, - 0x08, 0xd9, 0x2c, 0x8e, 0xba, 0x0c, 0xb6, 0xd5, 0x39, 0x2c, 0x04, 0xab, 0xf8, 0x33, 0x72, 0x49, - 0xd6, 0x1e, 0xc3, 0xa3, 0x32, 0xa7, 0x6c, 0x18, 0x7f, 0x15, 0x62, 0x4b, 0xca, 0xc7, 0xdf, 0x2f, - 0x6e, 0x24, 0x8d, 0x06, 0xad, 0x52, 0xab, 0x6c, 0x9c, 0x26, 0xec, 0x1d, 0x33, 0x99, 0x16, 0x38, - 0x56, 0x9d, 0x47, 0x2b, 0x27, 0xfb, 0x1b, 0xc1, 0x7e, 0x11, 0x90, 0x51, 0xc0, 0xaf, 0xe0, 0x56, - 0x14, 0xa7, 0x49, 0x4b, 0xaa, 0x15, 0xb3, 0x5e, 0x71, 0x13, 0x0a, 0x49, 0x08, 0xf8, 0x25, 0xd4, - 0x43, 0x46, 0xc7, 0x17, 0x8e, 0xe8, 0x32, 0x57, 0x04, 0x2c, 0x39, 0xae, 0x46, 0xad, 0x85, 0xda, - 0x77, 0x48, 0xd9, 0x16, 0x7e, 0x02, 0x3b, 0x5c, 0x48, 0x12, 0xef, 0x10, 0x46, 0x23, 0xc1, 0x1b, - 0xeb, 0x2d, 0xd4, 0xde, 0x24, 0xb9, 0xd5, 0x83, 0xb7, 0x50, 0x2f, 0xf1, 0xc5, 0x0a, 0xdc, 0xb5, - 0x86, 0x96, 0x63, 0x19, 0xef, 0xad, 0x33, 0x6b, 0x78, 0xac, 0xac, 0x61, 0x80, 0xdb, 0x46, 0xcf, - 0xb1, 0xbe, 0x98, 0x0a, 0xc2, 0x5b, 0xb0, 0x61, 0x3b, 0xc6, 0xf0, 0xa8, 0x7b, 0xaa, 0xd4, 0x0e, - 0xbe, 0xc2, 0x6e, 0xae, 0x19, 0x71, 0x1d, 0x76, 0x89, 0xf9, 0xf9, 0xc4, 0xb4, 0x9d, 0x51, 0xf7, - 0x74, 0x74, 0x62, 0x9b, 0x44, 0x59, 0xc3, 0x2a, 0xdc, 0xcb, 0x2d, 0x8e, 0xfa, 0x1f, 0x49, 0xcf, - 0x3c, 0x52, 0x50, 0x8e, 0x70, 0xf6, 0xae, 0xdf, 0x53, 0x6a, 0x9d, 0x3f, 0xeb, 0xd0, 0x28, 0x4c, - 0x54, 0xfa, 0x89, 0xc7, 0xb0, 0x1d, 0x64, 0xfb, 0x18, 0xb7, 0x73, 0xa7, 0xf9, 0xcf, 0x11, 0x50, - 0x9f, 0x56, 0x23, 0xb3, 0x15, 0x8b, 0x00, 0xcb, 0x42, 0x97, 0xe2, 0xe7, 0x39, 0x81, 0xea, 0x91, - 0x51, 0xf5, 0x6b, 0xc0, 0xb3, 0xa6, 0x0b, 0xa8, 0xcb, 0x62, 0x33, 0xe2, 0x2a, 0x99, 0x92, 0xd9, - 0x50, 0x5f, 0x5c, 0x07, 0x9f, 0xf5, 0x0d, 0x40, 0x99, 0xe4, 0xfa, 0x17, 0x3f, 0xcb, 0x89, 0x54, - 0x4d, 0x80, 0x7a, 0xf8, 0x5f, 0x70, 0xc6, 0xae, 0xfb, 0x1a, 0x54, 0x11, 0x4e, 0x74, 0x3a, 0xa5, - 0xae, 0xc7, 0x2e, 0x99, 0x1e, 0x4d, 0xae, 0xcc, 0x6e, 0xf1, 0x2e, 0x5d, 0x3e, 0xa3, 0x9f, 0x08, - 0xfd, 0x42, 0xe8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xd8, 0x57, 0xbf, 0x6b, 0x05, 0x00, - 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.proto deleted file mode 100644 index e0060f25041..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/HAServiceProtocol.proto +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. 
- * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.ha.proto"; -option java_outer_classname = "HAServiceProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -enum HAServiceStateProto { - INITIALIZING = 0; - ACTIVE = 1; - STANDBY = 2; -} - -enum HARequestSource { - REQUEST_BY_USER = 0; - REQUEST_BY_USER_FORCED = 1; - REQUEST_BY_ZKFC = 2; -} - -message HAStateChangeRequestInfoProto { - required HARequestSource reqSource = 1; -} - -/** - * void request - */ -message MonitorHealthRequestProto { -} - -/** - * void response - */ -message MonitorHealthResponseProto { -} - -/** - * void request - */ -message TransitionToActiveRequestProto { - required HAStateChangeRequestInfoProto reqInfo = 1; -} - -/** - * void response - */ -message TransitionToActiveResponseProto { -} - -/** - * void request - */ -message TransitionToStandbyRequestProto { - required HAStateChangeRequestInfoProto reqInfo = 1; -} - -/** - * void response - */ -message TransitionToStandbyResponseProto { -} - -/** - * void request - */ -message GetServiceStatusRequestProto { -} - -/** - * Returns the state of the service - */ -message GetServiceStatusResponseProto { - required HAServiceStateProto state = 1; - - // If state is STANDBY, indicate whether it is - // ready to become active. - optional bool readyToBecomeActive = 2; - // If not ready to become active, a textual explanation of why not - optional string notReadyReason = 3; -} - -/** - * Protocol interface provides High availability related - * primitives to monitor and failover a service. - * - * For details see o.a.h.ha.HAServiceProtocol. - */ -service HAServiceProtocolService { - /** - * Monitor the health of a service. - */ - rpc monitorHealth(MonitorHealthRequestProto) - returns(MonitorHealthResponseProto); - - /** - * Request service to tranisition to active state. - */ - rpc transitionToActive(TransitionToActiveRequestProto) - returns(TransitionToActiveResponseProto); - - /** - * Request service to transition to standby state. - */ - rpc transitionToStandby(TransitionToStandbyRequestProto) - returns(TransitionToStandbyResponseProto); - - /** - * Get the current status of the service. - */ - rpc getServiceStatus(GetServiceStatusRequestProto) - returns(GetServiceStatusResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.pb.go deleted file mode 100644 index 7f621594b26..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.pb.go +++ /dev/null @@ -1,177 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: IpcConnectionContext.proto - -/* -Package hadoop_common is a generated protocol buffer package. 
- -It is generated from these files: - IpcConnectionContext.proto - ZKFCProtocol.proto - RefreshAuthorizationPolicyProtocol.proto - GenericRefreshProtocol.proto - RpcHeader.proto - TraceAdmin.proto - RefreshCallQueueProtocol.proto - Security.proto - ProtobufRpcEngine.proto - RefreshUserMappingsProtocol.proto - GetUserMappingsProtocol.proto - HAServiceProtocol.proto - ProtocolInfo.proto - -It has these top-level messages: - UserInformationProto - IpcConnectionContextProto - CedeActiveRequestProto - CedeActiveResponseProto - GracefulFailoverRequestProto - GracefulFailoverResponseProto - RefreshServiceAclRequestProto - RefreshServiceAclResponseProto - GenericRefreshRequestProto - GenericRefreshResponseProto - GenericRefreshResponseCollectionProto - RPCTraceInfoProto - RPCCallerContextProto - RpcRequestHeaderProto - RpcResponseHeaderProto - RpcSaslProto - ListSpanReceiversRequestProto - SpanReceiverListInfo - ListSpanReceiversResponseProto - ConfigPair - AddSpanReceiverRequestProto - AddSpanReceiverResponseProto - RemoveSpanReceiverRequestProto - RemoveSpanReceiverResponseProto - RefreshCallQueueRequestProto - RefreshCallQueueResponseProto - TokenProto - GetDelegationTokenRequestProto - GetDelegationTokenResponseProto - RenewDelegationTokenRequestProto - RenewDelegationTokenResponseProto - CancelDelegationTokenRequestProto - CancelDelegationTokenResponseProto - RequestHeaderProto - RefreshUserToGroupsMappingsRequestProto - RefreshUserToGroupsMappingsResponseProto - RefreshSuperUserGroupsConfigurationRequestProto - RefreshSuperUserGroupsConfigurationResponseProto - GetGroupsForUserRequestProto - GetGroupsForUserResponseProto - HAStateChangeRequestInfoProto - MonitorHealthRequestProto - MonitorHealthResponseProto - TransitionToActiveRequestProto - TransitionToActiveResponseProto - TransitionToStandbyRequestProto - TransitionToStandbyResponseProto - GetServiceStatusRequestProto - GetServiceStatusResponseProto - GetProtocolVersionsRequestProto - ProtocolVersionProto - GetProtocolVersionsResponseProto - GetProtocolSignatureRequestProto - GetProtocolSignatureResponseProto - ProtocolSignatureProto -*/ -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// * -// Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext -type UserInformationProto struct { - EffectiveUser *string `protobuf:"bytes,1,opt,name=effectiveUser" json:"effectiveUser,omitempty"` - RealUser *string `protobuf:"bytes,2,opt,name=realUser" json:"realUser,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UserInformationProto) Reset() { *m = UserInformationProto{} } -func (m *UserInformationProto) String() string { return proto.CompactTextString(m) } -func (*UserInformationProto) ProtoMessage() {} -func (*UserInformationProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *UserInformationProto) GetEffectiveUser() string { - if m != nil && m.EffectiveUser != nil { - return *m.EffectiveUser - } - return "" -} - -func (m *UserInformationProto) GetRealUser() string { - if m != nil && m.RealUser != nil { - return *m.RealUser - } - return "" -} - -// * -// The connection context is sent as part of the connection establishment. -// It establishes the context for ALL Rpc calls within the connection. -type IpcConnectionContextProto struct { - // UserInfo beyond what is determined as part of security handshake - // at connection time (kerberos, tokens etc). - UserInfo *UserInformationProto `protobuf:"bytes,2,opt,name=userInfo" json:"userInfo,omitempty"` - // Protocol name for next rpc layer. - // The client created a proxy with this protocol name - Protocol *string `protobuf:"bytes,3,opt,name=protocol" json:"protocol,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IpcConnectionContextProto) Reset() { *m = IpcConnectionContextProto{} } -func (m *IpcConnectionContextProto) String() string { return proto.CompactTextString(m) } -func (*IpcConnectionContextProto) ProtoMessage() {} -func (*IpcConnectionContextProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *IpcConnectionContextProto) GetUserInfo() *UserInformationProto { - if m != nil { - return m.UserInfo - } - return nil -} - -func (m *IpcConnectionContextProto) GetProtocol() string { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return "" -} - -func init() { - proto.RegisterType((*UserInformationProto)(nil), "hadoop.common.UserInformationProto") - proto.RegisterType((*IpcConnectionContextProto)(nil), "hadoop.common.IpcConnectionContextProto") -} - -func init() { proto.RegisterFile("IpcConnectionContext.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xf2, 0x2c, 0x48, 0x76, - 0xce, 0xcf, 0xcb, 0x4b, 0x4d, 0x2e, 0xc9, 0xcc, 0xcf, 0x73, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, - 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0xcd, 0x48, 0x4c, 0xc9, 0xcf, 0x2f, 0xd0, 0x4b, - 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0x53, 0x8a, 0xe0, 0x12, 0x09, 0x2d, 0x4e, 0x2d, 0xf2, 0xcc, 0x4b, - 0xcb, 0x2f, 0xca, 0x4d, 0x04, 0x29, 0x0f, 0x00, 0x2b, 0x53, 0xe1, 0xe2, 0x4d, 0x4d, 0x4b, 0x03, - 0x19, 0x50, 0x96, 0x0a, 0x52, 0x20, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x84, 0x2a, 0x28, 0x24, - 0xc5, 0xc5, 0x51, 0x94, 0x9a, 0x98, 0x03, 0x56, 0xc0, 0x04, 0x56, 0x00, 0xe7, 0x2b, 0x55, 0x70, - 0x49, 0x62, 0x73, 0x06, 0xc4, 0x78, 0x7b, 0x2e, 0x8e, 0x52, 0xa8, 0xb5, 0x60, 0x8d, 0xdc, 0x46, - 0xca, 0x7a, 0x28, 0x0e, 0xd3, 0xc3, 0xe6, 0xaa, 0x20, 0xb8, 0x26, 0x90, 0xcd, 0x60, 0xff, 0x24, - 0xe7, 0xe7, 0x48, 0x30, 0x43, 0x6c, 0x86, 0xf1, 
0x9d, 0xec, 0xb9, 0xe4, 0xf2, 0x8b, 0xd2, 0xf5, - 0x12, 0x0b, 0x12, 0x93, 0x33, 0x52, 0x61, 0xc6, 0x66, 0x16, 0x24, 0x43, 0x42, 0x20, 0xa9, 0x34, - 0xcd, 0x49, 0x0a, 0xa7, 0xcb, 0x8a, 0x17, 0x30, 0x32, 0x02, 0x02, 0x00, 0x00, 0xff, 0xff, 0x94, - 0x05, 0xde, 0x49, 0x40, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.proto deleted file mode 100644 index 4557e893cff..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/IpcConnectionContext.proto +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.ipc.protobuf"; -option java_outer_classname = "IpcConnectionContextProtos"; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Spec for UserInformationProto is specified in ProtoUtil#makeIpcConnectionContext - */ -message UserInformationProto { - optional string effectiveUser = 1; - optional string realUser = 2; -} - -/** - * The connection context is sent as part of the connection establishment. - * It establishes the context for ALL Rpc calls within the connection. - */ -message IpcConnectionContextProto { - // UserInfo beyond what is determined as part of security handshake - // at connection time (kerberos, tokens etc). - optional UserInformationProto userInfo = 2; - - // Protocol name for next rpc layer. - // The client created a proxy with this protocol name - optional string protocol = 3; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.pb.go deleted file mode 100644 index 6321f81b217..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.pb.go +++ /dev/null @@ -1,92 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: ProtobufRpcEngine.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// This message is the header for the Protobuf Rpc Engine -// when sending a RPC request from RPC client to the RPC server. -// The actual request (serialized as protobuf) follows this request. -// -// No special header is needed for the Rpc Response for Protobuf Rpc Engine. 
-// The normal RPC response header (see RpcHeader.proto) are sufficient. -type RequestHeaderProto struct { - // * Name of the RPC method - MethodName *string `protobuf:"bytes,1,req,name=methodName" json:"methodName,omitempty"` - // * - // RPCs for a particular interface (ie protocol) are done using a - // IPC connection that is setup using rpcProxy. - // The rpcProxy's has a declared protocol name that is - // sent form client to server at connection time. - // - // Each Rpc call also sends a protocol name - // (called declaringClassprotocolName). This name is usually the same - // as the connection protocol name except in some cases. - // For example metaProtocols such ProtocolInfoProto which get metainfo - // about the protocol reuse the connection but need to indicate that - // the actual protocol is different (i.e. the protocol is - // ProtocolInfoProto) since they reuse the connection; in this case - // the declaringClassProtocolName field is set to the ProtocolInfoProto - DeclaringClassProtocolName *string `protobuf:"bytes,2,req,name=declaringClassProtocolName" json:"declaringClassProtocolName,omitempty"` - // * protocol version of class declaring the called method - ClientProtocolVersion *uint64 `protobuf:"varint,3,req,name=clientProtocolVersion" json:"clientProtocolVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequestHeaderProto) Reset() { *m = RequestHeaderProto{} } -func (m *RequestHeaderProto) String() string { return proto.CompactTextString(m) } -func (*RequestHeaderProto) ProtoMessage() {} -func (*RequestHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } - -func (m *RequestHeaderProto) GetMethodName() string { - if m != nil && m.MethodName != nil { - return *m.MethodName - } - return "" -} - -func (m *RequestHeaderProto) GetDeclaringClassProtocolName() string { - if m != nil && m.DeclaringClassProtocolName != nil { - return *m.DeclaringClassProtocolName - } - return "" -} - -func (m *RequestHeaderProto) GetClientProtocolVersion() uint64 { - if m != nil && m.ClientProtocolVersion != nil { - return *m.ClientProtocolVersion - } - return 0 -} - -func init() { - proto.RegisterType((*RequestHeaderProto)(nil), "hadoop.common.RequestHeaderProto") -} - -func init() { proto.RegisterFile("ProtobufRpcEngine.proto", fileDescriptor8) } - -var fileDescriptor8 = []byte{ - // 197 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x8f, 0xb1, 0x6a, 0xc3, 0x30, - 0x14, 0x45, 0x91, 0xdb, 0xa5, 0x82, 0x2e, 0x82, 0x52, 0xd3, 0xc1, 0x98, 0x4e, 0x9e, 0x34, 0x75, - 0x2c, 0x1d, 0x5c, 0x0a, 0x9d, 0x42, 0xd0, 0x90, 0x5d, 0x96, 0x9e, 0x6d, 0x81, 0xa4, 0xa7, 0x48, - 0xf2, 0xff, 0x64, 0xce, 0x57, 0x86, 0x28, 0x09, 0x04, 0x12, 0xb2, 0x9e, 0x7b, 0xee, 0x70, 0xe8, - 0xfb, 0x3a, 0x62, 0xc6, 0x61, 0x19, 0x45, 0x50, 0x7f, 0x7e, 0x32, 0x1e, 0x78, 0x38, 0x12, 0xf6, - 0x3a, 0x4b, 0x8d, 0x18, 0xb8, 0x42, 0xe7, 0xd0, 0x7f, 0xee, 0x09, 0x65, 0x02, 0xb6, 0x0b, 0xa4, - 0xfc, 0x0f, 0x52, 0x43, 0x2c, 0x3f, 0xd6, 0x50, 0xea, 0x20, 0xcf, 0xa8, 0x57, 0xd2, 0x41, 0x4d, - 0xda, 0xaa, 0x7b, 0x11, 0x57, 0x84, 0xfd, 0xd0, 0x0f, 0x0d, 0xca, 0xca, 0x68, 0xfc, 0xf4, 0x6b, - 0x65, 0x4a, 0xe5, 0xa6, 0xd0, 0x16, 0xbf, 0x2a, 0xfe, 0x03, 0x83, 0x7d, 0xd1, 0x37, 0x65, 0x0d, - 0xf8, 0x7c, 0xa1, 0x1b, 0x88, 0xc9, 0xa0, 0xaf, 0x9f, 0xda, 0xaa, 0x7b, 0x16, 0xf7, 0xc7, 0xfe, - 0x9b, 0x36, 0x18, 0x27, 0x2e, 0x83, 0x54, 0x33, 0xf0, 0x73, 0x88, 0x09, 0xea, 0x94, 0x36, 0x2c, - 0x63, 0x7f, 0x9b, 0x5d, 0x40, 0xda, 0x11, 0x72, 0x08, 0x00, 
0x00, 0xff, 0xff, 0xd8, 0xe2, 0xf0, - 0x01, 0x13, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.proto deleted file mode 100644 index a17e2078e94..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtobufRpcEngine.proto +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -/** - * These are the messages used by Hadoop RPC for the Rpc Engine Protocol Buffer - * to marshal the request and response in the RPC layer. - * The messages are sent in addition to the normal RPC header as - * defined in RpcHeader.proto - */ -option java_package = "org.apache.hadoop.ipc.protobuf"; -option java_outer_classname = "ProtobufRpcEngineProtos"; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * This message is the header for the Protobuf Rpc Engine - * when sending a RPC request from RPC client to the RPC server. - * The actual request (serialized as protobuf) follows this request. - * - * No special header is needed for the Rpc Response for Protobuf Rpc Engine. - * The normal RPC response header (see RpcHeader.proto) are sufficient. - */ -message RequestHeaderProto { - /** Name of the RPC method */ - required string methodName = 1; - - /** - * RPCs for a particular interface (ie protocol) are done using a - * IPC connection that is setup using rpcProxy. - * The rpcProxy's has a declared protocol name that is - * sent form client to server at connection time. - * - * Each Rpc call also sends a protocol name - * (called declaringClassprotocolName). This name is usually the same - * as the connection protocol name except in some cases. - * For example metaProtocols such ProtocolInfoProto which get metainfo - * about the protocol reuse the connection but need to indicate that - * the actual protocol is different (i.e. 
the protocol is - * ProtocolInfoProto) since they reuse the connection; in this case - * the declaringClassProtocolName field is set to the ProtocolInfoProto - */ - required string declaringClassProtocolName = 2; - - /** protocol version of class declaring the called method */ - required uint64 clientProtocolVersion = 3; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.pb.go deleted file mode 100644 index 9c858f09559..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.pb.go +++ /dev/null @@ -1,194 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: ProtocolInfo.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Request to get protocol versions for all supported rpc kinds. -type GetProtocolVersionsRequestProto struct { - Protocol *string `protobuf:"bytes,1,req,name=protocol" json:"protocol,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetProtocolVersionsRequestProto) Reset() { *m = GetProtocolVersionsRequestProto{} } -func (m *GetProtocolVersionsRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetProtocolVersionsRequestProto) ProtoMessage() {} -func (*GetProtocolVersionsRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor12, []int{0} -} - -func (m *GetProtocolVersionsRequestProto) GetProtocol() string { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return "" -} - -// * -// Protocol version with corresponding rpc kind. -type ProtocolVersionProto struct { - RpcKind *string `protobuf:"bytes,1,req,name=rpcKind" json:"rpcKind,omitempty"` - Versions []uint64 `protobuf:"varint,2,rep,name=versions" json:"versions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ProtocolVersionProto) Reset() { *m = ProtocolVersionProto{} } -func (m *ProtocolVersionProto) String() string { return proto.CompactTextString(m) } -func (*ProtocolVersionProto) ProtoMessage() {} -func (*ProtocolVersionProto) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{1} } - -func (m *ProtocolVersionProto) GetRpcKind() string { - if m != nil && m.RpcKind != nil { - return *m.RpcKind - } - return "" -} - -func (m *ProtocolVersionProto) GetVersions() []uint64 { - if m != nil { - return m.Versions - } - return nil -} - -// * -// Get protocol version response. -type GetProtocolVersionsResponseProto struct { - ProtocolVersions []*ProtocolVersionProto `protobuf:"bytes,1,rep,name=protocolVersions" json:"protocolVersions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetProtocolVersionsResponseProto) Reset() { *m = GetProtocolVersionsResponseProto{} } -func (m *GetProtocolVersionsResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetProtocolVersionsResponseProto) ProtoMessage() {} -func (*GetProtocolVersionsResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor12, []int{2} -} - -func (m *GetProtocolVersionsResponseProto) GetProtocolVersions() []*ProtocolVersionProto { - if m != nil { - return m.ProtocolVersions - } - return nil -} - -// * -// Get protocol signature request. 
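// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// The generated getters above are deliberately nil-safe: both the receiver and
// the pointer field are checked, so callers can read through unset messages
// without panicking. This assumes the hadoop_common package context with the
// existing github.com/golang/protobuf/proto import; the protocol name is a
// made-up example.
func exampleNilSafeGetters() ([]byte, error) {
	var resp *GetProtocolVersionsResponseProto // nil message
	_ = resp.GetProtocolVersions()             // safe: returns nil, no panic

	req := &GetProtocolVersionsRequestProto{
		Protocol: proto.String("hadoop.hdfs.ClientProtocol"), // hypothetical name
	}
	return proto.Marshal(req) // wire bytes for the request payload
}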
-type GetProtocolSignatureRequestProto struct { - Protocol *string `protobuf:"bytes,1,req,name=protocol" json:"protocol,omitempty"` - RpcKind *string `protobuf:"bytes,2,req,name=rpcKind" json:"rpcKind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetProtocolSignatureRequestProto) Reset() { *m = GetProtocolSignatureRequestProto{} } -func (m *GetProtocolSignatureRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetProtocolSignatureRequestProto) ProtoMessage() {} -func (*GetProtocolSignatureRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor12, []int{3} -} - -func (m *GetProtocolSignatureRequestProto) GetProtocol() string { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return "" -} - -func (m *GetProtocolSignatureRequestProto) GetRpcKind() string { - if m != nil && m.RpcKind != nil { - return *m.RpcKind - } - return "" -} - -// * -// Get protocol signature response. -type GetProtocolSignatureResponseProto struct { - ProtocolSignature []*ProtocolSignatureProto `protobuf:"bytes,1,rep,name=protocolSignature" json:"protocolSignature,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetProtocolSignatureResponseProto) Reset() { *m = GetProtocolSignatureResponseProto{} } -func (m *GetProtocolSignatureResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetProtocolSignatureResponseProto) ProtoMessage() {} -func (*GetProtocolSignatureResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor12, []int{4} -} - -func (m *GetProtocolSignatureResponseProto) GetProtocolSignature() []*ProtocolSignatureProto { - if m != nil { - return m.ProtocolSignature - } - return nil -} - -type ProtocolSignatureProto struct { - Version *uint64 `protobuf:"varint,1,req,name=version" json:"version,omitempty"` - Methods []uint32 `protobuf:"varint,2,rep,name=methods" json:"methods,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ProtocolSignatureProto) Reset() { *m = ProtocolSignatureProto{} } -func (m *ProtocolSignatureProto) String() string { return proto.CompactTextString(m) } -func (*ProtocolSignatureProto) ProtoMessage() {} -func (*ProtocolSignatureProto) Descriptor() ([]byte, []int) { return fileDescriptor12, []int{5} } - -func (m *ProtocolSignatureProto) GetVersion() uint64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func (m *ProtocolSignatureProto) GetMethods() []uint32 { - if m != nil { - return m.Methods - } - return nil -} - -func init() { - proto.RegisterType((*GetProtocolVersionsRequestProto)(nil), "hadoop.common.GetProtocolVersionsRequestProto") - proto.RegisterType((*ProtocolVersionProto)(nil), "hadoop.common.ProtocolVersionProto") - proto.RegisterType((*GetProtocolVersionsResponseProto)(nil), "hadoop.common.GetProtocolVersionsResponseProto") - proto.RegisterType((*GetProtocolSignatureRequestProto)(nil), "hadoop.common.GetProtocolSignatureRequestProto") - proto.RegisterType((*GetProtocolSignatureResponseProto)(nil), "hadoop.common.GetProtocolSignatureResponseProto") - proto.RegisterType((*ProtocolSignatureProto)(nil), "hadoop.common.ProtocolSignatureProto") -} - -func init() { proto.RegisterFile("ProtocolInfo.proto", fileDescriptor12) } - -var fileDescriptor12 = []byte{ - // 339 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x51, 0x4b, 0xf3, 0x30, - 0x14, 0xa5, 0xdd, 0xe0, 0xfb, 0xbc, 0x32, 0xd0, 0x6e, 0x48, 0xd9, 0x83, 0xd6, 0x88, 0xb0, 0xa7, - 0x28, 0x7b, 0x14, 0x7c, 
0xd9, 0x8b, 0x88, 0x03, 0x25, 0x03, 0xf1, 0xb5, 0x66, 0x59, 0x57, 0x70, - 0xb9, 0x31, 0xe9, 0x86, 0xfe, 0x83, 0xfd, 0x0c, 0x7f, 0xaa, 0x74, 0x69, 0x47, 0xdb, 0x45, 0xdd, - 0x5b, 0x4f, 0x73, 0xce, 0xbd, 0xf7, 0x9c, 0x9b, 0x40, 0xf0, 0xa4, 0x31, 0x43, 0x8e, 0x6f, 0xf7, - 0x72, 0x86, 0x54, 0xe5, 0x20, 0xe8, 0xcc, 0xe3, 0x29, 0xa2, 0xa2, 0x1c, 0x17, 0x0b, 0x94, 0xe4, - 0x16, 0xce, 0xee, 0x44, 0x56, 0xf2, 0x9e, 0x85, 0x36, 0x29, 0x4a, 0xc3, 0xc4, 0xfb, 0x52, 0x18, - 0xfb, 0x3b, 0xe8, 0xc3, 0x7f, 0x55, 0x9c, 0x87, 0x5e, 0xe4, 0x0f, 0x0e, 0xd8, 0x16, 0x93, 0x31, - 0xf4, 0x1a, 0x5a, 0xab, 0x09, 0xe1, 0x9f, 0x56, 0xfc, 0x21, 0x95, 0xd3, 0x42, 0x52, 0xc2, 0xbc, - 0xda, 0xaa, 0xe8, 0x12, 0xfa, 0x51, 0x6b, 0xd0, 0x66, 0x5b, 0x4c, 0x0c, 0x44, 0xce, 0x61, 0x8c, - 0x42, 0x69, 0x84, 0xad, 0xfc, 0x08, 0x47, 0xaa, 0x41, 0x08, 0xbd, 0xa8, 0x35, 0x38, 0x1c, 0x5e, - 0xd0, 0x9a, 0x35, 0xea, 0x1a, 0x8c, 0xed, 0x88, 0xc9, 0x4b, 0xad, 0xe9, 0x24, 0x4d, 0x64, 0x9c, - 0x2d, 0xb5, 0xd8, 0x37, 0x82, 0xaa, 0x55, 0xbf, 0x66, 0x95, 0x7c, 0xc0, 0xb9, 0xbb, 0x72, 0xd5, - 0xcf, 0x04, 0x8e, 0x55, 0x93, 0x51, 0x18, 0xba, 0xfc, 0xc1, 0xd0, 0x96, 0x67, 0x2d, 0xed, 0xea, - 0xc9, 0x18, 0x4e, 0xdc, 0xe4, 0x7c, 0xda, 0x22, 0xee, 0x8d, 0x91, 0x36, 0x2b, 0x61, 0x7e, 0xb2, - 0x10, 0xd9, 0x1c, 0xa7, 0x76, 0x2f, 0x1d, 0x56, 0xc2, 0xe1, 0xda, 0x87, 0x6e, 0xf5, 0x26, 0x4d, - 0x84, 0x5e, 0xa5, 0x5c, 0x04, 0x2b, 0xe8, 0x26, 0xbb, 0xeb, 0x0a, 0x68, 0x63, 0xec, 0x3f, 0xee, - 0x57, 0xff, 0x6a, 0x1f, 0x7e, 0x35, 0xb2, 0x4f, 0xe8, 0x25, 0x8e, 0x5c, 0x83, 0x5f, 0x0a, 0x39, - 0xd7, 0xda, 0xbf, 0xde, 0x4b, 0x50, 0x69, 0x3d, 0xba, 0x81, 0x53, 0xd4, 0x09, 0x8d, 0x55, 0xcc, - 0xe7, 0xa2, 0x54, 0xa7, 0x8a, 0xdb, 0xc7, 0xf5, 0xba, 0x9c, 0x8d, 0x6a, 0x6f, 0x6e, 0xf3, 0x6d, - 0xd6, 0x9e, 0xf7, 0xe5, 0x79, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x42, 0x12, 0xbd, 0x0a, 0x8e, - 0x03, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.proto deleted file mode 100644 index fdbc440d91c..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ProtocolInfo.proto +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.ipc.protobuf"; -option java_outer_classname = "ProtocolInfoProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Request to get protocol versions for all supported rpc kinds. 
- */ -message GetProtocolVersionsRequestProto { - required string protocol = 1; // Protocol name -} - -/** - * Protocol version with corresponding rpc kind. - */ -message ProtocolVersionProto { - required string rpcKind = 1; //RPC kind - repeated uint64 versions = 2; //Protocol version corresponding to the rpc kind. -} - -/** - * Get protocol version response. - */ -message GetProtocolVersionsResponseProto { - repeated ProtocolVersionProto protocolVersions = 1; -} - -/** - * Get protocol signature request. - */ -message GetProtocolSignatureRequestProto { - required string protocol = 1; // Protocol name - required string rpcKind = 2; // RPC kind -} - -/** - * Get protocol signature response. - */ -message GetProtocolSignatureResponseProto { - repeated ProtocolSignatureProto protocolSignature = 1; -} - -message ProtocolSignatureProto { - required uint64 version = 1; - repeated uint32 methods = 2; -} - -/** - * Protocol to get information about protocols. - */ -service ProtocolInfoService { - /** - * Return protocol version corresponding to protocol interface for each - * supported rpc kind. - */ - rpc getProtocolVersions(GetProtocolVersionsRequestProto) - returns (GetProtocolVersionsResponseProto); - - /** - * Return protocol version corresponding to protocol interface. - */ - rpc getProtocolSignature(GetProtocolSignatureRequestProto) - returns (GetProtocolSignatureResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.pb.go deleted file mode 100644 index 5054b196fd8..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.pb.go +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: RefreshAuthorizationPolicyProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Refresh service acl request. 
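// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// These definitions are proto2, so `required` fields are enforced at marshal
// time by github.com/golang/protobuf: leaving one unset yields an error rather
// than silently emitting an incomplete message. Assumes the hadoop_common
// package context; the protocol name is a made-up example.
func exampleRequiredField() {
	bad := &GetProtocolSignatureRequestProto{
		Protocol: proto.String("hadoop.hdfs.ClientProtocol"), // hypothetical
		// RpcKind is required but deliberately left unset.
	}
	if _, err := proto.Marshal(bad); err != nil {
		// err reports the missing required field (a *proto.RequiredNotSetError).
		_ = err
	}
}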
-type RefreshServiceAclRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshServiceAclRequestProto) Reset() { *m = RefreshServiceAclRequestProto{} } -func (m *RefreshServiceAclRequestProto) String() string { return proto.CompactTextString(m) } -func (*RefreshServiceAclRequestProto) ProtoMessage() {} -func (*RefreshServiceAclRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } - -// * -// void response -type RefreshServiceAclResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshServiceAclResponseProto) Reset() { *m = RefreshServiceAclResponseProto{} } -func (m *RefreshServiceAclResponseProto) String() string { return proto.CompactTextString(m) } -func (*RefreshServiceAclResponseProto) ProtoMessage() {} -func (*RefreshServiceAclResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } - -func init() { - proto.RegisterType((*RefreshServiceAclRequestProto)(nil), "hadoop.common.RefreshServiceAclRequestProto") - proto.RegisterType((*RefreshServiceAclResponseProto)(nil), "hadoop.common.RefreshServiceAclResponseProto") -} - -func init() { proto.RegisterFile("RefreshAuthorizationPolicyProtocol.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x08, 0x4a, 0x4d, 0x2b, - 0x4a, 0x2d, 0xce, 0x70, 0x2c, 0x2d, 0xc9, 0xc8, 0x2f, 0xca, 0xac, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, - 0x0b, 0xc8, 0xcf, 0xc9, 0x4c, 0xae, 0x0c, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0xce, 0xcf, 0xd1, 0x2b, - 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, 0xf4, 0x92, 0xf3, 0x73, 0x73, 0xf3, - 0xf3, 0x94, 0xe4, 0xb9, 0x64, 0xa1, 0x5a, 0x83, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, 0x1d, 0x93, - 0x73, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0xc0, 0x1a, 0x95, 0x14, 0xb8, 0xe4, 0xb0, 0x28, - 0x28, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x05, 0xab, 0x30, 0x9a, 0xcb, 0xc8, 0xa5, 0x49, 0xd8, 0x7a, - 0xa8, 0x6e, 0xa1, 0x02, 0x2e, 0xc1, 0x22, 0x74, 0xf3, 0x84, 0x74, 0xf4, 0x50, 0x5c, 0xa5, 0x87, - 0xd7, 0x49, 0x52, 0xba, 0x84, 0x55, 0x23, 0xb9, 0xcf, 0x29, 0x88, 0x4b, 0x21, 0xbf, 0x28, 0x5d, - 0x2f, 0xb1, 0x20, 0x31, 0x39, 0x23, 0x15, 0xa6, 0xb5, 0x38, 0x35, 0xb9, 0xb4, 0x28, 0xb3, 0xa4, - 0x12, 0x12, 0x2a, 0x4e, 0x44, 0x84, 0x1f, 0x98, 0x2e, 0xee, 0x60, 0x64, 0x5c, 0xc0, 0xc8, 0x08, - 0x08, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf2, 0xf6, 0x15, 0x70, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.proto deleted file mode 100644 index 5ef1c2d0a8c..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshAuthorizationPolicyProtocol.proto +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.security.proto"; -option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Refresh service acl request. - */ -message RefreshServiceAclRequestProto { -} - -/** - * void response - */ -message RefreshServiceAclResponseProto { -} - -/** - * Protocol which is used to refresh the authorization policy in use currently. - */ -service RefreshAuthorizationPolicyProtocolService { - /** - * Refresh the service-level authorization policy in-effect. - */ - rpc refreshServiceAcl(RefreshServiceAclRequestProto) - returns(RefreshServiceAclResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.pb.go deleted file mode 100644 index 4ddd7d8fcd9..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.pb.go +++ /dev/null @@ -1,57 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: RefreshCallQueueProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Refresh callqueue request. -type RefreshCallQueueRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshCallQueueRequestProto) Reset() { *m = RefreshCallQueueRequestProto{} } -func (m *RefreshCallQueueRequestProto) String() string { return proto.CompactTextString(m) } -func (*RefreshCallQueueRequestProto) ProtoMessage() {} -func (*RefreshCallQueueRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } - -// * -// void response. 
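// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// The refresh request/response messages are intentionally empty: they encode
// to zero bytes today while remaining real message types, so optional fields
// can be added compatibly later. Assumes the hadoop_common package context
// with the existing proto import.
func exampleEmptyMessage() int {
	data, _ := proto.Marshal(&RefreshCallQueueRequestProto{})
	return len(data) // 0: an empty proto2 message has no wire content
}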
-type RefreshCallQueueResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshCallQueueResponseProto) Reset() { *m = RefreshCallQueueResponseProto{} } -func (m *RefreshCallQueueResponseProto) String() string { return proto.CompactTextString(m) } -func (*RefreshCallQueueResponseProto) ProtoMessage() {} -func (*RefreshCallQueueResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } - -func init() { - proto.RegisterType((*RefreshCallQueueRequestProto)(nil), "hadoop.common.RefreshCallQueueRequestProto") - proto.RegisterType((*RefreshCallQueueResponseProto)(nil), "hadoop.common.RefreshCallQueueResponseProto") -} - -func init() { proto.RegisterFile("RefreshCallQueueProtocol.proto", fileDescriptor6) } - -var fileDescriptor6 = []byte{ - // 168 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x0b, 0x4a, 0x4d, 0x2b, - 0x4a, 0x2d, 0xce, 0x70, 0x4e, 0xcc, 0xc9, 0x09, 0x2c, 0x4d, 0x2d, 0x4d, 0x0d, 0x28, 0xca, 0x2f, - 0xc9, 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, 0xf2, 0xf3, 0x0b, - 0xf4, 0x92, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0x94, 0xe4, 0xb8, 0x64, 0xd0, 0x35, 0x04, 0xa5, 0x16, - 0x96, 0xa6, 0x16, 0x97, 0x80, 0xf5, 0x29, 0xc9, 0x73, 0xc9, 0x62, 0xca, 0x17, 0x17, 0xe4, 0xe7, - 0x15, 0x43, 0x0c, 0x36, 0x9a, 0xc0, 0xc8, 0x25, 0x8f, 0xcb, 0xca, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, - 0xe4, 0x54, 0xa1, 0x5c, 0x2e, 0x81, 0x22, 0x34, 0x25, 0x42, 0xda, 0x7a, 0x28, 0x0e, 0xd1, 0xc3, - 0xe7, 0x0a, 0x29, 0x1d, 0x82, 0x8a, 0x91, 0x9c, 0xe4, 0xe4, 0xcc, 0x25, 0x9d, 0x5f, 0x94, 0xae, - 0x97, 0x58, 0x90, 0x98, 0x9c, 0x91, 0x0a, 0xd3, 0x99, 0x59, 0x90, 0x0c, 0x09, 0x01, 0x27, 0x9c, - 0x21, 0x04, 0xa6, 0x8b, 0x3b, 0x18, 0x19, 0x17, 0x30, 0x32, 0x02, 0x02, 0x00, 0x00, 0xff, 0xff, - 0x5d, 0x22, 0xcd, 0xe0, 0x48, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.proto deleted file mode 100644 index 67ed1332510..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshCallQueueProtocol.proto +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.ipc.proto"; -option java_outer_classname = "RefreshCallQueueProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Refresh callqueue request. 
- */ -message RefreshCallQueueRequestProto { -} - -/** - * void response. - */ -message RefreshCallQueueResponseProto { -} - -/** - * Protocol which is used to refresh the callqueue. - */ -service RefreshCallQueueProtocolService { - /** - * Refresh the callqueue. - */ - rpc refreshCallQueue(RefreshCallQueueRequestProto) - returns(RefreshCallQueueResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.pb.go deleted file mode 100644 index f254f3e3bd8..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.pb.go +++ /dev/null @@ -1,106 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: RefreshUserMappingsProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Refresh user to group mappings request. -type RefreshUserToGroupsMappingsRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshUserToGroupsMappingsRequestProto) Reset() { - *m = RefreshUserToGroupsMappingsRequestProto{} -} -func (m *RefreshUserToGroupsMappingsRequestProto) String() string { return proto.CompactTextString(m) } -func (*RefreshUserToGroupsMappingsRequestProto) ProtoMessage() {} -func (*RefreshUserToGroupsMappingsRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor9, []int{0} -} - -// * -// void response -type RefreshUserToGroupsMappingsResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshUserToGroupsMappingsResponseProto) Reset() { - *m = RefreshUserToGroupsMappingsResponseProto{} -} -func (m *RefreshUserToGroupsMappingsResponseProto) String() string { return proto.CompactTextString(m) } -func (*RefreshUserToGroupsMappingsResponseProto) ProtoMessage() {} -func (*RefreshUserToGroupsMappingsResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor9, []int{1} -} - -// * -// Refresh superuser configuration request. 
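// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// Every generated message carries an XXX_unrecognized byte slice, so fields
// unknown to this build (for example, sent by a newer peer) survive an
// unmarshal/marshal round trip instead of being dropped. The raw bytes below
// are a hand-encoded varint field, purely for demonstration.
func exampleUnknownFieldRoundTrip() []byte {
	raw := []byte{0x08, 0x2a} // field 1, wire type varint, value 42: unknown here
	var m RefreshUserToGroupsMappingsRequestProto
	if err := proto.Unmarshal(raw, &m); err != nil {
		return nil
	}
	out, _ := proto.Marshal(&m) // re-emits the unknown field bytes unchanged
	return out
}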
-type RefreshSuperUserGroupsConfigurationRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshSuperUserGroupsConfigurationRequestProto) Reset() { - *m = RefreshSuperUserGroupsConfigurationRequestProto{} -} -func (m *RefreshSuperUserGroupsConfigurationRequestProto) String() string { - return proto.CompactTextString(m) -} -func (*RefreshSuperUserGroupsConfigurationRequestProto) ProtoMessage() {} -func (*RefreshSuperUserGroupsConfigurationRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor9, []int{2} -} - -// * -// void response -type RefreshSuperUserGroupsConfigurationResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshSuperUserGroupsConfigurationResponseProto) Reset() { - *m = RefreshSuperUserGroupsConfigurationResponseProto{} -} -func (m *RefreshSuperUserGroupsConfigurationResponseProto) String() string { - return proto.CompactTextString(m) -} -func (*RefreshSuperUserGroupsConfigurationResponseProto) ProtoMessage() {} -func (*RefreshSuperUserGroupsConfigurationResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor9, []int{3} -} - -func init() { - proto.RegisterType((*RefreshUserToGroupsMappingsRequestProto)(nil), "hadoop.common.RefreshUserToGroupsMappingsRequestProto") - proto.RegisterType((*RefreshUserToGroupsMappingsResponseProto)(nil), "hadoop.common.RefreshUserToGroupsMappingsResponseProto") - proto.RegisterType((*RefreshSuperUserGroupsConfigurationRequestProto)(nil), "hadoop.common.RefreshSuperUserGroupsConfigurationRequestProto") - proto.RegisterType((*RefreshSuperUserGroupsConfigurationResponseProto)(nil), "hadoop.common.RefreshSuperUserGroupsConfigurationResponseProto") -} - -func init() { proto.RegisterFile("RefreshUserMappingsProtocol.proto", fileDescriptor9) } - -var fileDescriptor9 = []byte{ - // 249 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x0c, 0x4a, 0x4d, 0x2b, - 0x4a, 0x2d, 0xce, 0x08, 0x2d, 0x4e, 0x2d, 0xf2, 0x4d, 0x2c, 0x28, 0xc8, 0xcc, 0x4b, 0x2f, 0x0e, - 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0xce, 0xcf, 0xd1, 0x2b, 0x00, 0x31, 0x84, 0x78, 0x33, 0x12, 0x53, - 0xf2, 0xf3, 0x0b, 0xf4, 0x92, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0x94, 0x34, 0xb9, 0xd4, 0x91, 0xf4, - 0x84, 0xe4, 0xbb, 0x17, 0xe5, 0x97, 0x16, 0x14, 0xc3, 0xf4, 0x06, 0xa5, 0x16, 0x96, 0xa6, 0x16, - 0x97, 0x80, 0x8d, 0x50, 0xd2, 0xe2, 0xd2, 0xc0, 0xab, 0xb4, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x15, - 0xa2, 0xd6, 0x90, 0x4b, 0x1f, 0xaa, 0x36, 0xb8, 0xb4, 0x20, 0xb5, 0x08, 0xa4, 0x01, 0xa2, 0xdc, - 0x39, 0x3f, 0x2f, 0x2d, 0x33, 0xbd, 0xb4, 0x28, 0xb1, 0x24, 0x33, 0x3f, 0x0f, 0xc5, 0x78, 0x23, - 0x2e, 0x03, 0xa2, 0xb4, 0x20, 0x59, 0x63, 0x74, 0x8f, 0x89, 0x4b, 0x09, 0x8f, 0x97, 0x83, 0x53, - 0x8b, 0xca, 0x32, 0x93, 0x53, 0x85, 0xfa, 0x18, 0xb9, 0xa4, 0x8b, 0x70, 0x3b, 0x5d, 0xc8, 0x4c, - 0x0f, 0x25, 0x50, 0xf4, 0x88, 0x0c, 0x11, 0x29, 0x73, 0x52, 0xf4, 0x21, 0xb9, 0x5b, 0x68, 0x19, - 0x23, 0x97, 0x72, 0x11, 0x61, 0xcf, 0x0a, 0xd9, 0x61, 0xb7, 0x80, 0xd8, 0x30, 0x95, 0xb2, 0x27, - 0x47, 0x3f, 0x92, 0x43, 0x9d, 0xbc, 0xb9, 0x14, 0xf2, 0x8b, 0xd2, 0xf5, 0x12, 0x0b, 0x12, 0x93, - 0x33, 0x52, 0x61, 0x86, 0x15, 0xa7, 0x26, 0x97, 0x16, 0x65, 0x96, 0x54, 0x42, 0x52, 0x94, 0x13, - 0xbe, 0x44, 0x07, 0xa6, 0x8b, 0x3b, 0x18, 0x19, 0x17, 0x30, 0x32, 0x02, 0x02, 0x00, 0x00, 0xff, - 0xff, 0xfc, 0xf2, 0x05, 0xcb, 0x9e, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.proto 
b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.proto deleted file mode 100644 index 41031ed9ea0..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RefreshUserMappingsProtocol.proto +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.security.proto"; -option java_outer_classname = "RefreshUserMappingsProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Refresh user to group mappings request. - */ -message RefreshUserToGroupsMappingsRequestProto { -} - -/** - * void response - */ -message RefreshUserToGroupsMappingsResponseProto { -} - -/** - * Refresh superuser configuration request. - */ -message RefreshSuperUserGroupsConfigurationRequestProto { -} - -/** - * void response - */ -message RefreshSuperUserGroupsConfigurationResponseProto { -} - -/** - * Protocol to refresh the user mappings. - */ -service RefreshUserMappingsProtocolService { - /** - * Refresh user to group mappings. - */ - rpc refreshUserToGroupsMappings(RefreshUserToGroupsMappingsRequestProto) - returns(RefreshUserToGroupsMappingsResponseProto); - - /** - * Refresh superuser proxy group list. - */ - rpc refreshSuperUserGroupsConfiguration(RefreshSuperUserGroupsConfigurationRequestProto) - returns(RefreshSuperUserGroupsConfigurationResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.pb.go deleted file mode 100644 index 2b2d472141e..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.pb.go +++ /dev/null @@ -1,639 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: RpcHeader.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
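// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// All of the refresh protocols deleted above share one shape: an empty request
// answered by an empty response, so a caller reduces to marshal, send,
// unmarshal. The transport callback here is hypothetical, standing in for
// whatever Hadoop RPC client actually carries the call.
func exampleVoidRPC(call func(method string, req []byte) ([]byte, error)) error {
	reqBytes, err := proto.Marshal(&RefreshUserToGroupsMappingsRequestProto{})
	if err != nil {
		return err
	}
	respBytes, err := call("refreshUserToGroupsMappings", reqBytes) // hypothetical transport
	if err != nil {
		return err
	}
	var resp RefreshUserToGroupsMappingsResponseProto
	return proto.Unmarshal(respBytes, &resp)
}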
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// RpcKind determine the rpcEngine and the serialization of the rpc request -type RpcKindProto int32 - -const ( - RpcKindProto_RPC_BUILTIN RpcKindProto = 0 - RpcKindProto_RPC_WRITABLE RpcKindProto = 1 - RpcKindProto_RPC_PROTOCOL_BUFFER RpcKindProto = 2 -) - -var RpcKindProto_name = map[int32]string{ - 0: "RPC_BUILTIN", - 1: "RPC_WRITABLE", - 2: "RPC_PROTOCOL_BUFFER", -} -var RpcKindProto_value = map[string]int32{ - "RPC_BUILTIN": 0, - "RPC_WRITABLE": 1, - "RPC_PROTOCOL_BUFFER": 2, -} - -func (x RpcKindProto) Enum() *RpcKindProto { - p := new(RpcKindProto) - *p = x - return p -} -func (x RpcKindProto) String() string { - return proto.EnumName(RpcKindProto_name, int32(x)) -} -func (x *RpcKindProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcKindProto_value, data, "RpcKindProto") - if err != nil { - return err - } - *x = RpcKindProto(value) - return nil -} -func (RpcKindProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } - -type RpcRequestHeaderProto_OperationProto int32 - -const ( - RpcRequestHeaderProto_RPC_FINAL_PACKET RpcRequestHeaderProto_OperationProto = 0 - RpcRequestHeaderProto_RPC_CONTINUATION_PACKET RpcRequestHeaderProto_OperationProto = 1 - RpcRequestHeaderProto_RPC_CLOSE_CONNECTION RpcRequestHeaderProto_OperationProto = 2 -) - -var RpcRequestHeaderProto_OperationProto_name = map[int32]string{ - 0: "RPC_FINAL_PACKET", - 1: "RPC_CONTINUATION_PACKET", - 2: "RPC_CLOSE_CONNECTION", -} -var RpcRequestHeaderProto_OperationProto_value = map[string]int32{ - "RPC_FINAL_PACKET": 0, - "RPC_CONTINUATION_PACKET": 1, - "RPC_CLOSE_CONNECTION": 2, -} - -func (x RpcRequestHeaderProto_OperationProto) Enum() *RpcRequestHeaderProto_OperationProto { - p := new(RpcRequestHeaderProto_OperationProto) - *p = x - return p -} -func (x RpcRequestHeaderProto_OperationProto) String() string { - return proto.EnumName(RpcRequestHeaderProto_OperationProto_name, int32(x)) -} -func (x *RpcRequestHeaderProto_OperationProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcRequestHeaderProto_OperationProto_value, data, "RpcRequestHeaderProto_OperationProto") - if err != nil { - return err - } - *x = RpcRequestHeaderProto_OperationProto(value) - return nil -} -func (RpcRequestHeaderProto_OperationProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor4, []int{2, 0} -} - -type RpcResponseHeaderProto_RpcStatusProto int32 - -const ( - RpcResponseHeaderProto_SUCCESS RpcResponseHeaderProto_RpcStatusProto = 0 - RpcResponseHeaderProto_ERROR RpcResponseHeaderProto_RpcStatusProto = 1 - RpcResponseHeaderProto_FATAL RpcResponseHeaderProto_RpcStatusProto = 2 -) - -var RpcResponseHeaderProto_RpcStatusProto_name = map[int32]string{ - 0: "SUCCESS", - 1: "ERROR", - 2: "FATAL", -} -var RpcResponseHeaderProto_RpcStatusProto_value = map[string]int32{ - "SUCCESS": 0, - "ERROR": 1, - "FATAL": 2, -} - -func (x RpcResponseHeaderProto_RpcStatusProto) Enum() *RpcResponseHeaderProto_RpcStatusProto { - p := new(RpcResponseHeaderProto_RpcStatusProto) - *p = x - return p -} -func (x RpcResponseHeaderProto_RpcStatusProto) String() string { - return proto.EnumName(RpcResponseHeaderProto_RpcStatusProto_name, int32(x)) -} -func (x *RpcResponseHeaderProto_RpcStatusProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcResponseHeaderProto_RpcStatusProto_value, data, "RpcResponseHeaderProto_RpcStatusProto") - if err != nil { - return err - } - *x = 
RpcResponseHeaderProto_RpcStatusProto(value) - return nil -} -func (RpcResponseHeaderProto_RpcStatusProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor4, []int{3, 0} -} - -type RpcResponseHeaderProto_RpcErrorCodeProto int32 - -const ( - // Non-fatal Rpc error - connection left open for future rpc calls - RpcResponseHeaderProto_ERROR_APPLICATION RpcResponseHeaderProto_RpcErrorCodeProto = 1 - RpcResponseHeaderProto_ERROR_NO_SUCH_METHOD RpcResponseHeaderProto_RpcErrorCodeProto = 2 - RpcResponseHeaderProto_ERROR_NO_SUCH_PROTOCOL RpcResponseHeaderProto_RpcErrorCodeProto = 3 - RpcResponseHeaderProto_ERROR_RPC_SERVER RpcResponseHeaderProto_RpcErrorCodeProto = 4 - RpcResponseHeaderProto_ERROR_SERIALIZING_RESPONSE RpcResponseHeaderProto_RpcErrorCodeProto = 5 - RpcResponseHeaderProto_ERROR_RPC_VERSION_MISMATCH RpcResponseHeaderProto_RpcErrorCodeProto = 6 - // Fatal Server side Rpc error - connection closed - RpcResponseHeaderProto_FATAL_UNKNOWN RpcResponseHeaderProto_RpcErrorCodeProto = 10 - RpcResponseHeaderProto_FATAL_UNSUPPORTED_SERIALIZATION RpcResponseHeaderProto_RpcErrorCodeProto = 11 - RpcResponseHeaderProto_FATAL_INVALID_RPC_HEADER RpcResponseHeaderProto_RpcErrorCodeProto = 12 - RpcResponseHeaderProto_FATAL_DESERIALIZING_REQUEST RpcResponseHeaderProto_RpcErrorCodeProto = 13 - RpcResponseHeaderProto_FATAL_VERSION_MISMATCH RpcResponseHeaderProto_RpcErrorCodeProto = 14 - RpcResponseHeaderProto_FATAL_UNAUTHORIZED RpcResponseHeaderProto_RpcErrorCodeProto = 15 -) - -var RpcResponseHeaderProto_RpcErrorCodeProto_name = map[int32]string{ - 1: "ERROR_APPLICATION", - 2: "ERROR_NO_SUCH_METHOD", - 3: "ERROR_NO_SUCH_PROTOCOL", - 4: "ERROR_RPC_SERVER", - 5: "ERROR_SERIALIZING_RESPONSE", - 6: "ERROR_RPC_VERSION_MISMATCH", - 10: "FATAL_UNKNOWN", - 11: "FATAL_UNSUPPORTED_SERIALIZATION", - 12: "FATAL_INVALID_RPC_HEADER", - 13: "FATAL_DESERIALIZING_REQUEST", - 14: "FATAL_VERSION_MISMATCH", - 15: "FATAL_UNAUTHORIZED", -} -var RpcResponseHeaderProto_RpcErrorCodeProto_value = map[string]int32{ - "ERROR_APPLICATION": 1, - "ERROR_NO_SUCH_METHOD": 2, - "ERROR_NO_SUCH_PROTOCOL": 3, - "ERROR_RPC_SERVER": 4, - "ERROR_SERIALIZING_RESPONSE": 5, - "ERROR_RPC_VERSION_MISMATCH": 6, - "FATAL_UNKNOWN": 10, - "FATAL_UNSUPPORTED_SERIALIZATION": 11, - "FATAL_INVALID_RPC_HEADER": 12, - "FATAL_DESERIALIZING_REQUEST": 13, - "FATAL_VERSION_MISMATCH": 14, - "FATAL_UNAUTHORIZED": 15, -} - -func (x RpcResponseHeaderProto_RpcErrorCodeProto) Enum() *RpcResponseHeaderProto_RpcErrorCodeProto { - p := new(RpcResponseHeaderProto_RpcErrorCodeProto) - *p = x - return p -} -func (x RpcResponseHeaderProto_RpcErrorCodeProto) String() string { - return proto.EnumName(RpcResponseHeaderProto_RpcErrorCodeProto_name, int32(x)) -} -func (x *RpcResponseHeaderProto_RpcErrorCodeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcResponseHeaderProto_RpcErrorCodeProto_value, data, "RpcResponseHeaderProto_RpcErrorCodeProto") - if err != nil { - return err - } - *x = RpcResponseHeaderProto_RpcErrorCodeProto(value) - return nil -} -func (RpcResponseHeaderProto_RpcErrorCodeProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor4, []int{3, 1} -} - -type RpcSaslProto_SaslState int32 - -const ( - RpcSaslProto_SUCCESS RpcSaslProto_SaslState = 0 - RpcSaslProto_NEGOTIATE RpcSaslProto_SaslState = 1 - RpcSaslProto_INITIATE RpcSaslProto_SaslState = 2 - RpcSaslProto_CHALLENGE RpcSaslProto_SaslState = 3 - RpcSaslProto_RESPONSE RpcSaslProto_SaslState = 4 - RpcSaslProto_WRAP RpcSaslProto_SaslState = 5 -) - -var 
RpcSaslProto_SaslState_name = map[int32]string{ - 0: "SUCCESS", - 1: "NEGOTIATE", - 2: "INITIATE", - 3: "CHALLENGE", - 4: "RESPONSE", - 5: "WRAP", -} -var RpcSaslProto_SaslState_value = map[string]int32{ - "SUCCESS": 0, - "NEGOTIATE": 1, - "INITIATE": 2, - "CHALLENGE": 3, - "RESPONSE": 4, - "WRAP": 5, -} - -func (x RpcSaslProto_SaslState) Enum() *RpcSaslProto_SaslState { - p := new(RpcSaslProto_SaslState) - *p = x - return p -} -func (x RpcSaslProto_SaslState) String() string { - return proto.EnumName(RpcSaslProto_SaslState_name, int32(x)) -} -func (x *RpcSaslProto_SaslState) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcSaslProto_SaslState_value, data, "RpcSaslProto_SaslState") - if err != nil { - return err - } - *x = RpcSaslProto_SaslState(value) - return nil -} -func (RpcSaslProto_SaslState) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{4, 0} } - -// * -// Used to pass through the information necessary to continue -// a trace after an RPC is made. All we need is the traceid -// (so we know the overarching trace this message is a part of), and -// the id of the current span when this message was sent, so we know -// what span caused the new span we will create when this message is received. -type RPCTraceInfoProto struct { - TraceId *int64 `protobuf:"varint,1,opt,name=traceId" json:"traceId,omitempty"` - ParentId *int64 `protobuf:"varint,2,opt,name=parentId" json:"parentId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RPCTraceInfoProto) Reset() { *m = RPCTraceInfoProto{} } -func (m *RPCTraceInfoProto) String() string { return proto.CompactTextString(m) } -func (*RPCTraceInfoProto) ProtoMessage() {} -func (*RPCTraceInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } - -func (m *RPCTraceInfoProto) GetTraceId() int64 { - if m != nil && m.TraceId != nil { - return *m.TraceId - } - return 0 -} - -func (m *RPCTraceInfoProto) GetParentId() int64 { - if m != nil && m.ParentId != nil { - return *m.ParentId - } - return 0 -} - -// * -// Used to pass through the call context entry after an RPC is made. 
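// --- Illustrative sketch; not part of the vendored file or of this patch. ---
// Propagating a trace across an RPC, as the RPCTraceInfoProto comment above
// describes, means copying the current trace id and the id of the sending span
// into the header before the call. The ids below are made up.
func exampleTraceInfo() *RPCTraceInfoProto {
	return &RPCTraceInfoProto{
		TraceId:  proto.Int64(1001), // id of the overarching trace
		ParentId: proto.Int64(42),   // span that caused the span created on receipt
	}
}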
-type RPCCallerContextProto struct { - Context *string `protobuf:"bytes,1,req,name=context" json:"context,omitempty"` - Signature []byte `protobuf:"bytes,2,opt,name=signature" json:"signature,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RPCCallerContextProto) Reset() { *m = RPCCallerContextProto{} } -func (m *RPCCallerContextProto) String() string { return proto.CompactTextString(m) } -func (*RPCCallerContextProto) ProtoMessage() {} -func (*RPCCallerContextProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } - -func (m *RPCCallerContextProto) GetContext() string { - if m != nil && m.Context != nil { - return *m.Context - } - return "" -} - -func (m *RPCCallerContextProto) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil -} - -type RpcRequestHeaderProto struct { - RpcKind *RpcKindProto `protobuf:"varint,1,opt,name=rpcKind,enum=hadoop.common.RpcKindProto" json:"rpcKind,omitempty"` - RpcOp *RpcRequestHeaderProto_OperationProto `protobuf:"varint,2,opt,name=rpcOp,enum=hadoop.common.RpcRequestHeaderProto_OperationProto" json:"rpcOp,omitempty"` - CallId *int32 `protobuf:"zigzag32,3,req,name=callId" json:"callId,omitempty"` - ClientId []byte `protobuf:"bytes,4,req,name=clientId" json:"clientId,omitempty"` - // clientId + callId uniquely identifies a request - // retry count, 1 means this is the first retry - RetryCount *int32 `protobuf:"zigzag32,5,opt,name=retryCount,def=-1" json:"retryCount,omitempty"` - TraceInfo *RPCTraceInfoProto `protobuf:"bytes,6,opt,name=traceInfo" json:"traceInfo,omitempty"` - CallerContext *RPCCallerContextProto `protobuf:"bytes,7,opt,name=callerContext" json:"callerContext,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcRequestHeaderProto) Reset() { *m = RpcRequestHeaderProto{} } -func (m *RpcRequestHeaderProto) String() string { return proto.CompactTextString(m) } -func (*RpcRequestHeaderProto) ProtoMessage() {} -func (*RpcRequestHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } - -const Default_RpcRequestHeaderProto_RetryCount int32 = -1 - -func (m *RpcRequestHeaderProto) GetRpcKind() RpcKindProto { - if m != nil && m.RpcKind != nil { - return *m.RpcKind - } - return RpcKindProto_RPC_BUILTIN -} - -func (m *RpcRequestHeaderProto) GetRpcOp() RpcRequestHeaderProto_OperationProto { - if m != nil && m.RpcOp != nil { - return *m.RpcOp - } - return RpcRequestHeaderProto_RPC_FINAL_PACKET -} - -func (m *RpcRequestHeaderProto) GetCallId() int32 { - if m != nil && m.CallId != nil { - return *m.CallId - } - return 0 -} - -func (m *RpcRequestHeaderProto) GetClientId() []byte { - if m != nil { - return m.ClientId - } - return nil -} - -func (m *RpcRequestHeaderProto) GetRetryCount() int32 { - if m != nil && m.RetryCount != nil { - return *m.RetryCount - } - return Default_RpcRequestHeaderProto_RetryCount -} - -func (m *RpcRequestHeaderProto) GetTraceInfo() *RPCTraceInfoProto { - if m != nil { - return m.TraceInfo - } - return nil -} - -func (m *RpcRequestHeaderProto) GetCallerContext() *RPCCallerContextProto { - if m != nil { - return m.CallerContext - } - return nil -} - -// * -// Rpc Response Header -// +------------------------------------------------------------------+ -// | Rpc total response length in bytes (4 bytes int) | -// | (sum of next two parts) | -// +------------------------------------------------------------------+ -// | RpcResponseHeaderProto - serialized delimited ie has len | -// +------------------------------------------------------------------+ 
-// | if request is successful: | -// | - RpcResponse - The actual rpc response bytes follow | -// | the response header | -// | This response is serialized based on RpcKindProto | -// | if request fails : | -// | The rpc response header contains the necessary info | -// +------------------------------------------------------------------+ -// -// Note that rpc response header is also used when connection setup fails. -// Ie the response looks like a rpc response with a fake callId. -type RpcResponseHeaderProto struct { - CallId *uint32 `protobuf:"varint,1,req,name=callId" json:"callId,omitempty"` - Status *RpcResponseHeaderProto_RpcStatusProto `protobuf:"varint,2,req,name=status,enum=hadoop.common.RpcResponseHeaderProto_RpcStatusProto" json:"status,omitempty"` - ServerIpcVersionNum *uint32 `protobuf:"varint,3,opt,name=serverIpcVersionNum" json:"serverIpcVersionNum,omitempty"` - ExceptionClassName *string `protobuf:"bytes,4,opt,name=exceptionClassName" json:"exceptionClassName,omitempty"` - ErrorMsg *string `protobuf:"bytes,5,opt,name=errorMsg" json:"errorMsg,omitempty"` - ErrorDetail *RpcResponseHeaderProto_RpcErrorCodeProto `protobuf:"varint,6,opt,name=errorDetail,enum=hadoop.common.RpcResponseHeaderProto_RpcErrorCodeProto" json:"errorDetail,omitempty"` - ClientId []byte `protobuf:"bytes,7,opt,name=clientId" json:"clientId,omitempty"` - RetryCount *int32 `protobuf:"zigzag32,8,opt,name=retryCount,def=-1" json:"retryCount,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcResponseHeaderProto) Reset() { *m = RpcResponseHeaderProto{} } -func (m *RpcResponseHeaderProto) String() string { return proto.CompactTextString(m) } -func (*RpcResponseHeaderProto) ProtoMessage() {} -func (*RpcResponseHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } - -const Default_RpcResponseHeaderProto_RetryCount int32 = -1 - -func (m *RpcResponseHeaderProto) GetCallId() uint32 { - if m != nil && m.CallId != nil { - return *m.CallId - } - return 0 -} - -func (m *RpcResponseHeaderProto) GetStatus() RpcResponseHeaderProto_RpcStatusProto { - if m != nil && m.Status != nil { - return *m.Status - } - return RpcResponseHeaderProto_SUCCESS -} - -func (m *RpcResponseHeaderProto) GetServerIpcVersionNum() uint32 { - if m != nil && m.ServerIpcVersionNum != nil { - return *m.ServerIpcVersionNum - } - return 0 -} - -func (m *RpcResponseHeaderProto) GetExceptionClassName() string { - if m != nil && m.ExceptionClassName != nil { - return *m.ExceptionClassName - } - return "" -} - -func (m *RpcResponseHeaderProto) GetErrorMsg() string { - if m != nil && m.ErrorMsg != nil { - return *m.ErrorMsg - } - return "" -} - -func (m *RpcResponseHeaderProto) GetErrorDetail() RpcResponseHeaderProto_RpcErrorCodeProto { - if m != nil && m.ErrorDetail != nil { - return *m.ErrorDetail - } - return RpcResponseHeaderProto_ERROR_APPLICATION -} - -func (m *RpcResponseHeaderProto) GetClientId() []byte { - if m != nil { - return m.ClientId - } - return nil -} - -func (m *RpcResponseHeaderProto) GetRetryCount() int32 { - if m != nil && m.RetryCount != nil { - return *m.RetryCount - } - return Default_RpcResponseHeaderProto_RetryCount -} - -type RpcSaslProto struct { - Version *uint32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` - State *RpcSaslProto_SaslState `protobuf:"varint,2,req,name=state,enum=hadoop.common.RpcSaslProto_SaslState" json:"state,omitempty"` - Token []byte `protobuf:"bytes,3,opt,name=token" json:"token,omitempty"` - Auths []*RpcSaslProto_SaslAuth 
`protobuf:"bytes,4,rep,name=auths" json:"auths,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcSaslProto) Reset() { *m = RpcSaslProto{} } -func (m *RpcSaslProto) String() string { return proto.CompactTextString(m) } -func (*RpcSaslProto) ProtoMessage() {} -func (*RpcSaslProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } - -func (m *RpcSaslProto) GetVersion() uint32 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func (m *RpcSaslProto) GetState() RpcSaslProto_SaslState { - if m != nil && m.State != nil { - return *m.State - } - return RpcSaslProto_SUCCESS -} - -func (m *RpcSaslProto) GetToken() []byte { - if m != nil { - return m.Token - } - return nil -} - -func (m *RpcSaslProto) GetAuths() []*RpcSaslProto_SaslAuth { - if m != nil { - return m.Auths - } - return nil -} - -type RpcSaslProto_SaslAuth struct { - Method *string `protobuf:"bytes,1,req,name=method" json:"method,omitempty"` - Mechanism *string `protobuf:"bytes,2,req,name=mechanism" json:"mechanism,omitempty"` - Protocol *string `protobuf:"bytes,3,opt,name=protocol" json:"protocol,omitempty"` - ServerId *string `protobuf:"bytes,4,opt,name=serverId" json:"serverId,omitempty"` - Challenge []byte `protobuf:"bytes,5,opt,name=challenge" json:"challenge,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RpcSaslProto_SaslAuth) Reset() { *m = RpcSaslProto_SaslAuth{} } -func (m *RpcSaslProto_SaslAuth) String() string { return proto.CompactTextString(m) } -func (*RpcSaslProto_SaslAuth) ProtoMessage() {} -func (*RpcSaslProto_SaslAuth) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4, 0} } - -func (m *RpcSaslProto_SaslAuth) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RpcSaslProto_SaslAuth) GetMechanism() string { - if m != nil && m.Mechanism != nil { - return *m.Mechanism - } - return "" -} - -func (m *RpcSaslProto_SaslAuth) GetProtocol() string { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return "" -} - -func (m *RpcSaslProto_SaslAuth) GetServerId() string { - if m != nil && m.ServerId != nil { - return *m.ServerId - } - return "" -} - -func (m *RpcSaslProto_SaslAuth) GetChallenge() []byte { - if m != nil { - return m.Challenge - } - return nil -} - -func init() { - proto.RegisterType((*RPCTraceInfoProto)(nil), "hadoop.common.RPCTraceInfoProto") - proto.RegisterType((*RPCCallerContextProto)(nil), "hadoop.common.RPCCallerContextProto") - proto.RegisterType((*RpcRequestHeaderProto)(nil), "hadoop.common.RpcRequestHeaderProto") - proto.RegisterType((*RpcResponseHeaderProto)(nil), "hadoop.common.RpcResponseHeaderProto") - proto.RegisterType((*RpcSaslProto)(nil), "hadoop.common.RpcSaslProto") - proto.RegisterType((*RpcSaslProto_SaslAuth)(nil), "hadoop.common.RpcSaslProto.SaslAuth") - proto.RegisterEnum("hadoop.common.RpcKindProto", RpcKindProto_name, RpcKindProto_value) - proto.RegisterEnum("hadoop.common.RpcRequestHeaderProto_OperationProto", RpcRequestHeaderProto_OperationProto_name, RpcRequestHeaderProto_OperationProto_value) - proto.RegisterEnum("hadoop.common.RpcResponseHeaderProto_RpcStatusProto", RpcResponseHeaderProto_RpcStatusProto_name, RpcResponseHeaderProto_RpcStatusProto_value) - proto.RegisterEnum("hadoop.common.RpcResponseHeaderProto_RpcErrorCodeProto", RpcResponseHeaderProto_RpcErrorCodeProto_name, RpcResponseHeaderProto_RpcErrorCodeProto_value) - proto.RegisterEnum("hadoop.common.RpcSaslProto_SaslState", RpcSaslProto_SaslState_name, 
RpcSaslProto_SaslState_value) -} - -func init() { proto.RegisterFile("RpcHeader.proto", fileDescriptor4) } - -var fileDescriptor4 = []byte{ - // 1035 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, - 0x14, 0x5e, 0x3b, 0x49, 0xdb, 0x9c, 0x26, 0xa9, 0x33, 0xdb, 0x76, 0xad, 0x76, 0xb5, 0x1b, 0x05, - 0x90, 0x22, 0x24, 0x22, 0xe8, 0x2e, 0x42, 0x5a, 0x24, 0x24, 0xd7, 0x99, 0x36, 0xb3, 0x75, 0x6d, - 0x33, 0x76, 0x5a, 0xb1, 0x02, 0x45, 0xc6, 0x99, 0x6d, 0x22, 0x12, 0xdb, 0xd8, 0xce, 0x6a, 0x79, - 0x11, 0xae, 0x79, 0x02, 0xc4, 0x03, 0x71, 0xc3, 0x9b, 0xa0, 0x99, 0x89, 0x9d, 0xa6, 0x29, 0x12, - 0x77, 0x73, 0xfe, 0xbe, 0x39, 0x3f, 0xdf, 0x99, 0x81, 0x03, 0x9a, 0x84, 0x43, 0x16, 0x4c, 0x58, - 0xda, 0x4f, 0xd2, 0x38, 0x8f, 0x51, 0x73, 0x1a, 0x4c, 0xe2, 0x38, 0xe9, 0x87, 0xf1, 0x62, 0x11, - 0x47, 0x5d, 0x02, 0x6d, 0xea, 0x9a, 0x7e, 0x1a, 0x84, 0x8c, 0x44, 0xef, 0x63, 0x57, 0xf8, 0xe8, - 0xb0, 0x9b, 0x0b, 0xcd, 0x44, 0x57, 0x3a, 0x4a, 0xaf, 0x42, 0x0b, 0x11, 0x9d, 0xc0, 0x5e, 0x12, - 0xa4, 0x2c, 0xca, 0xc9, 0x44, 0x57, 0x85, 0xa9, 0x94, 0xbb, 0x0e, 0x1c, 0x51, 0xd7, 0x34, 0x83, - 0xf9, 0x9c, 0xa5, 0x66, 0x1c, 0xe5, 0xec, 0x63, 0x5e, 0xc2, 0x85, 0x52, 0xd6, 0x95, 0x8e, 0xda, - 0xab, 0xd3, 0x42, 0x44, 0xcf, 0xa1, 0x9e, 0xcd, 0xee, 0xa2, 0x20, 0x5f, 0xa6, 0x4c, 0xe0, 0x35, - 0xe8, 0x5a, 0xd1, 0xfd, 0xbb, 0x02, 0x47, 0x34, 0x09, 0x29, 0xfb, 0x75, 0xc9, 0xb2, 0x5c, 0x56, - 0x21, 0x11, 0xbf, 0x86, 0xdd, 0x34, 0x09, 0xaf, 0x66, 0x91, 0x4c, 0xb0, 0x75, 0x76, 0xda, 0xdf, - 0x28, 0xab, 0x4f, 0xa5, 0x55, 0x78, 0xd3, 0xc2, 0x17, 0x11, 0xa8, 0xa5, 0x49, 0xe8, 0x24, 0xe2, - 0xaa, 0xd6, 0xd9, 0xab, 0xed, 0xa0, 0xed, 0xbb, 0xfa, 0x4e, 0xc2, 0xd2, 0x20, 0x9f, 0xc5, 0x91, - 0x04, 0x93, 0x08, 0xe8, 0x18, 0x76, 0xc2, 0x60, 0x3e, 0x27, 0x13, 0xbd, 0xd2, 0x51, 0x7b, 0x6d, - 0xba, 0x92, 0x78, 0x83, 0xc2, 0xf9, 0x4c, 0x36, 0xa8, 0xda, 0x51, 0x7b, 0x0d, 0x5a, 0xca, 0xa8, - 0x0b, 0x90, 0xb2, 0x3c, 0xfd, 0xcd, 0x8c, 0x97, 0x51, 0xae, 0xd7, 0x3a, 0x4a, 0xaf, 0xfd, 0x46, - 0xfd, 0xe2, 0x2b, 0x7a, 0x4f, 0x8b, 0xbe, 0x83, 0x7a, 0x5e, 0x0c, 0x43, 0xdf, 0xe9, 0x28, 0xbd, - 0xfd, 0xb3, 0xce, 0xc3, 0x34, 0x1f, 0xce, 0x8b, 0xae, 0x43, 0xd0, 0x5b, 0x68, 0x86, 0xf7, 0x27, - 0xa0, 0xef, 0x0a, 0x8c, 0x4f, 0xb7, 0x31, 0xb6, 0x07, 0x45, 0x37, 0x43, 0xbb, 0x3f, 0x41, 0x6b, - 0xb3, 0x78, 0x74, 0x08, 0x1a, 0x75, 0xcd, 0xf1, 0x05, 0xb1, 0x0d, 0x6b, 0xec, 0x1a, 0xe6, 0x15, - 0xf6, 0xb5, 0x27, 0xe8, 0x14, 0x9e, 0x71, 0xad, 0xe9, 0xd8, 0x3e, 0xb1, 0x47, 0x86, 0x4f, 0x1c, - 0xbb, 0x30, 0x2a, 0x48, 0x87, 0x43, 0x61, 0xb4, 0x1c, 0x0f, 0x73, 0x17, 0x1b, 0x9b, 0xdc, 0x41, - 0x53, 0xbb, 0x7f, 0xed, 0xc0, 0xb1, 0x68, 0x79, 0x96, 0xc4, 0x51, 0xc6, 0xee, 0xcf, 0x77, 0xdd, - 0x5d, 0x4e, 0x98, 0x66, 0xd9, 0x5d, 0x0b, 0x76, 0xb2, 0x3c, 0xc8, 0x97, 0x99, 0xae, 0x76, 0xd4, - 0x5e, 0xeb, 0xec, 0xf5, 0x63, 0x13, 0xdc, 0x82, 0xe3, 0x6a, 0x4f, 0x84, 0xc9, 0x32, 0x57, 0x18, - 0xe8, 0x4b, 0x78, 0x9a, 0xb1, 0xf4, 0x03, 0x4b, 0x49, 0x12, 0xde, 0xb0, 0x34, 0x9b, 0xc5, 0x91, - 0xbd, 0x5c, 0xe8, 0x95, 0x8e, 0xd2, 0x6b, 0xd2, 0xc7, 0x4c, 0xa8, 0x0f, 0x88, 0x7d, 0x0c, 0x59, - 0xc2, 0x3b, 0x62, 0xce, 0x83, 0x2c, 0xb3, 0x83, 0x05, 0xd3, 0xab, 0x1d, 0xa5, 0x57, 0xa7, 0x8f, - 0x58, 0x38, 0x1b, 0x58, 0x9a, 0xc6, 0xe9, 0x75, 0x76, 0x27, 0xe6, 0x5d, 0xa7, 0xa5, 0x8c, 0x7e, - 0x80, 0x7d, 0x71, 0x1e, 0xb0, 0x3c, 0x98, 0xcd, 0xc5, 0xac, 0x5b, 0x67, 0xdf, 0xfc, 0xef, 0x82, - 0x30, 0x8f, 0x35, 0xe3, 0x09, 0x93, 0x35, 0xdd, 0xc7, 0xda, 0x20, 0xe1, 0xae, 0xd8, 0xaa, 0xff, - 0x22, 0xe1, 0xde, 0x63, 0x24, 
0xec, 0xbe, 0x82, 0xd6, 0x66, 0xcb, 0xd0, 0x3e, 0xec, 0x7a, 0x23, - 0xd3, 0xc4, 0x9e, 0xa7, 0x3d, 0x41, 0x75, 0xa8, 0x61, 0x4a, 0x1d, 0xaa, 0x29, 0xfc, 0x78, 0x61, - 0xf8, 0x86, 0xa5, 0xa9, 0xdd, 0x7f, 0x54, 0x68, 0x6f, 0xe5, 0x85, 0x8e, 0xa0, 0x2d, 0x7c, 0xc7, - 0x86, 0xeb, 0x5a, 0xc4, 0x14, 0xe4, 0x90, 0xac, 0x90, 0x6a, 0xdb, 0x19, 0x7b, 0x23, 0x73, 0x38, - 0xbe, 0xc6, 0xfe, 0xd0, 0x19, 0x68, 0x2a, 0x3a, 0x81, 0xe3, 0x4d, 0x8b, 0x4b, 0x1d, 0xdf, 0x31, - 0x1d, 0x4b, 0xab, 0x70, 0xfa, 0x49, 0x1b, 0x67, 0x94, 0x87, 0xe9, 0x0d, 0xa6, 0x5a, 0x15, 0xbd, - 0x80, 0x13, 0xa9, 0xf5, 0x30, 0x25, 0x86, 0x45, 0xde, 0x11, 0xfb, 0x72, 0x4c, 0xb1, 0xe7, 0x3a, - 0xb6, 0x87, 0xb5, 0xda, 0xda, 0xce, 0xa3, 0x6e, 0x30, 0xf5, 0x38, 0x3f, 0xaf, 0x89, 0x77, 0x6d, - 0xf8, 0xe6, 0x50, 0xdb, 0x41, 0x6d, 0x68, 0x8a, 0x1a, 0xc6, 0x23, 0xfb, 0xca, 0x76, 0x6e, 0x6d, - 0x0d, 0xd0, 0x27, 0xf0, 0xb2, 0x50, 0x79, 0x23, 0xd7, 0x75, 0xa8, 0x8f, 0x07, 0x25, 0xbc, 0xac, - 0x61, 0x1f, 0x3d, 0x07, 0x5d, 0x3a, 0x11, 0xfb, 0xc6, 0xb0, 0xc8, 0x40, 0xe0, 0x0f, 0xb1, 0x31, - 0xc0, 0x54, 0x6b, 0xa0, 0x97, 0x70, 0x2a, 0xad, 0x03, 0xbc, 0x99, 0xd7, 0xf7, 0x23, 0xec, 0xf9, - 0x5a, 0x93, 0x17, 0x2a, 0x1d, 0xb6, 0x52, 0x6a, 0xa1, 0x63, 0x40, 0xc5, 0xfd, 0xc6, 0xc8, 0x1f, - 0x3a, 0x94, 0xbc, 0xc3, 0x03, 0xed, 0xa0, 0xfb, 0x67, 0x05, 0x1a, 0x7c, 0x32, 0x41, 0x36, 0x2f, - 0x9f, 0xd6, 0x0f, 0x92, 0x9e, 0xe2, 0x21, 0x6c, 0xd2, 0x42, 0x44, 0xdf, 0x42, 0x8d, 0xd3, 0x9c, - 0xad, 0x36, 0xe5, 0xb3, 0x6d, 0x62, 0x95, 0x28, 0x7d, 0x7e, 0xe2, 0xd3, 0x66, 0x54, 0xc6, 0xa0, - 0x43, 0xa8, 0xe5, 0xf1, 0x2f, 0x2c, 0x12, 0xbb, 0xd0, 0xa0, 0x52, 0x40, 0x6f, 0xa0, 0x16, 0x2c, - 0xf3, 0x69, 0xa6, 0x57, 0x3b, 0x95, 0xc7, 0xde, 0x94, 0x87, 0x90, 0xc6, 0x32, 0x9f, 0x52, 0x19, - 0x72, 0xf2, 0xbb, 0x02, 0x7b, 0x85, 0x8e, 0xaf, 0xf7, 0x82, 0xe5, 0xd3, 0x78, 0xb2, 0xfa, 0x0f, - 0x56, 0x12, 0xff, 0x0e, 0x16, 0x2c, 0x9c, 0x06, 0xd1, 0x2c, 0x5b, 0x88, 0xbc, 0xeb, 0x74, 0xad, - 0x10, 0x7f, 0x0f, 0xc7, 0x0e, 0xe3, 0xb9, 0xc8, 0xab, 0x4e, 0x4b, 0x99, 0xdb, 0x56, 0xfb, 0x3a, - 0x59, 0xad, 0x63, 0x29, 0x73, 0xd4, 0x70, 0xca, 0x1f, 0xb6, 0xe8, 0x8e, 0x89, 0x2d, 0x6c, 0xd0, - 0xb5, 0xa2, 0xfb, 0x23, 0xd4, 0xcb, 0xf2, 0x37, 0x69, 0xde, 0x84, 0xba, 0x8d, 0x2f, 0x1d, 0x9f, - 0x18, 0x3e, 0xd6, 0x14, 0xd4, 0x80, 0x3d, 0x62, 0x13, 0x29, 0xa9, 0xdc, 0x68, 0x0e, 0x0d, 0xcb, - 0xc2, 0xf6, 0x25, 0xd6, 0x2a, 0xdc, 0x58, 0x32, 0xae, 0x8a, 0xf6, 0xa0, 0x7a, 0x4b, 0x0d, 0x57, - 0xab, 0x7d, 0xfe, 0x56, 0xcc, 0xab, 0xfc, 0x8a, 0xd0, 0x01, 0xec, 0x73, 0x96, 0x9c, 0x8f, 0x88, - 0xe5, 0x13, 0x5b, 0x7b, 0x82, 0x34, 0x68, 0x70, 0xc5, 0x2d, 0x25, 0xbe, 0x71, 0x6e, 0xf1, 0x7b, - 0x9e, 0xc1, 0x53, 0xae, 0x29, 0x68, 0x3f, 0x3e, 0x1f, 0x5d, 0x5c, 0x60, 0xaa, 0xa9, 0xe7, 0xaf, - 0xe1, 0x45, 0x9c, 0xde, 0xf5, 0x83, 0x24, 0x08, 0xa7, 0xac, 0xe8, 0xfd, 0x2c, 0x09, 0xe5, 0xc7, - 0xfe, 0xf3, 0xf2, 0xfd, 0xf9, 0xfa, 0xb3, 0x17, 0xb7, 0x65, 0x7f, 0x28, 0xca, 0xbf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x4d, 0x60, 0xab, 0xef, 0x01, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.proto deleted file mode 100644 index aa146162896..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/RpcHeader.proto +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ipc.protobuf";
-option java_outer_classname = "RpcHeaderProtos";
-option java_generate_equals_and_hash = true;
-package hadoop.common;
-
-/**
- * This is the rpc request header. It is sent with every rpc call.
- *
- * The format of RPC call is as follows:
- * +--------------------------------------------------------------+
- * | Rpc length in bytes (4 bytes int) sum of next two parts       |
- * +--------------------------------------------------------------+
- * | RpcRequestHeaderProto - serialized delimited ie has len       |
- * +--------------------------------------------------------------+
- * | RpcRequest The actual rpc request                             |
- * | This request is serialized based on RpcKindProto              |
- * +--------------------------------------------------------------+
- *
- */
-
-/**
- * RpcKind determines the rpcEngine and the serialization of the rpc request
- */
-enum RpcKindProto {
-  RPC_BUILTIN          = 0;  // Used for built-in calls by tests
-  RPC_WRITABLE         = 1;  // Use WritableRpcEngine
-  RPC_PROTOCOL_BUFFER  = 2;  // Use ProtobufRpcEngine
-}
-
-
-
-/**
- * Used to pass through the information necessary to continue
- * a trace after an RPC is made. All we need is the traceid
- * (so we know the overarching trace this message is a part of), and
- * the id of the current span when this message was sent, so we know
- * what span caused the new span we will create when this message is received.
- */
-message RPCTraceInfoProto {
-  optional int64 traceId = 1;  // parentIdHigh
-  optional int64 parentId = 2; // parentIdLow
-
-}
-
-/**
- * Used to pass through the call context entry after an RPC is made.
- */
-message RPCCallerContextProto {
-  required string context = 1;
-  optional bytes signature = 2;
-}
-
-message RpcRequestHeaderProto { // the header for the RpcRequest
-  enum OperationProto {
-    RPC_FINAL_PACKET        = 0; // The final RPC Packet
-    RPC_CONTINUATION_PACKET = 1; // not implemented yet
-    RPC_CLOSE_CONNECTION    = 2; // close the rpc connection
-  }
-
-  optional RpcKindProto rpcKind = 1;
-  optional OperationProto rpcOp = 2;
-  required sint32 callId = 3; // a sequence number that is sent back in response
-  required bytes clientId = 4; // Globally unique client ID
-  // clientId + callId uniquely identifies a request
-  // retry count, 1 means this is the first retry
-  optional sint32 retryCount = 5 [default = -1];
-  optional RPCTraceInfoProto traceInfo = 6; // tracing info
-  optional RPCCallerContextProto callerContext = 7; // call context
-}
-
-
-
-/**
- * Rpc Response Header
- * +------------------------------------------------------------------+
- * | Rpc total response length in bytes (4 bytes int)                  |
- * |  (sum of next two parts)                                          |
- * +------------------------------------------------------------------+
- * | RpcResponseHeaderProto - serialized delimited ie has len          |
- * +------------------------------------------------------------------+
- * | if request is successful:                                         |
- * |   - RpcResponse -  The actual rpc response  bytes follow          |
- * |     the response header                                           |
- * |     This response is serialized based on RpcKindProto             |
- * | if request fails :                                                |
- * |   The rpc response header contains the necessary info             |
- * +------------------------------------------------------------------+
- *
- * Note that rpc response header is also used when connection setup fails.
- * I.e. the response looks like an rpc response with a fake callId.
- */
-message RpcResponseHeaderProto {
-  /**
-   *
-   * RpcStatus - success or failure
-   * The response header's errDetail, exceptionClassName and errMsg contain
-   * further details on the error
-   **/
-
-  enum RpcStatusProto {
-   SUCCESS = 0;  // RPC succeeded
-   ERROR = 1;    // RPC or error - connection left open for future calls
-   FATAL = 2;    // Fatal error - connection closed
-  }
-
-  enum RpcErrorCodeProto {
-
-    // Non-fatal Rpc error - connection left open for future rpc calls
-    ERROR_APPLICATION = 1;          // RPC Failed - rpc app threw exception
-    ERROR_NO_SUCH_METHOD = 2;       // Rpc error - no such method
-    ERROR_NO_SUCH_PROTOCOL = 3;     // Rpc error - no such protocol
-    ERROR_RPC_SERVER = 4;           // Rpc error on server side
-    ERROR_SERIALIZING_RESPONSE = 5; // error serializing response
-    ERROR_RPC_VERSION_MISMATCH = 6; // Rpc protocol version mismatch
-
-
-    // Fatal Server side Rpc error - connection closed
-    FATAL_UNKNOWN = 10;                   // unknown Fatal error
-    FATAL_UNSUPPORTED_SERIALIZATION = 11; // IPC layer serialization type invalid
-    FATAL_INVALID_RPC_HEADER = 12;        // fields of RpcHeader are invalid
-    FATAL_DESERIALIZING_REQUEST = 13;     // could not deserialize rpc request
-    FATAL_VERSION_MISMATCH = 14;          // Ipc Layer version mismatch
-    FATAL_UNAUTHORIZED = 15;              // Auth failed
-  }
-
-  required uint32 callId = 1; // callId used in Request
-  required RpcStatusProto status = 2;
-  optional uint32 serverIpcVersionNum = 3; // Sent if success or fail
-  optional string exceptionClassName = 4;  // if request fails
-  optional string errorMsg = 5;  // if request fails, often contains stack trace
-  optional RpcErrorCodeProto errorDetail = 6; // in case of error
-  optional bytes clientId = 7; // Globally unique client ID
-  optional sint32 retryCount = 8 [default = -1];
-}
-
-message RpcSaslProto {
-  enum
SaslState { - SUCCESS = 0; - NEGOTIATE = 1; - INITIATE = 2; - CHALLENGE = 3; - RESPONSE = 4; - WRAP = 5; - } - - message SaslAuth { - required string method = 1; - required string mechanism = 2; - optional string protocol = 3; - optional string serverId = 4; - optional bytes challenge = 5; - } - - optional uint32 version = 1; - required SaslState state = 2; - optional bytes token = 3; - repeated SaslAuth auths = 4; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.pb.go deleted file mode 100644 index 666e1d495a6..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.pb.go +++ /dev/null @@ -1,193 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: Security.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Security token identifier -type TokenProto struct { - Identifier []byte `protobuf:"bytes,1,req,name=identifier" json:"identifier,omitempty"` - Password []byte `protobuf:"bytes,2,req,name=password" json:"password,omitempty"` - Kind *string `protobuf:"bytes,3,req,name=kind" json:"kind,omitempty"` - Service *string `protobuf:"bytes,4,req,name=service" json:"service,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TokenProto) Reset() { *m = TokenProto{} } -func (m *TokenProto) String() string { return proto.CompactTextString(m) } -func (*TokenProto) ProtoMessage() {} -func (*TokenProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } - -func (m *TokenProto) GetIdentifier() []byte { - if m != nil { - return m.Identifier - } - return nil -} - -func (m *TokenProto) GetPassword() []byte { - if m != nil { - return m.Password - } - return nil -} - -func (m *TokenProto) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *TokenProto) GetService() string { - if m != nil && m.Service != nil { - return *m.Service - } - return "" -} - -type GetDelegationTokenRequestProto struct { - Renewer *string `protobuf:"bytes,1,req,name=renewer" json:"renewer,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDelegationTokenRequestProto) Reset() { *m = GetDelegationTokenRequestProto{} } -func (m *GetDelegationTokenRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetDelegationTokenRequestProto) ProtoMessage() {} -func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{1} } - -func (m *GetDelegationTokenRequestProto) GetRenewer() string { - if m != nil && m.Renewer != nil { - return *m.Renewer - } - return "" -} - -type GetDelegationTokenResponseProto struct { - Token *TokenProto `protobuf:"bytes,1,opt,name=token" json:"token,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDelegationTokenResponseProto) Reset() { *m = GetDelegationTokenResponseProto{} } -func (m *GetDelegationTokenResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetDelegationTokenResponseProto) ProtoMessage() {} -func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{2} } - -func (m *GetDelegationTokenResponseProto) GetToken() *TokenProto { - if m != nil { - return m.Token - } - return nil -} - -type RenewDelegationTokenRequestProto 
struct {
-	Token            *TokenProto `protobuf:"bytes,1,req,name=token" json:"token,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
-}
-
-func (m *RenewDelegationTokenRequestProto) Reset()         { *m = RenewDelegationTokenRequestProto{} }
-func (m *RenewDelegationTokenRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RenewDelegationTokenRequestProto) ProtoMessage()    {}
-func (*RenewDelegationTokenRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor7, []int{3}
-}
-
-func (m *RenewDelegationTokenRequestProto) GetToken() *TokenProto {
-	if m != nil {
-		return m.Token
-	}
-	return nil
-}
-
-type RenewDelegationTokenResponseProto struct {
-	NewExpiryTime    *uint64 `protobuf:"varint,1,req,name=newExpiryTime" json:"newExpiryTime,omitempty"`
-	XXX_unrecognized []byte  `json:"-"`
-}
-
-func (m *RenewDelegationTokenResponseProto) Reset()         { *m = RenewDelegationTokenResponseProto{} }
-func (m *RenewDelegationTokenResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RenewDelegationTokenResponseProto) ProtoMessage()    {}
-func (*RenewDelegationTokenResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor7, []int{4}
-}
-
-func (m *RenewDelegationTokenResponseProto) GetNewExpiryTime() uint64 {
-	if m != nil && m.NewExpiryTime != nil {
-		return *m.NewExpiryTime
-	}
-	return 0
-}
-
-type CancelDelegationTokenRequestProto struct {
-	Token            *TokenProto `protobuf:"bytes,1,req,name=token" json:"token,omitempty"`
-	XXX_unrecognized []byte      `json:"-"`
-}
-
-func (m *CancelDelegationTokenRequestProto) Reset()         { *m = CancelDelegationTokenRequestProto{} }
-func (m *CancelDelegationTokenRequestProto) String() string { return proto.CompactTextString(m) }
-func (*CancelDelegationTokenRequestProto) ProtoMessage()    {}
-func (*CancelDelegationTokenRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor7, []int{5}
-}
-
-func (m *CancelDelegationTokenRequestProto) GetToken() *TokenProto {
-	if m != nil {
-		return m.Token
-	}
-	return nil
-}
-
-type CancelDelegationTokenResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CancelDelegationTokenResponseProto) Reset()         { *m = CancelDelegationTokenResponseProto{} }
-func (m *CancelDelegationTokenResponseProto) String() string { return proto.CompactTextString(m) }
-func (*CancelDelegationTokenResponseProto) ProtoMessage()    {}
-func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor7, []int{6}
-}
-
-func init() {
-	proto.RegisterType((*TokenProto)(nil), "hadoop.common.TokenProto")
-	proto.RegisterType((*GetDelegationTokenRequestProto)(nil), "hadoop.common.GetDelegationTokenRequestProto")
-	proto.RegisterType((*GetDelegationTokenResponseProto)(nil), "hadoop.common.GetDelegationTokenResponseProto")
-	proto.RegisterType((*RenewDelegationTokenRequestProto)(nil), "hadoop.common.RenewDelegationTokenRequestProto")
-	proto.RegisterType((*RenewDelegationTokenResponseProto)(nil), "hadoop.common.RenewDelegationTokenResponseProto")
-	proto.RegisterType((*CancelDelegationTokenRequestProto)(nil), "hadoop.common.CancelDelegationTokenRequestProto")
-	proto.RegisterType((*CancelDelegationTokenResponseProto)(nil), "hadoop.common.CancelDelegationTokenResponseProto")
-}
-
-func init() { proto.RegisterFile("Security.proto", fileDescriptor7) }
-
-var fileDescriptor7 = []byte{
-	// 303 bytes of a gzipped FileDescriptorProto (raw descriptor bytes omitted here; binary, not human-readable)
-}
diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.proto
deleted file mode 100644
index 5ff571decf5..00000000000
--- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/Security.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */ - -option java_package = "org.apache.hadoop.security.proto"; -option java_outer_classname = "SecurityProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -/** - * Security token identifier - */ -message TokenProto { - required bytes identifier = 1; - required bytes password = 2; - required string kind = 3; - required string service = 4; -} - -message GetDelegationTokenRequestProto { - required string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenResponseProto { - required uint64 newExpiryTime = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message CancelDelegationTokenResponseProto { // void response -} - diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.pb.go deleted file mode 100644 index c7a68f3f53f..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.pb.go +++ /dev/null @@ -1,199 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: TraceAdmin.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ListSpanReceiversRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListSpanReceiversRequestProto) Reset() { *m = ListSpanReceiversRequestProto{} } -func (m *ListSpanReceiversRequestProto) String() string { return proto.CompactTextString(m) } -func (*ListSpanReceiversRequestProto) ProtoMessage() {} -func (*ListSpanReceiversRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } - -type SpanReceiverListInfo struct { - Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - ClassName *string `protobuf:"bytes,2,req,name=className" json:"className,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SpanReceiverListInfo) Reset() { *m = SpanReceiverListInfo{} } -func (m *SpanReceiverListInfo) String() string { return proto.CompactTextString(m) } -func (*SpanReceiverListInfo) ProtoMessage() {} -func (*SpanReceiverListInfo) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } - -func (m *SpanReceiverListInfo) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *SpanReceiverListInfo) GetClassName() string { - if m != nil && m.ClassName != nil { - return *m.ClassName - } - return "" -} - -type ListSpanReceiversResponseProto struct { - Descriptions []*SpanReceiverListInfo `protobuf:"bytes,1,rep,name=descriptions" json:"descriptions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListSpanReceiversResponseProto) Reset() { *m = ListSpanReceiversResponseProto{} } -func (m *ListSpanReceiversResponseProto) String() string { return proto.CompactTextString(m) } -func (*ListSpanReceiversResponseProto) ProtoMessage() {} -func (*ListSpanReceiversResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } - -func (m *ListSpanReceiversResponseProto) GetDescriptions() []*SpanReceiverListInfo { - if m != nil { - return m.Descriptions - } - return nil -} - -type ConfigPair struct { - Key 
*string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` - Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfigPair) Reset() { *m = ConfigPair{} } -func (m *ConfigPair) String() string { return proto.CompactTextString(m) } -func (*ConfigPair) ProtoMessage() {} -func (*ConfigPair) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3} } - -func (m *ConfigPair) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *ConfigPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type AddSpanReceiverRequestProto struct { - ClassName *string `protobuf:"bytes,1,req,name=className" json:"className,omitempty"` - Config []*ConfigPair `protobuf:"bytes,2,rep,name=config" json:"config,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddSpanReceiverRequestProto) Reset() { *m = AddSpanReceiverRequestProto{} } -func (m *AddSpanReceiverRequestProto) String() string { return proto.CompactTextString(m) } -func (*AddSpanReceiverRequestProto) ProtoMessage() {} -func (*AddSpanReceiverRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{4} } - -func (m *AddSpanReceiverRequestProto) GetClassName() string { - if m != nil && m.ClassName != nil { - return *m.ClassName - } - return "" -} - -func (m *AddSpanReceiverRequestProto) GetConfig() []*ConfigPair { - if m != nil { - return m.Config - } - return nil -} - -type AddSpanReceiverResponseProto struct { - Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddSpanReceiverResponseProto) Reset() { *m = AddSpanReceiverResponseProto{} } -func (m *AddSpanReceiverResponseProto) String() string { return proto.CompactTextString(m) } -func (*AddSpanReceiverResponseProto) ProtoMessage() {} -func (*AddSpanReceiverResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{5} } - -func (m *AddSpanReceiverResponseProto) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -type RemoveSpanReceiverRequestProto struct { - Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveSpanReceiverRequestProto) Reset() { *m = RemoveSpanReceiverRequestProto{} } -func (m *RemoveSpanReceiverRequestProto) String() string { return proto.CompactTextString(m) } -func (*RemoveSpanReceiverRequestProto) ProtoMessage() {} -func (*RemoveSpanReceiverRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{6} } - -func (m *RemoveSpanReceiverRequestProto) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -type RemoveSpanReceiverResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveSpanReceiverResponseProto) Reset() { *m = RemoveSpanReceiverResponseProto{} } -func (m *RemoveSpanReceiverResponseProto) String() string { return proto.CompactTextString(m) } -func (*RemoveSpanReceiverResponseProto) ProtoMessage() {} -func (*RemoveSpanReceiverResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{7} } - -func init() { - proto.RegisterType((*ListSpanReceiversRequestProto)(nil), "hadoop.common.ListSpanReceiversRequestProto") - proto.RegisterType((*SpanReceiverListInfo)(nil), "hadoop.common.SpanReceiverListInfo") - proto.RegisterType((*ListSpanReceiversResponseProto)(nil), "hadoop.common.ListSpanReceiversResponseProto") - 
proto.RegisterType((*ConfigPair)(nil), "hadoop.common.ConfigPair")
-	proto.RegisterType((*AddSpanReceiverRequestProto)(nil), "hadoop.common.AddSpanReceiverRequestProto")
-	proto.RegisterType((*AddSpanReceiverResponseProto)(nil), "hadoop.common.AddSpanReceiverResponseProto")
-	proto.RegisterType((*RemoveSpanReceiverRequestProto)(nil), "hadoop.common.RemoveSpanReceiverRequestProto")
-	proto.RegisterType((*RemoveSpanReceiverResponseProto)(nil), "hadoop.common.RemoveSpanReceiverResponseProto")
-}
-
-func init() { proto.RegisterFile("TraceAdmin.proto", fileDescriptor5) }
-
-var fileDescriptor5 = []byte{
-	// 385 bytes of a gzipped FileDescriptorProto (raw descriptor bytes omitted here; binary, not human-readable)
-}
diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.proto
deleted file mode 100644
index 52d2a90abf4..00000000000
--- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/TraceAdmin.proto
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.tracing"; -option java_outer_classname = "TraceAdminPB"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.common; - -message ListSpanReceiversRequestProto { -} - -message SpanReceiverListInfo { - required int64 id = 1; - required string className = 2; -} - -message ListSpanReceiversResponseProto { - repeated SpanReceiverListInfo descriptions = 1; -} - -message ConfigPair { - required string key = 1; - required string value = 2; -} - -message AddSpanReceiverRequestProto { - required string className = 1; - repeated ConfigPair config = 2; -} - -message AddSpanReceiverResponseProto { - required int64 id = 1; -} - -message RemoveSpanReceiverRequestProto { - required int64 id = 1; -} - -message RemoveSpanReceiverResponseProto { -} - -service TraceAdminService { - rpc listSpanReceivers(ListSpanReceiversRequestProto) - returns(ListSpanReceiversResponseProto); - - rpc addSpanReceiver(AddSpanReceiverRequestProto) - returns(AddSpanReceiverResponseProto); - - rpc removeSpanReceiver(RemoveSpanReceiverRequestProto) - returns(RemoveSpanReceiverResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.pb.go deleted file mode 100644 index 97d2ac910eb..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.pb.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: ZKFCProtocol.proto - -package hadoop_common - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
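A quick note on the "var _ = ..." assignments that follow (they recur at the top of every generated file in this patch): Go treats an unused import as a compile error, so protoc-gen-go pins the proto, fmt, and math packages with blank-identifier assignments in case no generated message happens to reference them. A minimal stand-alone illustration of the idiom, in a hypothetical file that is not part of this patch:

package example

import (
	"fmt"
	"math"
)

// The blank-identifier assignments below count as a "use" of each import,
// so this file compiles even if nothing else ever calls fmt or math.
var _ = fmt.Errorf
var _ = math.Inf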
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-type CedeActiveRequestProto struct {
-	MillisToCede     *uint32 `protobuf:"varint,1,req,name=millisToCede" json:"millisToCede,omitempty"`
-	XXX_unrecognized []byte  `json:"-"`
-}
-
-func (m *CedeActiveRequestProto) Reset()                    { *m = CedeActiveRequestProto{} }
-func (m *CedeActiveRequestProto) String() string            { return proto.CompactTextString(m) }
-func (*CedeActiveRequestProto) ProtoMessage()               {}
-func (*CedeActiveRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
-
-func (m *CedeActiveRequestProto) GetMillisToCede() uint32 {
-	if m != nil && m.MillisToCede != nil {
-		return *m.MillisToCede
-	}
-	return 0
-}
-
-type CedeActiveResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CedeActiveResponseProto) Reset()                    { *m = CedeActiveResponseProto{} }
-func (m *CedeActiveResponseProto) String() string            { return proto.CompactTextString(m) }
-func (*CedeActiveResponseProto) ProtoMessage()               {}
-func (*CedeActiveResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
-
-type GracefulFailoverRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GracefulFailoverRequestProto) Reset()                    { *m = GracefulFailoverRequestProto{} }
-func (m *GracefulFailoverRequestProto) String() string            { return proto.CompactTextString(m) }
-func (*GracefulFailoverRequestProto) ProtoMessage()               {}
-func (*GracefulFailoverRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} }
-
-type GracefulFailoverResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GracefulFailoverResponseProto) Reset()                    { *m = GracefulFailoverResponseProto{} }
-func (m *GracefulFailoverResponseProto) String() string            { return proto.CompactTextString(m) }
-func (*GracefulFailoverResponseProto) ProtoMessage()               {}
-func (*GracefulFailoverResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} }
-
-func init() {
-	proto.RegisterType((*CedeActiveRequestProto)(nil), "hadoop.common.CedeActiveRequestProto")
-	proto.RegisterType((*CedeActiveResponseProto)(nil), "hadoop.common.CedeActiveResponseProto")
-	proto.RegisterType((*GracefulFailoverRequestProto)(nil), "hadoop.common.GracefulFailoverRequestProto")
-	proto.RegisterType((*GracefulFailoverResponseProto)(nil), "hadoop.common.GracefulFailoverResponseProto")
-}
-
-func init() { proto.RegisterFile("ZKFCProtocol.proto", fileDescriptor1) }
-
-var fileDescriptor1 = []byte{
-	// 238 bytes of a gzipped FileDescriptorProto (raw descriptor bytes omitted here; binary, not human-readable)
-}
diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.proto
deleted file mode 100644
index a2b8dd10b30..00000000000
--- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_common/ZKFCProtocol.proto
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-option java_package = "org.apache.hadoop.ha.proto";
-option java_outer_classname = "ZKFCProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.common;
-
-message CedeActiveRequestProto {
-  required uint32 millisToCede = 1;
-}
-
-message CedeActiveResponseProto {
-}
-
-message GracefulFailoverRequestProto {
-}
-
-message GracefulFailoverResponseProto {
-}
-
-
-/**
- * Protocol provides manual control of the ZK Failover Controllers
- */
-service ZKFCProtocolService {
-  /**
-   * Request that the service cede its active state, and quit the election
-   * for some amount of time
-   */
-  rpc cedeActive(CedeActiveRequestProto)
-      returns(CedeActiveResponseProto);
-
-
-  rpc gracefulFailover(GracefulFailoverRequestProto)
-      returns(GracefulFailoverResponseProto);
-}
diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.pb.go
deleted file mode 100644
index 113b0d708a8..00000000000
--- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.pb.go
+++ /dev/null
@@ -1,407 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: ClientDatanodeProtocol.proto
-
-package hadoop_hdfs
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import hadoop_common "github.com/colinmarc/hdfs/protocol/hadoop_common"
-
-// Reference imports to suppress errors if they are not otherwise used.
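For context before the next generated file: ClientDatanodeProtocol is the RPC surface an HDFS client uses to talk directly to a datanode, and in this vendored library it sits underneath the high-level client API rather than being called directly. A rough sketch of that high-level usage, under the assumption that this vendored colinmarc/hdfs vintage exposes hdfs.New and Client.ReadFile with these signatures (the namenode address and file path are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/colinmarc/hdfs"
)

func main() {
	// Metadata operations go to the namenode; the actual block reads are
	// then issued to datanodes via the protocols vendored in this package.
	client, err := hdfs.New("namenode:8020") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	data, err := client.ReadFile("/tmp/example.txt") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}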
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// block - block for which visible length is requested -type GetReplicaVisibleLengthRequestProto struct { - Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetReplicaVisibleLengthRequestProto) Reset() { *m = GetReplicaVisibleLengthRequestProto{} } -func (m *GetReplicaVisibleLengthRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetReplicaVisibleLengthRequestProto) ProtoMessage() {} -func (*GetReplicaVisibleLengthRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{0} -} - -func (m *GetReplicaVisibleLengthRequestProto) GetBlock() *ExtendedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -// * -// length - visible length of the block -type GetReplicaVisibleLengthResponseProto struct { - Length *uint64 `protobuf:"varint,1,req,name=length" json:"length,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetReplicaVisibleLengthResponseProto) Reset() { *m = GetReplicaVisibleLengthResponseProto{} } -func (m *GetReplicaVisibleLengthResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetReplicaVisibleLengthResponseProto) ProtoMessage() {} -func (*GetReplicaVisibleLengthResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{1} -} - -func (m *GetReplicaVisibleLengthResponseProto) GetLength() uint64 { - if m != nil && m.Length != nil { - return *m.Length - } - return 0 -} - -// * -// void request -type RefreshNamenodesRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshNamenodesRequestProto) Reset() { *m = RefreshNamenodesRequestProto{} } -func (m *RefreshNamenodesRequestProto) String() string { return proto.CompactTextString(m) } -func (*RefreshNamenodesRequestProto) ProtoMessage() {} -func (*RefreshNamenodesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } - -// * -// void response -type RefreshNamenodesResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RefreshNamenodesResponseProto) Reset() { *m = RefreshNamenodesResponseProto{} } -func (m *RefreshNamenodesResponseProto) String() string { return proto.CompactTextString(m) } -func (*RefreshNamenodesResponseProto) ProtoMessage() {} -func (*RefreshNamenodesResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } - -// * -// blockPool - block pool to be deleted -// force - if false, delete the block pool only if it is empty. -// if true, delete the block pool even if it has blocks. 
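Because this is proto2-generated code, every scalar field on these messages is a pointer, and values are normally built with the helper constructors from the proto package. A minimal sketch using the DeleteBlockPoolRequestProto defined just below (the block-pool ID is a made-up placeholder; the import paths are the vendored ones shown in this file):

package main

import (
	"log"

	"github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	"github.com/golang/protobuf/proto"
)

func main() {
	// proto.String and proto.Bool return pointers, which is how proto2
	// distinguishes an unset field from a zero value.
	req := &hadoop_hdfs.DeleteBlockPoolRequestProto{
		BlockPool: proto.String("BP-0000000000-placeholder"), // hypothetical ID
		Force:     proto.Bool(false),
	}
	payload, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err) // fails if a required field is left nil
	}
	log.Printf("encoded %d bytes", len(payload))
}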
-type DeleteBlockPoolRequestProto struct { - BlockPool *string `protobuf:"bytes,1,req,name=blockPool" json:"blockPool,omitempty"` - Force *bool `protobuf:"varint,2,req,name=force" json:"force,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteBlockPoolRequestProto) Reset() { *m = DeleteBlockPoolRequestProto{} } -func (m *DeleteBlockPoolRequestProto) String() string { return proto.CompactTextString(m) } -func (*DeleteBlockPoolRequestProto) ProtoMessage() {} -func (*DeleteBlockPoolRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } - -func (m *DeleteBlockPoolRequestProto) GetBlockPool() string { - if m != nil && m.BlockPool != nil { - return *m.BlockPool - } - return "" -} - -func (m *DeleteBlockPoolRequestProto) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return false -} - -// * -// void response -type DeleteBlockPoolResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteBlockPoolResponseProto) Reset() { *m = DeleteBlockPoolResponseProto{} } -func (m *DeleteBlockPoolResponseProto) String() string { return proto.CompactTextString(m) } -func (*DeleteBlockPoolResponseProto) ProtoMessage() {} -func (*DeleteBlockPoolResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} } - -// * -// Gets the file information where block and its metadata is stored -// block - block for which path information is being requested -// token - block token -// -// This message is deprecated in favor of file descriptor passing. -type GetBlockLocalPathInfoRequestProto struct { - Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - Token *hadoop_common.TokenProto `protobuf:"bytes,2,req,name=token" json:"token,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetBlockLocalPathInfoRequestProto) Reset() { *m = GetBlockLocalPathInfoRequestProto{} } -func (m *GetBlockLocalPathInfoRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetBlockLocalPathInfoRequestProto) ProtoMessage() {} -func (*GetBlockLocalPathInfoRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{6} -} - -func (m *GetBlockLocalPathInfoRequestProto) GetBlock() *ExtendedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -func (m *GetBlockLocalPathInfoRequestProto) GetToken() *hadoop_common.TokenProto { - if m != nil { - return m.Token - } - return nil -} - -// * -// block - block for which file path information is being returned -// localPath - file path where the block data is stored -// localMetaPath - file path where the block meta data is stored -// -// This message is deprecated in favor of file descriptor passing. 
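One pattern worth calling out in the response type that follows (and in every generated getter in this file): each getter checks both the receiver and the field pointer before dereferencing, so reading from a nil or partially populated message returns the zero value instead of panicking. A small sketch, assuming the vendored package path:

package main

import (
	"fmt"

	"github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
)

func main() {
	// Calling a getter on a nil message is safe: the generated method
	// sees m == nil and returns the zero value for the field type.
	var resp *hadoop_hdfs.GetBlockLocalPathInfoResponseProto
	fmt.Printf("localPath=%q\n", resp.GetLocalPath()) // prints localPath=""
}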
-type GetBlockLocalPathInfoResponseProto struct { - Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - LocalPath *string `protobuf:"bytes,2,req,name=localPath" json:"localPath,omitempty"` - LocalMetaPath *string `protobuf:"bytes,3,req,name=localMetaPath" json:"localMetaPath,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetBlockLocalPathInfoResponseProto) Reset() { *m = GetBlockLocalPathInfoResponseProto{} } -func (m *GetBlockLocalPathInfoResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetBlockLocalPathInfoResponseProto) ProtoMessage() {} -func (*GetBlockLocalPathInfoResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{7} -} - -func (m *GetBlockLocalPathInfoResponseProto) GetBlock() *ExtendedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -func (m *GetBlockLocalPathInfoResponseProto) GetLocalPath() string { - if m != nil && m.LocalPath != nil { - return *m.LocalPath - } - return "" -} - -func (m *GetBlockLocalPathInfoResponseProto) GetLocalMetaPath() string { - if m != nil && m.LocalMetaPath != nil { - return *m.LocalMetaPath - } - return "" -} - -// * -// forUpgrade - if true, clients are advised to wait for restart and quick -// upgrade restart is instrumented. Otherwise, datanode does -// the regular shutdown. -type ShutdownDatanodeRequestProto struct { - ForUpgrade *bool `protobuf:"varint,1,req,name=forUpgrade" json:"forUpgrade,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShutdownDatanodeRequestProto) Reset() { *m = ShutdownDatanodeRequestProto{} } -func (m *ShutdownDatanodeRequestProto) String() string { return proto.CompactTextString(m) } -func (*ShutdownDatanodeRequestProto) ProtoMessage() {} -func (*ShutdownDatanodeRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{8} } - -func (m *ShutdownDatanodeRequestProto) GetForUpgrade() bool { - if m != nil && m.ForUpgrade != nil { - return *m.ForUpgrade - } - return false -} - -type ShutdownDatanodeResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShutdownDatanodeResponseProto) Reset() { *m = ShutdownDatanodeResponseProto{} } -func (m *ShutdownDatanodeResponseProto) String() string { return proto.CompactTextString(m) } -func (*ShutdownDatanodeResponseProto) ProtoMessage() {} -func (*ShutdownDatanodeResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{9} } - -// * Tell datanode to evict active clients that are writing -type EvictWritersRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *EvictWritersRequestProto) Reset() { *m = EvictWritersRequestProto{} } -func (m *EvictWritersRequestProto) String() string { return proto.CompactTextString(m) } -func (*EvictWritersRequestProto) ProtoMessage() {} -func (*EvictWritersRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{10} } - -type EvictWritersResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *EvictWritersResponseProto) Reset() { *m = EvictWritersResponseProto{} } -func (m *EvictWritersResponseProto) String() string { return proto.CompactTextString(m) } -func (*EvictWritersResponseProto) ProtoMessage() {} -func (*EvictWritersResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{11} } - -// * -// Ping datanode for liveness and quick info -type GetDatanodeInfoRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeInfoRequestProto) Reset() { *m = 
GetDatanodeInfoRequestProto{} } -func (m *GetDatanodeInfoRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeInfoRequestProto) ProtoMessage() {} -func (*GetDatanodeInfoRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{12} } - -type GetDatanodeInfoResponseProto struct { - LocalInfo *DatanodeLocalInfoProto `protobuf:"bytes,1,req,name=localInfo" json:"localInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeInfoResponseProto) Reset() { *m = GetDatanodeInfoResponseProto{} } -func (m *GetDatanodeInfoResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeInfoResponseProto) ProtoMessage() {} -func (*GetDatanodeInfoResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{13} } - -func (m *GetDatanodeInfoResponseProto) GetLocalInfo() *DatanodeLocalInfoProto { - if m != nil { - return m.LocalInfo - } - return nil -} - -type TriggerBlockReportRequestProto struct { - Incremental *bool `protobuf:"varint,1,req,name=incremental" json:"incremental,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TriggerBlockReportRequestProto) Reset() { *m = TriggerBlockReportRequestProto{} } -func (m *TriggerBlockReportRequestProto) String() string { return proto.CompactTextString(m) } -func (*TriggerBlockReportRequestProto) ProtoMessage() {} -func (*TriggerBlockReportRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{14} } - -func (m *TriggerBlockReportRequestProto) GetIncremental() bool { - if m != nil && m.Incremental != nil { - return *m.Incremental - } - return false -} - -type TriggerBlockReportResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *TriggerBlockReportResponseProto) Reset() { *m = TriggerBlockReportResponseProto{} } -func (m *TriggerBlockReportResponseProto) String() string { return proto.CompactTextString(m) } -func (*TriggerBlockReportResponseProto) ProtoMessage() {} -func (*TriggerBlockReportResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{15} -} - -type GetBalancerBandwidthRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetBalancerBandwidthRequestProto) Reset() { *m = GetBalancerBandwidthRequestProto{} } -func (m *GetBalancerBandwidthRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetBalancerBandwidthRequestProto) ProtoMessage() {} -func (*GetBalancerBandwidthRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor6, []int{16} -} - -// * -// bandwidth - balancer bandwidth value of the datanode. 
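The heavy use of proto2 required fields in these messages has a runtime consequence: golang/protobuf refuses to serialize a message whose required fields are unset. A sketch against the bandwidth response defined just below (the exact error behavior and the 10 MB/s figure are illustrative assumptions):

package main

import (
	"fmt"

	"github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	"github.com/golang/protobuf/proto"
)

func main() {
	// bandwidth is a required field, so marshaling an empty message fails.
	bad := &hadoop_hdfs.GetBalancerBandwidthResponseProto{}
	if _, err := proto.Marshal(bad); err != nil {
		fmt.Println("marshal failed as expected:", err)
	}

	ok := &hadoop_hdfs.GetBalancerBandwidthResponseProto{
		Bandwidth: proto.Uint64(10 * 1024 * 1024), // hypothetical 10 MB/s cap
	}
	if b, err := proto.Marshal(ok); err == nil {
		fmt.Printf("encoded %d bytes\n", len(b))
	}
}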
-type GetBalancerBandwidthResponseProto struct {
-	Bandwidth        *uint64 `protobuf:"varint,1,req,name=bandwidth" json:"bandwidth,omitempty"`
-	XXX_unrecognized []byte  `json:"-"`
-}
-
-func (m *GetBalancerBandwidthResponseProto) Reset()         { *m = GetBalancerBandwidthResponseProto{} }
-func (m *GetBalancerBandwidthResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetBalancerBandwidthResponseProto) ProtoMessage()    {}
-func (*GetBalancerBandwidthResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor6, []int{17}
-}
-
-func (m *GetBalancerBandwidthResponseProto) GetBandwidth() uint64 {
-	if m != nil && m.Bandwidth != nil {
-		return *m.Bandwidth
-	}
-	return 0
-}
-
-func init() {
-	proto.RegisterType((*GetReplicaVisibleLengthRequestProto)(nil), "hadoop.hdfs.GetReplicaVisibleLengthRequestProto")
-	proto.RegisterType((*GetReplicaVisibleLengthResponseProto)(nil), "hadoop.hdfs.GetReplicaVisibleLengthResponseProto")
-	proto.RegisterType((*RefreshNamenodesRequestProto)(nil), "hadoop.hdfs.RefreshNamenodesRequestProto")
-	proto.RegisterType((*RefreshNamenodesResponseProto)(nil), "hadoop.hdfs.RefreshNamenodesResponseProto")
-	proto.RegisterType((*DeleteBlockPoolRequestProto)(nil), "hadoop.hdfs.DeleteBlockPoolRequestProto")
-	proto.RegisterType((*DeleteBlockPoolResponseProto)(nil), "hadoop.hdfs.DeleteBlockPoolResponseProto")
-	proto.RegisterType((*GetBlockLocalPathInfoRequestProto)(nil), "hadoop.hdfs.GetBlockLocalPathInfoRequestProto")
-	proto.RegisterType((*GetBlockLocalPathInfoResponseProto)(nil), "hadoop.hdfs.GetBlockLocalPathInfoResponseProto")
-	proto.RegisterType((*ShutdownDatanodeRequestProto)(nil), "hadoop.hdfs.ShutdownDatanodeRequestProto")
-	proto.RegisterType((*ShutdownDatanodeResponseProto)(nil), "hadoop.hdfs.ShutdownDatanodeResponseProto")
-	proto.RegisterType((*EvictWritersRequestProto)(nil), "hadoop.hdfs.EvictWritersRequestProto")
-	proto.RegisterType((*EvictWritersResponseProto)(nil), "hadoop.hdfs.EvictWritersResponseProto")
-	proto.RegisterType((*GetDatanodeInfoRequestProto)(nil), "hadoop.hdfs.GetDatanodeInfoRequestProto")
-	proto.RegisterType((*GetDatanodeInfoResponseProto)(nil), "hadoop.hdfs.GetDatanodeInfoResponseProto")
-	proto.RegisterType((*TriggerBlockReportRequestProto)(nil), "hadoop.hdfs.TriggerBlockReportRequestProto")
-	proto.RegisterType((*TriggerBlockReportResponseProto)(nil), "hadoop.hdfs.TriggerBlockReportResponseProto")
-	proto.RegisterType((*GetBalancerBandwidthRequestProto)(nil), "hadoop.hdfs.GetBalancerBandwidthRequestProto")
-	proto.RegisterType((*GetBalancerBandwidthResponseProto)(nil), "hadoop.hdfs.GetBalancerBandwidthResponseProto")
-}
-
-func init() { proto.RegisterFile("ClientDatanodeProtocol.proto", fileDescriptor6) }
-
-var fileDescriptor6 = []byte{
-	// 804 bytes of a gzipped FileDescriptorProto (raw descriptor bytes omitted here; binary, not human-readable)
-}
diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.proto
deleted file mode 100644
index e135df84fb3..00000000000
--- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientDatanodeProtocol.proto
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * These .proto interfaces are private and stable.
- * Please see http://wiki.apache.org/hadoop/Compatibility
- * for what changes are allowed for a *stable* .proto interface.
- */
-
-// This file contains protocol buffers that are used throughout HDFS -- i.e.
-// by the client, server, and data transfer protocols.
-
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
-option java_outer_classname = "ClientDatanodeProtocolProtos";
-option java_generic_services = true;
-option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
-
-import "Security.proto";
-import "hdfs.proto";
-import "ReconfigurationProtocol.proto";
-
-/**
- * block - block for which visible length is requested
- */
-message GetReplicaVisibleLengthRequestProto {
-  required ExtendedBlockProto block = 1;
-}
-
-/**
- * length - visible length of the block
- */
-message GetReplicaVisibleLengthResponseProto {
-  required uint64 length = 1;
-}
-
-/**
- * void request
- */
-message RefreshNamenodesRequestProto {
-}
-
-/**
- * void response
- */
-message RefreshNamenodesResponseProto {
-}
-
-/**
- * blockPool - block pool to be deleted
- * force - if false, delete the block pool only if it is empty.
- *         if true, delete the block pool even if it has blocks.
- */
-message DeleteBlockPoolRequestProto {
-  required string blockPool = 1;
-  required bool force = 2;
-}
-
-/**
- * void response
- */
-message DeleteBlockPoolResponseProto {
-}
-
-/**
- * Gets the file information where block and its metadata are stored
- * block - block for which path information is being requested
- * token - block token
- *
- * This message is deprecated in favor of file descriptor passing.
- */ -message GetBlockLocalPathInfoRequestProto { - required ExtendedBlockProto block = 1; - required hadoop.common.TokenProto token = 2; -} - -/** - * block - block for which file path information is being returned - * localPath - file path where the block data is stored - * localMetaPath - file path where the block meta data is stored - * - * This message is deprecated in favor of file descriptor passing. - */ -message GetBlockLocalPathInfoResponseProto { - required ExtendedBlockProto block = 1; - required string localPath = 2; - required string localMetaPath = 3; -} - -/** - * forUpgrade - if true, clients are advised to wait for restart and quick - * upgrade restart is instrumented. Otherwise, datanode does - * the regular shutdown. - */ -message ShutdownDatanodeRequestProto { - required bool forUpgrade = 1; -} - -message ShutdownDatanodeResponseProto { -} - -/** Tell datanode to evict active clients that are writing */ -message EvictWritersRequestProto { -} - -message EvictWritersResponseProto { -} - -/** - * Ping datanode for liveness and quick info - */ -message GetDatanodeInfoRequestProto { -} - -message GetDatanodeInfoResponseProto { - required DatanodeLocalInfoProto localInfo = 1; -} - - -message TriggerBlockReportRequestProto { - required bool incremental = 1; -} - -message TriggerBlockReportResponseProto { -} - -message GetBalancerBandwidthRequestProto { -} - -/** - * bandwidth - balancer bandwidth value of the datanode. - */ -message GetBalancerBandwidthResponseProto { - required uint64 bandwidth = 1; -} - -/** - * Protocol used from client to the Datanode. - * See the request and response for details of rpc call. - */ -service ClientDatanodeProtocolService { - /** - * Returns the visible length of the replica - */ - rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto) - returns(GetReplicaVisibleLengthResponseProto); - - /** - * Refresh the list of federated namenodes from updated configuration. - * Adds new namenodes and stops the deleted namenodes. - */ - rpc refreshNamenodes(RefreshNamenodesRequestProto) - returns(RefreshNamenodesResponseProto); - - /** - * Delete the block pool from the datanode. - */ - rpc deleteBlockPool(DeleteBlockPoolRequestProto) - returns(DeleteBlockPoolResponseProto); - - /** - * Retrieves the path names of the block file and metadata file stored on the - * local file system. - */ - rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto) - returns(GetBlockLocalPathInfoResponseProto); - - rpc shutdownDatanode(ShutdownDatanodeRequestProto) - returns(ShutdownDatanodeResponseProto); - - rpc evictWriters(EvictWritersRequestProto) - returns(EvictWritersResponseProto); - - rpc getDatanodeInfo(GetDatanodeInfoRequestProto) - returns(GetDatanodeInfoResponseProto); - - rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto) - returns(GetReconfigurationStatusResponseProto); - - rpc startReconfiguration(StartReconfigurationRequestProto) - returns(StartReconfigurationResponseProto); - - rpc listReconfigurableProperties( - ListReconfigurablePropertiesRequestProto) - returns(ListReconfigurablePropertiesResponseProto); - - rpc triggerBlockReport(TriggerBlockReportRequestProto) - returns(TriggerBlockReportResponseProto); - - /** - * Returns the balancer bandwidth value of datanode. 
- */ - rpc getBalancerBandwidth(GetBalancerBandwidthRequestProto) - returns(GetBalancerBandwidthResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.pb.go deleted file mode 100644 index 032e9fe456d..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.pb.go +++ /dev/null @@ -1,3910 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: ClientNamenodeProtocol.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/colinmarc/hdfs/protocol/hadoop_common" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type CreateFlagProto int32 - -const ( - CreateFlagProto_CREATE CreateFlagProto = 1 - CreateFlagProto_OVERWRITE CreateFlagProto = 2 - CreateFlagProto_APPEND CreateFlagProto = 4 - CreateFlagProto_LAZY_PERSIST CreateFlagProto = 16 - CreateFlagProto_NEW_BLOCK CreateFlagProto = 32 -) - -var CreateFlagProto_name = map[int32]string{ - 1: "CREATE", - 2: "OVERWRITE", - 4: "APPEND", - 16: "LAZY_PERSIST", - 32: "NEW_BLOCK", -} -var CreateFlagProto_value = map[string]int32{ - "CREATE": 1, - "OVERWRITE": 2, - "APPEND": 4, - "LAZY_PERSIST": 16, - "NEW_BLOCK": 32, -} - -func (x CreateFlagProto) Enum() *CreateFlagProto { - p := new(CreateFlagProto) - *p = x - return p -} -func (x CreateFlagProto) String() string { - return proto.EnumName(CreateFlagProto_name, int32(x)) -} -func (x *CreateFlagProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateFlagProto_value, data, "CreateFlagProto") - if err != nil { - return err - } - *x = CreateFlagProto(value) - return nil -} -func (CreateFlagProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } - -type DatanodeReportTypeProto int32 - -const ( - DatanodeReportTypeProto_ALL DatanodeReportTypeProto = 1 - DatanodeReportTypeProto_LIVE DatanodeReportTypeProto = 2 - DatanodeReportTypeProto_DEAD DatanodeReportTypeProto = 3 - DatanodeReportTypeProto_DECOMMISSIONING DatanodeReportTypeProto = 4 -) - -var DatanodeReportTypeProto_name = map[int32]string{ - 1: "ALL", - 2: "LIVE", - 3: "DEAD", - 4: "DECOMMISSIONING", -} -var DatanodeReportTypeProto_value = map[string]int32{ - "ALL": 1, - "LIVE": 2, - "DEAD": 3, - "DECOMMISSIONING": 4, -} - -func (x DatanodeReportTypeProto) Enum() *DatanodeReportTypeProto { - p := new(DatanodeReportTypeProto) - *p = x - return p -} -func (x DatanodeReportTypeProto) String() string { - return proto.EnumName(DatanodeReportTypeProto_name, int32(x)) -} -func (x *DatanodeReportTypeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DatanodeReportTypeProto_value, data, "DatanodeReportTypeProto") - if err != nil { - return err - } - *x = DatanodeReportTypeProto(value) - return nil -} -func (DatanodeReportTypeProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } - -type SafeModeActionProto int32 - -const ( - SafeModeActionProto_SAFEMODE_LEAVE SafeModeActionProto = 1 - SafeModeActionProto_SAFEMODE_ENTER SafeModeActionProto = 2 - SafeModeActionProto_SAFEMODE_GET SafeModeActionProto = 3 - SafeModeActionProto_SAFEMODE_FORCE_EXIT SafeModeActionProto = 4 -) - -var SafeModeActionProto_name = map[int32]string{ - 1: "SAFEMODE_LEAVE", - 2: "SAFEMODE_ENTER", - 3: "SAFEMODE_GET", - 4: 
"SAFEMODE_FORCE_EXIT", -} -var SafeModeActionProto_value = map[string]int32{ - "SAFEMODE_LEAVE": 1, - "SAFEMODE_ENTER": 2, - "SAFEMODE_GET": 3, - "SAFEMODE_FORCE_EXIT": 4, -} - -func (x SafeModeActionProto) Enum() *SafeModeActionProto { - p := new(SafeModeActionProto) - *p = x - return p -} -func (x SafeModeActionProto) String() string { - return proto.EnumName(SafeModeActionProto_name, int32(x)) -} -func (x *SafeModeActionProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SafeModeActionProto_value, data, "SafeModeActionProto") - if err != nil { - return err - } - *x = SafeModeActionProto(value) - return nil -} -func (SafeModeActionProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } - -type RollingUpgradeActionProto int32 - -const ( - RollingUpgradeActionProto_QUERY RollingUpgradeActionProto = 1 - RollingUpgradeActionProto_START RollingUpgradeActionProto = 2 - RollingUpgradeActionProto_FINALIZE RollingUpgradeActionProto = 3 -) - -var RollingUpgradeActionProto_name = map[int32]string{ - 1: "QUERY", - 2: "START", - 3: "FINALIZE", -} -var RollingUpgradeActionProto_value = map[string]int32{ - "QUERY": 1, - "START": 2, - "FINALIZE": 3, -} - -func (x RollingUpgradeActionProto) Enum() *RollingUpgradeActionProto { - p := new(RollingUpgradeActionProto) - *p = x - return p -} -func (x RollingUpgradeActionProto) String() string { - return proto.EnumName(RollingUpgradeActionProto_name, int32(x)) -} -func (x *RollingUpgradeActionProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RollingUpgradeActionProto_value, data, "RollingUpgradeActionProto") - if err != nil { - return err - } - *x = RollingUpgradeActionProto(value) - return nil -} -func (RollingUpgradeActionProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } - -type CacheFlagProto int32 - -const ( - CacheFlagProto_FORCE CacheFlagProto = 1 -) - -var CacheFlagProto_name = map[int32]string{ - 1: "FORCE", -} -var CacheFlagProto_value = map[string]int32{ - "FORCE": 1, -} - -func (x CacheFlagProto) Enum() *CacheFlagProto { - p := new(CacheFlagProto) - *p = x - return p -} -func (x CacheFlagProto) String() string { - return proto.EnumName(CacheFlagProto_name, int32(x)) -} -func (x *CacheFlagProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CacheFlagProto_value, data, "CacheFlagProto") - if err != nil { - return err - } - *x = CacheFlagProto(value) - return nil -} -func (CacheFlagProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } - -type GetBlockLocationsRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Offset *uint64 `protobuf:"varint,2,req,name=offset" json:"offset,omitempty"` - Length *uint64 `protobuf:"varint,3,req,name=length" json:"length,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetBlockLocationsRequestProto) Reset() { *m = GetBlockLocationsRequestProto{} } -func (m *GetBlockLocationsRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetBlockLocationsRequestProto) ProtoMessage() {} -func (*GetBlockLocationsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } - -func (m *GetBlockLocationsRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *GetBlockLocationsRequestProto) GetOffset() uint64 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return 0 -} - -func (m *GetBlockLocationsRequestProto) 
GetLength() uint64 { - if m != nil && m.Length != nil { - return *m.Length - } - return 0 -} - -type GetBlockLocationsResponseProto struct { - Locations *LocatedBlocksProto `protobuf:"bytes,1,opt,name=locations" json:"locations,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetBlockLocationsResponseProto) Reset() { *m = GetBlockLocationsResponseProto{} } -func (m *GetBlockLocationsResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetBlockLocationsResponseProto) ProtoMessage() {} -func (*GetBlockLocationsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } - -func (m *GetBlockLocationsResponseProto) GetLocations() *LocatedBlocksProto { - if m != nil { - return m.Locations - } - return nil -} - -type GetServerDefaultsRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServerDefaultsRequestProto) Reset() { *m = GetServerDefaultsRequestProto{} } -func (m *GetServerDefaultsRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetServerDefaultsRequestProto) ProtoMessage() {} -func (*GetServerDefaultsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } - -type GetServerDefaultsResponseProto struct { - ServerDefaults *FsServerDefaultsProto `protobuf:"bytes,1,req,name=serverDefaults" json:"serverDefaults,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetServerDefaultsResponseProto) Reset() { *m = GetServerDefaultsResponseProto{} } -func (m *GetServerDefaultsResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetServerDefaultsResponseProto) ProtoMessage() {} -func (*GetServerDefaultsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } - -func (m *GetServerDefaultsResponseProto) GetServerDefaults() *FsServerDefaultsProto { - if m != nil { - return m.ServerDefaults - } - return nil -} - -type CreateRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Masked *FsPermissionProto `protobuf:"bytes,2,req,name=masked" json:"masked,omitempty"` - ClientName *string `protobuf:"bytes,3,req,name=clientName" json:"clientName,omitempty"` - CreateFlag *uint32 `protobuf:"varint,4,req,name=createFlag" json:"createFlag,omitempty"` - CreateParent *bool `protobuf:"varint,5,req,name=createParent" json:"createParent,omitempty"` - Replication *uint32 `protobuf:"varint,6,req,name=replication" json:"replication,omitempty"` - BlockSize *uint64 `protobuf:"varint,7,req,name=blockSize" json:"blockSize,omitempty"` - CryptoProtocolVersion []CryptoProtocolVersionProto `protobuf:"varint,8,rep,name=cryptoProtocolVersion,enum=hadoop.hdfs.CryptoProtocolVersionProto" json:"cryptoProtocolVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateRequestProto) Reset() { *m = CreateRequestProto{} } -func (m *CreateRequestProto) String() string { return proto.CompactTextString(m) } -func (*CreateRequestProto) ProtoMessage() {} -func (*CreateRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{4} } - -func (m *CreateRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *CreateRequestProto) GetMasked() *FsPermissionProto { - if m != nil { - return m.Masked - } - return nil -} - -func (m *CreateRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *CreateRequestProto) GetCreateFlag() uint32 { - if m != nil && m.CreateFlag 
!= nil { - return *m.CreateFlag - } - return 0 -} - -func (m *CreateRequestProto) GetCreateParent() bool { - if m != nil && m.CreateParent != nil { - return *m.CreateParent - } - return false -} - -func (m *CreateRequestProto) GetReplication() uint32 { - if m != nil && m.Replication != nil { - return *m.Replication - } - return 0 -} - -func (m *CreateRequestProto) GetBlockSize() uint64 { - if m != nil && m.BlockSize != nil { - return *m.BlockSize - } - return 0 -} - -func (m *CreateRequestProto) GetCryptoProtocolVersion() []CryptoProtocolVersionProto { - if m != nil { - return m.CryptoProtocolVersion - } - return nil -} - -type CreateResponseProto struct { - Fs *HdfsFileStatusProto `protobuf:"bytes,1,opt,name=fs" json:"fs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateResponseProto) Reset() { *m = CreateResponseProto{} } -func (m *CreateResponseProto) String() string { return proto.CompactTextString(m) } -func (*CreateResponseProto) ProtoMessage() {} -func (*CreateResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{5} } - -func (m *CreateResponseProto) GetFs() *HdfsFileStatusProto { - if m != nil { - return m.Fs - } - return nil -} - -type AppendRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"` - Flag *uint32 `protobuf:"varint,3,opt,name=flag" json:"flag,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppendRequestProto) Reset() { *m = AppendRequestProto{} } -func (m *AppendRequestProto) String() string { return proto.CompactTextString(m) } -func (*AppendRequestProto) ProtoMessage() {} -func (*AppendRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } - -func (m *AppendRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *AppendRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *AppendRequestProto) GetFlag() uint32 { - if m != nil && m.Flag != nil { - return *m.Flag - } - return 0 -} - -type AppendResponseProto struct { - Block *LocatedBlockProto `protobuf:"bytes,1,opt,name=block" json:"block,omitempty"` - Stat *HdfsFileStatusProto `protobuf:"bytes,2,opt,name=stat" json:"stat,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppendResponseProto) Reset() { *m = AppendResponseProto{} } -func (m *AppendResponseProto) String() string { return proto.CompactTextString(m) } -func (*AppendResponseProto) ProtoMessage() {} -func (*AppendResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{7} } - -func (m *AppendResponseProto) GetBlock() *LocatedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -func (m *AppendResponseProto) GetStat() *HdfsFileStatusProto { - if m != nil { - return m.Stat - } - return nil -} - -type SetReplicationRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Replication *uint32 `protobuf:"varint,2,req,name=replication" json:"replication,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetReplicationRequestProto) Reset() { *m = SetReplicationRequestProto{} } -func (m *SetReplicationRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetReplicationRequestProto) ProtoMessage() {} -func (*SetReplicationRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{8} } - -func (m 
*SetReplicationRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetReplicationRequestProto) GetReplication() uint32 { - if m != nil && m.Replication != nil { - return *m.Replication - } - return 0 -} - -type SetReplicationResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetReplicationResponseProto) Reset() { *m = SetReplicationResponseProto{} } -func (m *SetReplicationResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetReplicationResponseProto) ProtoMessage() {} -func (*SetReplicationResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{9} } - -func (m *SetReplicationResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type SetStoragePolicyRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - PolicyName *string `protobuf:"bytes,2,req,name=policyName" json:"policyName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStoragePolicyRequestProto) Reset() { *m = SetStoragePolicyRequestProto{} } -func (m *SetStoragePolicyRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetStoragePolicyRequestProto) ProtoMessage() {} -func (*SetStoragePolicyRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{10} } - -func (m *SetStoragePolicyRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetStoragePolicyRequestProto) GetPolicyName() string { - if m != nil && m.PolicyName != nil { - return *m.PolicyName - } - return "" -} - -type SetStoragePolicyResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetStoragePolicyResponseProto) Reset() { *m = SetStoragePolicyResponseProto{} } -func (m *SetStoragePolicyResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetStoragePolicyResponseProto) ProtoMessage() {} -func (*SetStoragePolicyResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{11} } - -type UnsetStoragePolicyRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UnsetStoragePolicyRequestProto) Reset() { *m = UnsetStoragePolicyRequestProto{} } -func (m *UnsetStoragePolicyRequestProto) String() string { return proto.CompactTextString(m) } -func (*UnsetStoragePolicyRequestProto) ProtoMessage() {} -func (*UnsetStoragePolicyRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{12} } - -func (m *UnsetStoragePolicyRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type UnsetStoragePolicyResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *UnsetStoragePolicyResponseProto) Reset() { *m = UnsetStoragePolicyResponseProto{} } -func (m *UnsetStoragePolicyResponseProto) String() string { return proto.CompactTextString(m) } -func (*UnsetStoragePolicyResponseProto) ProtoMessage() {} -func (*UnsetStoragePolicyResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{13} -} - -type GetStoragePolicyRequestProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetStoragePolicyRequestProto) Reset() { *m = GetStoragePolicyRequestProto{} } -func (m 
*GetStoragePolicyRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetStoragePolicyRequestProto) ProtoMessage() {} -func (*GetStoragePolicyRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{14} } - -func (m *GetStoragePolicyRequestProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -type GetStoragePolicyResponseProto struct { - StoragePolicy *BlockStoragePolicyProto `protobuf:"bytes,1,req,name=storagePolicy" json:"storagePolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetStoragePolicyResponseProto) Reset() { *m = GetStoragePolicyResponseProto{} } -func (m *GetStoragePolicyResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetStoragePolicyResponseProto) ProtoMessage() {} -func (*GetStoragePolicyResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{15} } - -func (m *GetStoragePolicyResponseProto) GetStoragePolicy() *BlockStoragePolicyProto { - if m != nil { - return m.StoragePolicy - } - return nil -} - -type GetStoragePoliciesRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetStoragePoliciesRequestProto) Reset() { *m = GetStoragePoliciesRequestProto{} } -func (m *GetStoragePoliciesRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetStoragePoliciesRequestProto) ProtoMessage() {} -func (*GetStoragePoliciesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{16} } - -type GetStoragePoliciesResponseProto struct { - Policies []*BlockStoragePolicyProto `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetStoragePoliciesResponseProto) Reset() { *m = GetStoragePoliciesResponseProto{} } -func (m *GetStoragePoliciesResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetStoragePoliciesResponseProto) ProtoMessage() {} -func (*GetStoragePoliciesResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{17} -} - -func (m *GetStoragePoliciesResponseProto) GetPolicies() []*BlockStoragePolicyProto { - if m != nil { - return m.Policies - } - return nil -} - -type SetPermissionRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Permission *FsPermissionProto `protobuf:"bytes,2,req,name=permission" json:"permission,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetPermissionRequestProto) Reset() { *m = SetPermissionRequestProto{} } -func (m *SetPermissionRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetPermissionRequestProto) ProtoMessage() {} -func (*SetPermissionRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{18} } - -func (m *SetPermissionRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetPermissionRequestProto) GetPermission() *FsPermissionProto { - if m != nil { - return m.Permission - } - return nil -} - -type SetPermissionResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetPermissionResponseProto) Reset() { *m = SetPermissionResponseProto{} } -func (m *SetPermissionResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetPermissionResponseProto) ProtoMessage() {} -func (*SetPermissionResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{19} } - -type SetOwnerRequestProto struct { - Src *string 
`protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - Groupname *string `protobuf:"bytes,3,opt,name=groupname" json:"groupname,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetOwnerRequestProto) Reset() { *m = SetOwnerRequestProto{} } -func (m *SetOwnerRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetOwnerRequestProto) ProtoMessage() {} -func (*SetOwnerRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{20} } - -func (m *SetOwnerRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetOwnerRequestProto) GetUsername() string { - if m != nil && m.Username != nil { - return *m.Username - } - return "" -} - -func (m *SetOwnerRequestProto) GetGroupname() string { - if m != nil && m.Groupname != nil { - return *m.Groupname - } - return "" -} - -type SetOwnerResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetOwnerResponseProto) Reset() { *m = SetOwnerResponseProto{} } -func (m *SetOwnerResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetOwnerResponseProto) ProtoMessage() {} -func (*SetOwnerResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{21} } - -type AbandonBlockRequestProto struct { - B *ExtendedBlockProto `protobuf:"bytes,1,req,name=b" json:"b,omitempty"` - Src *string `protobuf:"bytes,2,req,name=src" json:"src,omitempty"` - Holder *string `protobuf:"bytes,3,req,name=holder" json:"holder,omitempty"` - FileId *uint64 `protobuf:"varint,4,opt,name=fileId,def=0" json:"fileId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AbandonBlockRequestProto) Reset() { *m = AbandonBlockRequestProto{} } -func (m *AbandonBlockRequestProto) String() string { return proto.CompactTextString(m) } -func (*AbandonBlockRequestProto) ProtoMessage() {} -func (*AbandonBlockRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{22} } - -const Default_AbandonBlockRequestProto_FileId uint64 = 0 - -func (m *AbandonBlockRequestProto) GetB() *ExtendedBlockProto { - if m != nil { - return m.B - } - return nil -} - -func (m *AbandonBlockRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *AbandonBlockRequestProto) GetHolder() string { - if m != nil && m.Holder != nil { - return *m.Holder - } - return "" -} - -func (m *AbandonBlockRequestProto) GetFileId() uint64 { - if m != nil && m.FileId != nil { - return *m.FileId - } - return Default_AbandonBlockRequestProto_FileId -} - -type AbandonBlockResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *AbandonBlockResponseProto) Reset() { *m = AbandonBlockResponseProto{} } -func (m *AbandonBlockResponseProto) String() string { return proto.CompactTextString(m) } -func (*AbandonBlockResponseProto) ProtoMessage() {} -func (*AbandonBlockResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{23} } - -type AddBlockRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"` - Previous *ExtendedBlockProto `protobuf:"bytes,3,opt,name=previous" json:"previous,omitempty"` - ExcludeNodes []*DatanodeInfoProto `protobuf:"bytes,4,rep,name=excludeNodes" json:"excludeNodes,omitempty"` - FileId *uint64 `protobuf:"varint,5,opt,name=fileId,def=0" 
json:"fileId,omitempty"` - FavoredNodes []string `protobuf:"bytes,6,rep,name=favoredNodes" json:"favoredNodes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddBlockRequestProto) Reset() { *m = AddBlockRequestProto{} } -func (m *AddBlockRequestProto) String() string { return proto.CompactTextString(m) } -func (*AddBlockRequestProto) ProtoMessage() {} -func (*AddBlockRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{24} } - -const Default_AddBlockRequestProto_FileId uint64 = 0 - -func (m *AddBlockRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *AddBlockRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *AddBlockRequestProto) GetPrevious() *ExtendedBlockProto { - if m != nil { - return m.Previous - } - return nil -} - -func (m *AddBlockRequestProto) GetExcludeNodes() []*DatanodeInfoProto { - if m != nil { - return m.ExcludeNodes - } - return nil -} - -func (m *AddBlockRequestProto) GetFileId() uint64 { - if m != nil && m.FileId != nil { - return *m.FileId - } - return Default_AddBlockRequestProto_FileId -} - -func (m *AddBlockRequestProto) GetFavoredNodes() []string { - if m != nil { - return m.FavoredNodes - } - return nil -} - -type AddBlockResponseProto struct { - Block *LocatedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AddBlockResponseProto) Reset() { *m = AddBlockResponseProto{} } -func (m *AddBlockResponseProto) String() string { return proto.CompactTextString(m) } -func (*AddBlockResponseProto) ProtoMessage() {} -func (*AddBlockResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{25} } - -func (m *AddBlockResponseProto) GetBlock() *LocatedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -type GetAdditionalDatanodeRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Blk *ExtendedBlockProto `protobuf:"bytes,2,req,name=blk" json:"blk,omitempty"` - Existings []*DatanodeInfoProto `protobuf:"bytes,3,rep,name=existings" json:"existings,omitempty"` - Excludes []*DatanodeInfoProto `protobuf:"bytes,4,rep,name=excludes" json:"excludes,omitempty"` - NumAdditionalNodes *uint32 `protobuf:"varint,5,req,name=numAdditionalNodes" json:"numAdditionalNodes,omitempty"` - ClientName *string `protobuf:"bytes,6,req,name=clientName" json:"clientName,omitempty"` - ExistingStorageUuids []string `protobuf:"bytes,7,rep,name=existingStorageUuids" json:"existingStorageUuids,omitempty"` - FileId *uint64 `protobuf:"varint,8,opt,name=fileId,def=0" json:"fileId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAdditionalDatanodeRequestProto) Reset() { *m = GetAdditionalDatanodeRequestProto{} } -func (m *GetAdditionalDatanodeRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetAdditionalDatanodeRequestProto) ProtoMessage() {} -func (*GetAdditionalDatanodeRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{26} -} - -const Default_GetAdditionalDatanodeRequestProto_FileId uint64 = 0 - -func (m *GetAdditionalDatanodeRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *GetAdditionalDatanodeRequestProto) GetBlk() *ExtendedBlockProto { - if m != nil { - return m.Blk - } - return nil -} - -func (m *GetAdditionalDatanodeRequestProto) GetExistings() 
[]*DatanodeInfoProto { - if m != nil { - return m.Existings - } - return nil -} - -func (m *GetAdditionalDatanodeRequestProto) GetExcludes() []*DatanodeInfoProto { - if m != nil { - return m.Excludes - } - return nil -} - -func (m *GetAdditionalDatanodeRequestProto) GetNumAdditionalNodes() uint32 { - if m != nil && m.NumAdditionalNodes != nil { - return *m.NumAdditionalNodes - } - return 0 -} - -func (m *GetAdditionalDatanodeRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *GetAdditionalDatanodeRequestProto) GetExistingStorageUuids() []string { - if m != nil { - return m.ExistingStorageUuids - } - return nil -} - -func (m *GetAdditionalDatanodeRequestProto) GetFileId() uint64 { - if m != nil && m.FileId != nil { - return *m.FileId - } - return Default_GetAdditionalDatanodeRequestProto_FileId -} - -type GetAdditionalDatanodeResponseProto struct { - Block *LocatedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAdditionalDatanodeResponseProto) Reset() { *m = GetAdditionalDatanodeResponseProto{} } -func (m *GetAdditionalDatanodeResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetAdditionalDatanodeResponseProto) ProtoMessage() {} -func (*GetAdditionalDatanodeResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{27} -} - -func (m *GetAdditionalDatanodeResponseProto) GetBlock() *LocatedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -type CompleteRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"` - Last *ExtendedBlockProto `protobuf:"bytes,3,opt,name=last" json:"last,omitempty"` - FileId *uint64 `protobuf:"varint,4,opt,name=fileId,def=0" json:"fileId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompleteRequestProto) Reset() { *m = CompleteRequestProto{} } -func (m *CompleteRequestProto) String() string { return proto.CompactTextString(m) } -func (*CompleteRequestProto) ProtoMessage() {} -func (*CompleteRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{28} } - -const Default_CompleteRequestProto_FileId uint64 = 0 - -func (m *CompleteRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *CompleteRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *CompleteRequestProto) GetLast() *ExtendedBlockProto { - if m != nil { - return m.Last - } - return nil -} - -func (m *CompleteRequestProto) GetFileId() uint64 { - if m != nil && m.FileId != nil { - return *m.FileId - } - return Default_CompleteRequestProto_FileId -} - -type CompleteResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CompleteResponseProto) Reset() { *m = CompleteResponseProto{} } -func (m *CompleteResponseProto) String() string { return proto.CompactTextString(m) } -func (*CompleteResponseProto) ProtoMessage() {} -func (*CompleteResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{29} } - -func (m *CompleteResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type ReportBadBlocksRequestProto struct { - Blocks []*LocatedBlockProto 
`protobuf:"bytes,1,rep,name=blocks" json:"blocks,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReportBadBlocksRequestProto) Reset() { *m = ReportBadBlocksRequestProto{} } -func (m *ReportBadBlocksRequestProto) String() string { return proto.CompactTextString(m) } -func (*ReportBadBlocksRequestProto) ProtoMessage() {} -func (*ReportBadBlocksRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{30} } - -func (m *ReportBadBlocksRequestProto) GetBlocks() []*LocatedBlockProto { - if m != nil { - return m.Blocks - } - return nil -} - -type ReportBadBlocksResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReportBadBlocksResponseProto) Reset() { *m = ReportBadBlocksResponseProto{} } -func (m *ReportBadBlocksResponseProto) String() string { return proto.CompactTextString(m) } -func (*ReportBadBlocksResponseProto) ProtoMessage() {} -func (*ReportBadBlocksResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{31} } - -type ConcatRequestProto struct { - Trg *string `protobuf:"bytes,1,req,name=trg" json:"trg,omitempty"` - Srcs []string `protobuf:"bytes,2,rep,name=srcs" json:"srcs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConcatRequestProto) Reset() { *m = ConcatRequestProto{} } -func (m *ConcatRequestProto) String() string { return proto.CompactTextString(m) } -func (*ConcatRequestProto) ProtoMessage() {} -func (*ConcatRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{32} } - -func (m *ConcatRequestProto) GetTrg() string { - if m != nil && m.Trg != nil { - return *m.Trg - } - return "" -} - -func (m *ConcatRequestProto) GetSrcs() []string { - if m != nil { - return m.Srcs - } - return nil -} - -type ConcatResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConcatResponseProto) Reset() { *m = ConcatResponseProto{} } -func (m *ConcatResponseProto) String() string { return proto.CompactTextString(m) } -func (*ConcatResponseProto) ProtoMessage() {} -func (*ConcatResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{33} } - -type TruncateRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - NewLength *uint64 `protobuf:"varint,2,req,name=newLength" json:"newLength,omitempty"` - ClientName *string `protobuf:"bytes,3,req,name=clientName" json:"clientName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TruncateRequestProto) Reset() { *m = TruncateRequestProto{} } -func (m *TruncateRequestProto) String() string { return proto.CompactTextString(m) } -func (*TruncateRequestProto) ProtoMessage() {} -func (*TruncateRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{34} } - -func (m *TruncateRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *TruncateRequestProto) GetNewLength() uint64 { - if m != nil && m.NewLength != nil { - return *m.NewLength - } - return 0 -} - -func (m *TruncateRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -type TruncateResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TruncateResponseProto) Reset() { *m = TruncateResponseProto{} } -func (m *TruncateResponseProto) String() string { return proto.CompactTextString(m) } -func (*TruncateResponseProto) ProtoMessage() {} -func (*TruncateResponseProto) Descriptor() 
([]byte, []int) { return fileDescriptor4, []int{35} } - -func (m *TruncateResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type RenameRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Dst *string `protobuf:"bytes,2,req,name=dst" json:"dst,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RenameRequestProto) Reset() { *m = RenameRequestProto{} } -func (m *RenameRequestProto) String() string { return proto.CompactTextString(m) } -func (*RenameRequestProto) ProtoMessage() {} -func (*RenameRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{36} } - -func (m *RenameRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *RenameRequestProto) GetDst() string { - if m != nil && m.Dst != nil { - return *m.Dst - } - return "" -} - -type RenameResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RenameResponseProto) Reset() { *m = RenameResponseProto{} } -func (m *RenameResponseProto) String() string { return proto.CompactTextString(m) } -func (*RenameResponseProto) ProtoMessage() {} -func (*RenameResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{37} } - -func (m *RenameResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type Rename2RequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Dst *string `protobuf:"bytes,2,req,name=dst" json:"dst,omitempty"` - OverwriteDest *bool `protobuf:"varint,3,req,name=overwriteDest" json:"overwriteDest,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Rename2RequestProto) Reset() { *m = Rename2RequestProto{} } -func (m *Rename2RequestProto) String() string { return proto.CompactTextString(m) } -func (*Rename2RequestProto) ProtoMessage() {} -func (*Rename2RequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{38} } - -func (m *Rename2RequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *Rename2RequestProto) GetDst() string { - if m != nil && m.Dst != nil { - return *m.Dst - } - return "" -} - -func (m *Rename2RequestProto) GetOverwriteDest() bool { - if m != nil && m.OverwriteDest != nil { - return *m.OverwriteDest - } - return false -} - -type Rename2ResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Rename2ResponseProto) Reset() { *m = Rename2ResponseProto{} } -func (m *Rename2ResponseProto) String() string { return proto.CompactTextString(m) } -func (*Rename2ResponseProto) ProtoMessage() {} -func (*Rename2ResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{39} } - -type DeleteRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Recursive *bool `protobuf:"varint,2,req,name=recursive" json:"recursive,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteRequestProto) Reset() { *m = DeleteRequestProto{} } -func (m *DeleteRequestProto) String() string { return proto.CompactTextString(m) } -func (*DeleteRequestProto) ProtoMessage() {} -func (*DeleteRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{40} } - -func (m *DeleteRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m 
*DeleteRequestProto) GetRecursive() bool { - if m != nil && m.Recursive != nil { - return *m.Recursive - } - return false -} - -type DeleteResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteResponseProto) Reset() { *m = DeleteResponseProto{} } -func (m *DeleteResponseProto) String() string { return proto.CompactTextString(m) } -func (*DeleteResponseProto) ProtoMessage() {} -func (*DeleteResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{41} } - -func (m *DeleteResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type MkdirsRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - Masked *FsPermissionProto `protobuf:"bytes,2,req,name=masked" json:"masked,omitempty"` - CreateParent *bool `protobuf:"varint,3,req,name=createParent" json:"createParent,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MkdirsRequestProto) Reset() { *m = MkdirsRequestProto{} } -func (m *MkdirsRequestProto) String() string { return proto.CompactTextString(m) } -func (*MkdirsRequestProto) ProtoMessage() {} -func (*MkdirsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{42} } - -func (m *MkdirsRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *MkdirsRequestProto) GetMasked() *FsPermissionProto { - if m != nil { - return m.Masked - } - return nil -} - -func (m *MkdirsRequestProto) GetCreateParent() bool { - if m != nil && m.CreateParent != nil { - return *m.CreateParent - } - return false -} - -type MkdirsResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MkdirsResponseProto) Reset() { *m = MkdirsResponseProto{} } -func (m *MkdirsResponseProto) String() string { return proto.CompactTextString(m) } -func (*MkdirsResponseProto) ProtoMessage() {} -func (*MkdirsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{43} } - -func (m *MkdirsResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type GetListingRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - StartAfter []byte `protobuf:"bytes,2,req,name=startAfter" json:"startAfter,omitempty"` - NeedLocation *bool `protobuf:"varint,3,req,name=needLocation" json:"needLocation,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetListingRequestProto) Reset() { *m = GetListingRequestProto{} } -func (m *GetListingRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetListingRequestProto) ProtoMessage() {} -func (*GetListingRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{44} } - -func (m *GetListingRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *GetListingRequestProto) GetStartAfter() []byte { - if m != nil { - return m.StartAfter - } - return nil -} - -func (m *GetListingRequestProto) GetNeedLocation() bool { - if m != nil && m.NeedLocation != nil { - return *m.NeedLocation - } - return false -} - -type GetListingResponseProto struct { - DirList *DirectoryListingProto `protobuf:"bytes,1,opt,name=dirList" json:"dirList,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetListingResponseProto) Reset() { 
*m = GetListingResponseProto{} } -func (m *GetListingResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetListingResponseProto) ProtoMessage() {} -func (*GetListingResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{45} } - -func (m *GetListingResponseProto) GetDirList() *DirectoryListingProto { - if m != nil { - return m.DirList - } - return nil -} - -type GetSnapshottableDirListingRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetSnapshottableDirListingRequestProto) Reset() { - *m = GetSnapshottableDirListingRequestProto{} -} -func (m *GetSnapshottableDirListingRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetSnapshottableDirListingRequestProto) ProtoMessage() {} -func (*GetSnapshottableDirListingRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{46} -} - -type GetSnapshottableDirListingResponseProto struct { - SnapshottableDirList *SnapshottableDirectoryListingProto `protobuf:"bytes,1,opt,name=snapshottableDirList" json:"snapshottableDirList,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetSnapshottableDirListingResponseProto) Reset() { - *m = GetSnapshottableDirListingResponseProto{} -} -func (m *GetSnapshottableDirListingResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetSnapshottableDirListingResponseProto) ProtoMessage() {} -func (*GetSnapshottableDirListingResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{47} -} - -func (m *GetSnapshottableDirListingResponseProto) GetSnapshottableDirList() *SnapshottableDirectoryListingProto { - if m != nil { - return m.SnapshottableDirList - } - return nil -} - -type GetSnapshotDiffReportRequestProto struct { - SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"` - FromSnapshot *string `protobuf:"bytes,2,req,name=fromSnapshot" json:"fromSnapshot,omitempty"` - ToSnapshot *string `protobuf:"bytes,3,req,name=toSnapshot" json:"toSnapshot,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetSnapshotDiffReportRequestProto) Reset() { *m = GetSnapshotDiffReportRequestProto{} } -func (m *GetSnapshotDiffReportRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetSnapshotDiffReportRequestProto) ProtoMessage() {} -func (*GetSnapshotDiffReportRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{48} -} - -func (m *GetSnapshotDiffReportRequestProto) GetSnapshotRoot() string { - if m != nil && m.SnapshotRoot != nil { - return *m.SnapshotRoot - } - return "" -} - -func (m *GetSnapshotDiffReportRequestProto) GetFromSnapshot() string { - if m != nil && m.FromSnapshot != nil { - return *m.FromSnapshot - } - return "" -} - -func (m *GetSnapshotDiffReportRequestProto) GetToSnapshot() string { - if m != nil && m.ToSnapshot != nil { - return *m.ToSnapshot - } - return "" -} - -type GetSnapshotDiffReportResponseProto struct { - DiffReport *SnapshotDiffReportProto `protobuf:"bytes,1,req,name=diffReport" json:"diffReport,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetSnapshotDiffReportResponseProto) Reset() { *m = GetSnapshotDiffReportResponseProto{} } -func (m *GetSnapshotDiffReportResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetSnapshotDiffReportResponseProto) ProtoMessage() {} -func (*GetSnapshotDiffReportResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{49} -} - -func (m 
*GetSnapshotDiffReportResponseProto) GetDiffReport() *SnapshotDiffReportProto { - if m != nil { - return m.DiffReport - } - return nil -} - -type RenewLeaseRequestProto struct { - ClientName *string `protobuf:"bytes,1,req,name=clientName" json:"clientName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RenewLeaseRequestProto) Reset() { *m = RenewLeaseRequestProto{} } -func (m *RenewLeaseRequestProto) String() string { return proto.CompactTextString(m) } -func (*RenewLeaseRequestProto) ProtoMessage() {} -func (*RenewLeaseRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{50} } - -func (m *RenewLeaseRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -type RenewLeaseResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RenewLeaseResponseProto) Reset() { *m = RenewLeaseResponseProto{} } -func (m *RenewLeaseResponseProto) String() string { return proto.CompactTextString(m) } -func (*RenewLeaseResponseProto) ProtoMessage() {} -func (*RenewLeaseResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{51} } - -type RecoverLeaseRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RecoverLeaseRequestProto) Reset() { *m = RecoverLeaseRequestProto{} } -func (m *RecoverLeaseRequestProto) String() string { return proto.CompactTextString(m) } -func (*RecoverLeaseRequestProto) ProtoMessage() {} -func (*RecoverLeaseRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{52} } - -func (m *RecoverLeaseRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *RecoverLeaseRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -type RecoverLeaseResponseProto struct { - Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RecoverLeaseResponseProto) Reset() { *m = RecoverLeaseResponseProto{} } -func (m *RecoverLeaseResponseProto) String() string { return proto.CompactTextString(m) } -func (*RecoverLeaseResponseProto) ProtoMessage() {} -func (*RecoverLeaseResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{53} } - -func (m *RecoverLeaseResponseProto) GetResult() bool { - if m != nil && m.Result != nil { - return *m.Result - } - return false -} - -type GetFsStatusRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetFsStatusRequestProto) Reset() { *m = GetFsStatusRequestProto{} } -func (m *GetFsStatusRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetFsStatusRequestProto) ProtoMessage() {} -func (*GetFsStatusRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{54} } - -type GetFsStatsResponseProto struct { - Capacity *uint64 `protobuf:"varint,1,req,name=capacity" json:"capacity,omitempty"` - Used *uint64 `protobuf:"varint,2,req,name=used" json:"used,omitempty"` - Remaining *uint64 `protobuf:"varint,3,req,name=remaining" json:"remaining,omitempty"` - UnderReplicated *uint64 `protobuf:"varint,4,req,name=under_replicated,json=underReplicated" json:"under_replicated,omitempty"` - CorruptBlocks *uint64 `protobuf:"varint,5,req,name=corrupt_blocks,json=corruptBlocks" 
json:"corrupt_blocks,omitempty"` - MissingBlocks *uint64 `protobuf:"varint,6,req,name=missing_blocks,json=missingBlocks" json:"missing_blocks,omitempty"` - MissingReplOneBlocks *uint64 `protobuf:"varint,7,opt,name=missing_repl_one_blocks,json=missingReplOneBlocks" json:"missing_repl_one_blocks,omitempty"` - BlocksInFuture *uint64 `protobuf:"varint,8,opt,name=blocks_in_future,json=blocksInFuture" json:"blocks_in_future,omitempty"` - PendingDeletionBlocks *uint64 `protobuf:"varint,9,opt,name=pending_deletion_blocks,json=pendingDeletionBlocks" json:"pending_deletion_blocks,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetFsStatsResponseProto) Reset() { *m = GetFsStatsResponseProto{} } -func (m *GetFsStatsResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetFsStatsResponseProto) ProtoMessage() {} -func (*GetFsStatsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{55} } - -func (m *GetFsStatsResponseProto) GetCapacity() uint64 { - if m != nil && m.Capacity != nil { - return *m.Capacity - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetUsed() uint64 { - if m != nil && m.Used != nil { - return *m.Used - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetRemaining() uint64 { - if m != nil && m.Remaining != nil { - return *m.Remaining - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetUnderReplicated() uint64 { - if m != nil && m.UnderReplicated != nil { - return *m.UnderReplicated - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetCorruptBlocks() uint64 { - if m != nil && m.CorruptBlocks != nil { - return *m.CorruptBlocks - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetMissingBlocks() uint64 { - if m != nil && m.MissingBlocks != nil { - return *m.MissingBlocks - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetMissingReplOneBlocks() uint64 { - if m != nil && m.MissingReplOneBlocks != nil { - return *m.MissingReplOneBlocks - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetBlocksInFuture() uint64 { - if m != nil && m.BlocksInFuture != nil { - return *m.BlocksInFuture - } - return 0 -} - -func (m *GetFsStatsResponseProto) GetPendingDeletionBlocks() uint64 { - if m != nil && m.PendingDeletionBlocks != nil { - return *m.PendingDeletionBlocks - } - return 0 -} - -type GetDatanodeReportRequestProto struct { - Type *DatanodeReportTypeProto `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.DatanodeReportTypeProto" json:"type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeReportRequestProto) Reset() { *m = GetDatanodeReportRequestProto{} } -func (m *GetDatanodeReportRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeReportRequestProto) ProtoMessage() {} -func (*GetDatanodeReportRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{56} } - -func (m *GetDatanodeReportRequestProto) GetType() DatanodeReportTypeProto { - if m != nil && m.Type != nil { - return *m.Type - } - return DatanodeReportTypeProto_ALL -} - -type GetDatanodeReportResponseProto struct { - Di []*DatanodeInfoProto `protobuf:"bytes,1,rep,name=di" json:"di,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeReportResponseProto) Reset() { *m = GetDatanodeReportResponseProto{} } -func (m *GetDatanodeReportResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeReportResponseProto) ProtoMessage() {} -func (*GetDatanodeReportResponseProto) Descriptor() ([]byte, []int) { return 
fileDescriptor4, []int{57} } - -func (m *GetDatanodeReportResponseProto) GetDi() []*DatanodeInfoProto { - if m != nil { - return m.Di - } - return nil -} - -type GetDatanodeStorageReportRequestProto struct { - Type *DatanodeReportTypeProto `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.DatanodeReportTypeProto" json:"type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeStorageReportRequestProto) Reset() { *m = GetDatanodeStorageReportRequestProto{} } -func (m *GetDatanodeStorageReportRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeStorageReportRequestProto) ProtoMessage() {} -func (*GetDatanodeStorageReportRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{58} -} - -func (m *GetDatanodeStorageReportRequestProto) GetType() DatanodeReportTypeProto { - if m != nil && m.Type != nil { - return *m.Type - } - return DatanodeReportTypeProto_ALL -} - -type DatanodeStorageReportProto struct { - DatanodeInfo *DatanodeInfoProto `protobuf:"bytes,1,req,name=datanodeInfo" json:"datanodeInfo,omitempty"` - StorageReports []*StorageReportProto `protobuf:"bytes,2,rep,name=storageReports" json:"storageReports,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DatanodeStorageReportProto) Reset() { *m = DatanodeStorageReportProto{} } -func (m *DatanodeStorageReportProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeStorageReportProto) ProtoMessage() {} -func (*DatanodeStorageReportProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{59} } - -func (m *DatanodeStorageReportProto) GetDatanodeInfo() *DatanodeInfoProto { - if m != nil { - return m.DatanodeInfo - } - return nil -} - -func (m *DatanodeStorageReportProto) GetStorageReports() []*StorageReportProto { - if m != nil { - return m.StorageReports - } - return nil -} - -type GetDatanodeStorageReportResponseProto struct { - DatanodeStorageReports []*DatanodeStorageReportProto `protobuf:"bytes,1,rep,name=datanodeStorageReports" json:"datanodeStorageReports,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetDatanodeStorageReportResponseProto) Reset() { *m = GetDatanodeStorageReportResponseProto{} } -func (m *GetDatanodeStorageReportResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetDatanodeStorageReportResponseProto) ProtoMessage() {} -func (*GetDatanodeStorageReportResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{60} -} - -func (m *GetDatanodeStorageReportResponseProto) GetDatanodeStorageReports() []*DatanodeStorageReportProto { - if m != nil { - return m.DatanodeStorageReports - } - return nil -} - -type GetPreferredBlockSizeRequestProto struct { - Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetPreferredBlockSizeRequestProto) Reset() { *m = GetPreferredBlockSizeRequestProto{} } -func (m *GetPreferredBlockSizeRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetPreferredBlockSizeRequestProto) ProtoMessage() {} -func (*GetPreferredBlockSizeRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor4, []int{61} -} - -func (m *GetPreferredBlockSizeRequestProto) GetFilename() string { - if m != nil && m.Filename != nil { - return *m.Filename - } - return "" -} - -type GetPreferredBlockSizeResponseProto struct { - Bsize *uint64 `protobuf:"varint,1,req,name=bsize" json:"bsize,omitempty"` - XXX_unrecognized []byte `json:"-"` -} 
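The deleted proto2 bindings above all follow the same generated idioms: every field is a pointer, every getter is nil-safe and falls back to the declared default (the Default_* constants such as Default_AbandonBlockRequestProto_FileId), and enum values carry an Enum() helper that returns a pointer suitable for assignment into request structs. A minimal sketch of driving these types with github.com/golang/protobuf/proto, assuming the vendored hadoop_hdfs package as it stood before this removal; the hdfs import alias and the "/user/demo/file" path are illustrative, not taken from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
)

func main() {
	// Nil-safe accessors: the generated getters check both the receiver
	// and the field pointer, so even a nil message returns the declared
	// default (def=0 on fileId) instead of panicking.
	var abandon *hdfs.AbandonBlockRequestProto
	fmt.Println(abandon.GetFileId()) // 0, from Default_AbandonBlockRequestProto_FileId

	// Scalar fields are pointers; proto.String/proto.Uint64 wrap literals.
	req := &hdfs.GetBlockLocationsRequestProto{
		Src:    proto.String("/user/demo/file"),
		Offset: proto.Uint64(0),
		Length: proto.Uint64(1 << 20),
	}

	// Round-trip through the wire format the deleted .pb.go implements;
	// Marshal fails if any required field were left unset.
	buf, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	var decoded hdfs.GetBlockLocationsRequestProto
	if err := proto.Unmarshal(buf, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetSrc(), decoded.GetLength())

	// CreateFlagProto values are powers of two (CREATE=1, OVERWRITE=2, ...),
	// which suggests the uint32 createFlag field is an OR-ed bitmask of them.
	flags := uint32(hdfs.CreateFlagProto_CREATE) | uint32(hdfs.CreateFlagProto_OVERWRITE)
	create := &hdfs.CreateRequestProto{
		Src:        proto.String("/user/demo/file"),
		CreateFlag: proto.Uint32(flags),
	}
	fmt.Println(create.GetCreateFlag()) // 3
}

The pointer-everywhere layout is what lets proto2 distinguish "unset" from a genuine zero value, which the optional/required semantics and declared defaults in the generated code above depend on.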
-
-func (m *GetPreferredBlockSizeResponseProto) Reset() { *m = GetPreferredBlockSizeResponseProto{} }
-func (m *GetPreferredBlockSizeResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetPreferredBlockSizeResponseProto) ProtoMessage() {}
-func (*GetPreferredBlockSizeResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{62}
-}
-
-func (m *GetPreferredBlockSizeResponseProto) GetBsize() uint64 {
-	if m != nil && m.Bsize != nil {
-		return *m.Bsize
-	}
-	return 0
-}
-
-type SetSafeModeRequestProto struct {
-	Action *SafeModeActionProto `protobuf:"varint,1,req,name=action,enum=hadoop.hdfs.SafeModeActionProto" json:"action,omitempty"`
-	Checked *bool `protobuf:"varint,2,opt,name=checked,def=0" json:"checked,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetSafeModeRequestProto) Reset() { *m = SetSafeModeRequestProto{} }
-func (m *SetSafeModeRequestProto) String() string { return proto.CompactTextString(m) }
-func (*SetSafeModeRequestProto) ProtoMessage() {}
-func (*SetSafeModeRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{63} }
-
-const Default_SetSafeModeRequestProto_Checked bool = false
-
-func (m *SetSafeModeRequestProto) GetAction() SafeModeActionProto {
-	if m != nil && m.Action != nil {
-		return *m.Action
-	}
-	return SafeModeActionProto_SAFEMODE_LEAVE
-}
-
-func (m *SetSafeModeRequestProto) GetChecked() bool {
-	if m != nil && m.Checked != nil {
-		return *m.Checked
-	}
-	return Default_SetSafeModeRequestProto_Checked
-}
-
-type SetSafeModeResponseProto struct {
-	Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetSafeModeResponseProto) Reset() { *m = SetSafeModeResponseProto{} }
-func (m *SetSafeModeResponseProto) String() string { return proto.CompactTextString(m) }
-func (*SetSafeModeResponseProto) ProtoMessage() {}
-func (*SetSafeModeResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{64} }
-
-func (m *SetSafeModeResponseProto) GetResult() bool {
-	if m != nil && m.Result != nil {
-		return *m.Result
-	}
-	return false
-}
-
-type SaveNamespaceRequestProto struct {
-	TimeWindow *uint64 `protobuf:"varint,1,opt,name=timeWindow,def=0" json:"timeWindow,omitempty"`
-	TxGap *uint64 `protobuf:"varint,2,opt,name=txGap,def=0" json:"txGap,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SaveNamespaceRequestProto) Reset() { *m = SaveNamespaceRequestProto{} }
-func (m *SaveNamespaceRequestProto) String() string { return proto.CompactTextString(m) }
-func (*SaveNamespaceRequestProto) ProtoMessage() {}
-func (*SaveNamespaceRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{65} }
-
-const Default_SaveNamespaceRequestProto_TimeWindow uint64 = 0
-const Default_SaveNamespaceRequestProto_TxGap uint64 = 0
-
-func (m *SaveNamespaceRequestProto) GetTimeWindow() uint64 {
-	if m != nil && m.TimeWindow != nil {
-		return *m.TimeWindow
-	}
-	return Default_SaveNamespaceRequestProto_TimeWindow
-}
-
-func (m *SaveNamespaceRequestProto) GetTxGap() uint64 {
-	if m != nil && m.TxGap != nil {
-		return *m.TxGap
-	}
-	return Default_SaveNamespaceRequestProto_TxGap
-}
-
-type SaveNamespaceResponseProto struct {
-	Saved *bool `protobuf:"varint,1,opt,name=saved,def=1" json:"saved,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SaveNamespaceResponseProto) Reset() { *m = SaveNamespaceResponseProto{} }
-func (m *SaveNamespaceResponseProto) String() string { return proto.CompactTextString(m) }
-func (*SaveNamespaceResponseProto) ProtoMessage() {}
-func (*SaveNamespaceResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{66} }
-
-const Default_SaveNamespaceResponseProto_Saved bool = true
-
-func (m *SaveNamespaceResponseProto) GetSaved() bool {
-	if m != nil && m.Saved != nil {
-		return *m.Saved
-	}
-	return Default_SaveNamespaceResponseProto_Saved
-}
-
-type RollEditsRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RollEditsRequestProto) Reset() { *m = RollEditsRequestProto{} }
-func (m *RollEditsRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RollEditsRequestProto) ProtoMessage() {}
-func (*RollEditsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{67} }
-
-type RollEditsResponseProto struct {
-	NewSegmentTxId *uint64 `protobuf:"varint,1,req,name=newSegmentTxId" json:"newSegmentTxId,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RollEditsResponseProto) Reset() { *m = RollEditsResponseProto{} }
-func (m *RollEditsResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RollEditsResponseProto) ProtoMessage() {}
-func (*RollEditsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{68} }
-
-func (m *RollEditsResponseProto) GetNewSegmentTxId() uint64 {
-	if m != nil && m.NewSegmentTxId != nil {
-		return *m.NewSegmentTxId
-	}
-	return 0
-}
-
-type RestoreFailedStorageRequestProto struct {
-	Arg *string `protobuf:"bytes,1,req,name=arg" json:"arg,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RestoreFailedStorageRequestProto) Reset() { *m = RestoreFailedStorageRequestProto{} }
-func (m *RestoreFailedStorageRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RestoreFailedStorageRequestProto) ProtoMessage() {}
-func (*RestoreFailedStorageRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{69}
-}
-
-func (m *RestoreFailedStorageRequestProto) GetArg() string {
-	if m != nil && m.Arg != nil {
-		return *m.Arg
-	}
-	return ""
-}
-
-type RestoreFailedStorageResponseProto struct {
-	Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RestoreFailedStorageResponseProto) Reset() { *m = RestoreFailedStorageResponseProto{} }
-func (m *RestoreFailedStorageResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RestoreFailedStorageResponseProto) ProtoMessage() {}
-func (*RestoreFailedStorageResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{70}
-}
-
-func (m *RestoreFailedStorageResponseProto) GetResult() bool {
-	if m != nil && m.Result != nil {
-		return *m.Result
-	}
-	return false
-}
-
-type RefreshNodesRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RefreshNodesRequestProto) Reset() { *m = RefreshNodesRequestProto{} }
-func (m *RefreshNodesRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RefreshNodesRequestProto) ProtoMessage() {}
-func (*RefreshNodesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{71} }
-
-type RefreshNodesResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RefreshNodesResponseProto) Reset() { *m = RefreshNodesResponseProto{} }
-func (m *RefreshNodesResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RefreshNodesResponseProto) ProtoMessage() {}
-func (*RefreshNodesResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{72} }
-
-type FinalizeUpgradeRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FinalizeUpgradeRequestProto) Reset() { *m = FinalizeUpgradeRequestProto{} }
-func (m *FinalizeUpgradeRequestProto) String() string { return proto.CompactTextString(m) }
-func (*FinalizeUpgradeRequestProto) ProtoMessage() {}
-func (*FinalizeUpgradeRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{73} }
-
-type FinalizeUpgradeResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FinalizeUpgradeResponseProto) Reset() { *m = FinalizeUpgradeResponseProto{} }
-func (m *FinalizeUpgradeResponseProto) String() string { return proto.CompactTextString(m) }
-func (*FinalizeUpgradeResponseProto) ProtoMessage() {}
-func (*FinalizeUpgradeResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{74} }
-
-type RollingUpgradeRequestProto struct {
-	Action *RollingUpgradeActionProto `protobuf:"varint,1,req,name=action,enum=hadoop.hdfs.RollingUpgradeActionProto" json:"action,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RollingUpgradeRequestProto) Reset() { *m = RollingUpgradeRequestProto{} }
-func (m *RollingUpgradeRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RollingUpgradeRequestProto) ProtoMessage() {}
-func (*RollingUpgradeRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{75} }
-
-func (m *RollingUpgradeRequestProto) GetAction() RollingUpgradeActionProto {
-	if m != nil && m.Action != nil {
-		return *m.Action
-	}
-	return RollingUpgradeActionProto_QUERY
-}
-
-type RollingUpgradeInfoProto struct {
-	Status *RollingUpgradeStatusProto `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
-	StartTime *uint64 `protobuf:"varint,2,req,name=startTime" json:"startTime,omitempty"`
-	FinalizeTime *uint64 `protobuf:"varint,3,req,name=finalizeTime" json:"finalizeTime,omitempty"`
-	CreatedRollbackImages *bool `protobuf:"varint,4,req,name=createdRollbackImages" json:"createdRollbackImages,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RollingUpgradeInfoProto) Reset() { *m = RollingUpgradeInfoProto{} }
-func (m *RollingUpgradeInfoProto) String() string { return proto.CompactTextString(m) }
-func (*RollingUpgradeInfoProto) ProtoMessage() {}
-func (*RollingUpgradeInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{76} }
-
-func (m *RollingUpgradeInfoProto) GetStatus() *RollingUpgradeStatusProto {
-	if m != nil {
-		return m.Status
-	}
-	return nil
-}
-
-func (m *RollingUpgradeInfoProto) GetStartTime() uint64 {
-	if m != nil && m.StartTime != nil {
-		return *m.StartTime
-	}
-	return 0
-}
-
-func (m *RollingUpgradeInfoProto) GetFinalizeTime() uint64 {
-	if m != nil && m.FinalizeTime != nil {
-		return *m.FinalizeTime
-	}
-	return 0
-}
-
-func (m *RollingUpgradeInfoProto) GetCreatedRollbackImages() bool {
-	if m != nil && m.CreatedRollbackImages != nil {
-		return *m.CreatedRollbackImages
-	}
-	return false
-}
-
-type RollingUpgradeResponseProto struct {
-	RollingUpgradeInfo *RollingUpgradeInfoProto `protobuf:"bytes,1,opt,name=rollingUpgradeInfo" json:"rollingUpgradeInfo,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RollingUpgradeResponseProto) Reset() { *m = RollingUpgradeResponseProto{} }
-func (m *RollingUpgradeResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RollingUpgradeResponseProto) ProtoMessage() {}
-func (*RollingUpgradeResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{77} }
-
-func (m *RollingUpgradeResponseProto) GetRollingUpgradeInfo() *RollingUpgradeInfoProto {
-	if m != nil {
-		return m.RollingUpgradeInfo
-	}
-	return nil
-}
-
-type ListCorruptFileBlocksRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	Cookie *string `protobuf:"bytes,2,opt,name=cookie" json:"cookie,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCorruptFileBlocksRequestProto) Reset() { *m = ListCorruptFileBlocksRequestProto{} }
-func (m *ListCorruptFileBlocksRequestProto) String() string { return proto.CompactTextString(m) }
-func (*ListCorruptFileBlocksRequestProto) ProtoMessage() {}
-func (*ListCorruptFileBlocksRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{78}
-}
-
-func (m *ListCorruptFileBlocksRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-func (m *ListCorruptFileBlocksRequestProto) GetCookie() string {
-	if m != nil && m.Cookie != nil {
-		return *m.Cookie
-	}
-	return ""
-}
-
-type ListCorruptFileBlocksResponseProto struct {
-	Corrupt *CorruptFileBlocksProto `protobuf:"bytes,1,req,name=corrupt" json:"corrupt,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCorruptFileBlocksResponseProto) Reset() { *m = ListCorruptFileBlocksResponseProto{} }
-func (m *ListCorruptFileBlocksResponseProto) String() string { return proto.CompactTextString(m) }
-func (*ListCorruptFileBlocksResponseProto) ProtoMessage() {}
-func (*ListCorruptFileBlocksResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{79}
-}
-
-func (m *ListCorruptFileBlocksResponseProto) GetCorrupt() *CorruptFileBlocksProto {
-	if m != nil {
-		return m.Corrupt
-	}
-	return nil
-}
-
-type MetaSaveRequestProto struct {
-	Filename *string `protobuf:"bytes,1,req,name=filename" json:"filename,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MetaSaveRequestProto) Reset() { *m = MetaSaveRequestProto{} }
-func (m *MetaSaveRequestProto) String() string { return proto.CompactTextString(m) }
-func (*MetaSaveRequestProto) ProtoMessage() {}
-func (*MetaSaveRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{80} }
-
-func (m *MetaSaveRequestProto) GetFilename() string {
-	if m != nil && m.Filename != nil {
-		return *m.Filename
-	}
-	return ""
-}
-
-type MetaSaveResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MetaSaveResponseProto) Reset() { *m = MetaSaveResponseProto{} }
-func (m *MetaSaveResponseProto) String() string { return proto.CompactTextString(m) }
-func (*MetaSaveResponseProto) ProtoMessage() {}
-func (*MetaSaveResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{81} }
-
-type GetFileInfoRequestProto struct {
-	Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetFileInfoRequestProto) Reset() { *m = GetFileInfoRequestProto{} }
-func (m *GetFileInfoRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetFileInfoRequestProto) ProtoMessage() {}
-func (*GetFileInfoRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{82} }
-
-func (m *GetFileInfoRequestProto) GetSrc() string {
-	if m != nil && m.Src != nil {
-		return *m.Src
-	}
-	return ""
-}
-
-type GetFileInfoResponseProto struct {
-	Fs *HdfsFileStatusProto `protobuf:"bytes,1,opt,name=fs" json:"fs,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetFileInfoResponseProto) Reset() { *m = GetFileInfoResponseProto{} }
-func (m *GetFileInfoResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetFileInfoResponseProto) ProtoMessage() {}
-func (*GetFileInfoResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{83} }
-
-func (m *GetFileInfoResponseProto) GetFs() *HdfsFileStatusProto {
-	if m != nil {
-		return m.Fs
-	}
-	return nil
-}
-
-type IsFileClosedRequestProto struct {
-	Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *IsFileClosedRequestProto) Reset() { *m = IsFileClosedRequestProto{} }
-func (m *IsFileClosedRequestProto) String() string { return proto.CompactTextString(m) }
-func (*IsFileClosedRequestProto) ProtoMessage() {}
-func (*IsFileClosedRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{84} }
-
-func (m *IsFileClosedRequestProto) GetSrc() string {
-	if m != nil && m.Src != nil {
-		return *m.Src
-	}
-	return ""
-}
-
-type IsFileClosedResponseProto struct {
-	Result *bool `protobuf:"varint,1,req,name=result" json:"result,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *IsFileClosedResponseProto) Reset() { *m = IsFileClosedResponseProto{} }
-func (m *IsFileClosedResponseProto) String() string { return proto.CompactTextString(m) }
-func (*IsFileClosedResponseProto) ProtoMessage() {}
-func (*IsFileClosedResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{85} }
-
-func (m *IsFileClosedResponseProto) GetResult() bool {
-	if m != nil && m.Result != nil {
-		return *m.Result
-	}
-	return false
-}
-
-type CacheDirectiveInfoProto struct {
-	Id *int64 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
-	Path *string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"`
-	Replication *uint32 `protobuf:"varint,3,opt,name=replication" json:"replication,omitempty"`
-	Pool *string `protobuf:"bytes,4,opt,name=pool" json:"pool,omitempty"`
-	Expiration *CacheDirectiveInfoExpirationProto `protobuf:"bytes,5,opt,name=expiration" json:"expiration,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CacheDirectiveInfoProto) Reset() { *m = CacheDirectiveInfoProto{} }
-func (m *CacheDirectiveInfoProto) String() string { return proto.CompactTextString(m) }
-func (*CacheDirectiveInfoProto) ProtoMessage() {}
-func (*CacheDirectiveInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{86} }
-
-func (m *CacheDirectiveInfoProto) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-func (m *CacheDirectiveInfoProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-func (m *CacheDirectiveInfoProto) GetReplication() uint32 {
-	if m != nil && m.Replication != nil {
-		return *m.Replication
-	}
-	return 0
-}
-
-func (m *CacheDirectiveInfoProto) GetPool() string {
-	if m != nil && m.Pool != nil {
-		return *m.Pool
-	}
-	return ""
-}
-
-func (m *CacheDirectiveInfoProto) GetExpiration() *CacheDirectiveInfoExpirationProto {
-	if m != nil {
-		return m.Expiration
-	}
-	return nil
-}
-
-type CacheDirectiveInfoExpirationProto struct {
-	Millis *int64 `protobuf:"varint,1,req,name=millis" json:"millis,omitempty"`
-	IsRelative *bool `protobuf:"varint,2,req,name=isRelative" json:"isRelative,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CacheDirectiveInfoExpirationProto) Reset() { *m = CacheDirectiveInfoExpirationProto{} }
-func (m *CacheDirectiveInfoExpirationProto) String() string { return proto.CompactTextString(m) }
-func (*CacheDirectiveInfoExpirationProto) ProtoMessage() {}
-func (*CacheDirectiveInfoExpirationProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{87}
-}
-
-func (m *CacheDirectiveInfoExpirationProto) GetMillis() int64 {
-	if m != nil && m.Millis != nil {
-		return *m.Millis
-	}
-	return 0
-}
-
-func (m *CacheDirectiveInfoExpirationProto) GetIsRelative() bool {
-	if m != nil && m.IsRelative != nil {
-		return *m.IsRelative
-	}
-	return false
-}
-
-type CacheDirectiveStatsProto struct {
-	BytesNeeded *int64 `protobuf:"varint,1,req,name=bytesNeeded" json:"bytesNeeded,omitempty"`
-	BytesCached *int64 `protobuf:"varint,2,req,name=bytesCached" json:"bytesCached,omitempty"`
-	FilesNeeded *int64 `protobuf:"varint,3,req,name=filesNeeded" json:"filesNeeded,omitempty"`
-	FilesCached *int64 `protobuf:"varint,4,req,name=filesCached" json:"filesCached,omitempty"`
-	HasExpired *bool `protobuf:"varint,5,req,name=hasExpired" json:"hasExpired,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CacheDirectiveStatsProto) Reset() { *m = CacheDirectiveStatsProto{} }
-func (m *CacheDirectiveStatsProto) String() string { return proto.CompactTextString(m) }
-func (*CacheDirectiveStatsProto) ProtoMessage() {}
-func (*CacheDirectiveStatsProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{88} }
-
-func (m *CacheDirectiveStatsProto) GetBytesNeeded() int64 {
-	if m != nil && m.BytesNeeded != nil {
-		return *m.BytesNeeded
-	}
-	return 0
-}
-
-func (m *CacheDirectiveStatsProto) GetBytesCached() int64 {
-	if m != nil && m.BytesCached != nil {
-		return *m.BytesCached
-	}
-	return 0
-}
-
-func (m *CacheDirectiveStatsProto) GetFilesNeeded() int64 {
-	if m != nil && m.FilesNeeded != nil {
-		return *m.FilesNeeded
-	}
-	return 0
-}
-
-func (m *CacheDirectiveStatsProto) GetFilesCached() int64 {
-	if m != nil && m.FilesCached != nil {
-		return *m.FilesCached
-	}
-	return 0
-}
-
-func (m *CacheDirectiveStatsProto) GetHasExpired() bool {
-	if m != nil && m.HasExpired != nil {
-		return *m.HasExpired
-	}
-	return false
-}
-
-type AddCacheDirectiveRequestProto struct {
-	Info *CacheDirectiveInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	CacheFlags *uint32 `protobuf:"varint,2,opt,name=cacheFlags" json:"cacheFlags,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddCacheDirectiveRequestProto) Reset() { *m = AddCacheDirectiveRequestProto{} }
-func (m *AddCacheDirectiveRequestProto) String() string { return proto.CompactTextString(m) }
-func (*AddCacheDirectiveRequestProto) ProtoMessage() {}
-func (*AddCacheDirectiveRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{89} }
-
-func (m *AddCacheDirectiveRequestProto) GetInfo() *CacheDirectiveInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-func (m *AddCacheDirectiveRequestProto) GetCacheFlags() uint32 {
-	if m != nil && m.CacheFlags != nil {
-		return *m.CacheFlags
-	}
-	return 0
-}
-
-type AddCacheDirectiveResponseProto struct {
-	Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddCacheDirectiveResponseProto) Reset() { *m = AddCacheDirectiveResponseProto{} }
-func (m *AddCacheDirectiveResponseProto) String() string { return proto.CompactTextString(m) }
-func (*AddCacheDirectiveResponseProto) ProtoMessage() {}
-func (*AddCacheDirectiveResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{90} }
-
-func (m *AddCacheDirectiveResponseProto) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-type ModifyCacheDirectiveRequestProto struct {
-	Info *CacheDirectiveInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	CacheFlags *uint32 `protobuf:"varint,2,opt,name=cacheFlags" json:"cacheFlags,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ModifyCacheDirectiveRequestProto) Reset() { *m = ModifyCacheDirectiveRequestProto{} }
-func (m *ModifyCacheDirectiveRequestProto) String() string { return proto.CompactTextString(m) }
-func (*ModifyCacheDirectiveRequestProto) ProtoMessage() {}
-func (*ModifyCacheDirectiveRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{91}
-}
-
-func (m *ModifyCacheDirectiveRequestProto) GetInfo() *CacheDirectiveInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-func (m *ModifyCacheDirectiveRequestProto) GetCacheFlags() uint32 {
-	if m != nil && m.CacheFlags != nil {
-		return *m.CacheFlags
-	}
-	return 0
-}
-
-type ModifyCacheDirectiveResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ModifyCacheDirectiveResponseProto) Reset() { *m = ModifyCacheDirectiveResponseProto{} }
-func (m *ModifyCacheDirectiveResponseProto) String() string { return proto.CompactTextString(m) }
-func (*ModifyCacheDirectiveResponseProto) ProtoMessage() {}
-func (*ModifyCacheDirectiveResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{92}
-}
-
-type RemoveCacheDirectiveRequestProto struct {
-	Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RemoveCacheDirectiveRequestProto) Reset() { *m = RemoveCacheDirectiveRequestProto{} }
-func (m *RemoveCacheDirectiveRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RemoveCacheDirectiveRequestProto) ProtoMessage() {}
-func (*RemoveCacheDirectiveRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{93}
-}
-
-func (m *RemoveCacheDirectiveRequestProto) GetId() int64 {
-	if m != nil && m.Id != nil {
-		return *m.Id
-	}
-	return 0
-}
-
-type RemoveCacheDirectiveResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RemoveCacheDirectiveResponseProto) Reset() { *m = RemoveCacheDirectiveResponseProto{} }
-func (m *RemoveCacheDirectiveResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RemoveCacheDirectiveResponseProto) ProtoMessage() {}
-func (*RemoveCacheDirectiveResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{94}
-}
-
-type ListCacheDirectivesRequestProto struct {
-	PrevId *int64 `protobuf:"varint,1,req,name=prevId" json:"prevId,omitempty"`
-	Filter *CacheDirectiveInfoProto `protobuf:"bytes,2,req,name=filter" json:"filter,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCacheDirectivesRequestProto) Reset() { *m = ListCacheDirectivesRequestProto{} }
-func (m *ListCacheDirectivesRequestProto) String() string { return proto.CompactTextString(m) }
-func (*ListCacheDirectivesRequestProto) ProtoMessage() {}
-func (*ListCacheDirectivesRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{95}
-}
-
-func (m *ListCacheDirectivesRequestProto) GetPrevId() int64 {
-	if m != nil && m.PrevId != nil {
-		return *m.PrevId
-	}
-	return 0
-}
-
-func (m *ListCacheDirectivesRequestProto) GetFilter() *CacheDirectiveInfoProto {
-	if m != nil {
-		return m.Filter
-	}
-	return nil
-}
-
-type CacheDirectiveEntryProto struct {
-	Info *CacheDirectiveInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	Stats *CacheDirectiveStatsProto `protobuf:"bytes,2,req,name=stats" json:"stats,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CacheDirectiveEntryProto) Reset() { *m = CacheDirectiveEntryProto{} }
-func (m *CacheDirectiveEntryProto) String() string { return proto.CompactTextString(m) }
-func (*CacheDirectiveEntryProto) ProtoMessage() {}
-func (*CacheDirectiveEntryProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{96} }
-
-func (m *CacheDirectiveEntryProto) GetInfo() *CacheDirectiveInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-func (m *CacheDirectiveEntryProto) GetStats() *CacheDirectiveStatsProto {
-	if m != nil {
-		return m.Stats
-	}
-	return nil
-}
-
-type ListCacheDirectivesResponseProto struct {
-	Elements []*CacheDirectiveEntryProto `protobuf:"bytes,1,rep,name=elements" json:"elements,omitempty"`
-	HasMore *bool `protobuf:"varint,2,req,name=hasMore" json:"hasMore,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCacheDirectivesResponseProto) Reset() { *m = ListCacheDirectivesResponseProto{} }
-func (m *ListCacheDirectivesResponseProto) String() string { return proto.CompactTextString(m) }
-func (*ListCacheDirectivesResponseProto) ProtoMessage() {}
-func (*ListCacheDirectivesResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{97}
-}
-
-func (m *ListCacheDirectivesResponseProto) GetElements() []*CacheDirectiveEntryProto {
-	if m != nil {
-		return m.Elements
-	}
-	return nil
-}
-
-func (m *ListCacheDirectivesResponseProto) GetHasMore() bool {
-	if m != nil && m.HasMore != nil {
-		return *m.HasMore
-	}
-	return false
-}
-
-type CachePoolInfoProto struct {
-	PoolName *string `protobuf:"bytes,1,opt,name=poolName" json:"poolName,omitempty"`
-	OwnerName *string `protobuf:"bytes,2,opt,name=ownerName" json:"ownerName,omitempty"`
-	GroupName *string `protobuf:"bytes,3,opt,name=groupName" json:"groupName,omitempty"`
-	Mode *int32 `protobuf:"varint,4,opt,name=mode" json:"mode,omitempty"`
-	Limit *int64 `protobuf:"varint,5,opt,name=limit" json:"limit,omitempty"`
-	MaxRelativeExpiry *int64 `protobuf:"varint,6,opt,name=maxRelativeExpiry" json:"maxRelativeExpiry,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CachePoolInfoProto) Reset() { *m = CachePoolInfoProto{} }
-func (m *CachePoolInfoProto) String() string { return proto.CompactTextString(m) }
-func (*CachePoolInfoProto) ProtoMessage() {}
-func (*CachePoolInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{98} }
-
-func (m *CachePoolInfoProto) GetPoolName() string {
-	if m != nil && m.PoolName != nil {
-		return *m.PoolName
-	}
-	return ""
-}
-
-func (m *CachePoolInfoProto) GetOwnerName() string {
-	if m != nil && m.OwnerName != nil {
-		return *m.OwnerName
-	}
-	return ""
-}
-
-func (m *CachePoolInfoProto) GetGroupName() string {
-	if m != nil && m.GroupName != nil {
-		return *m.GroupName
-	}
-	return ""
-}
-
-func (m *CachePoolInfoProto) GetMode() int32 {
-	if m != nil && m.Mode != nil {
-		return *m.Mode
-	}
-	return 0
-}
-
-func (m *CachePoolInfoProto) GetLimit() int64 {
-	if m != nil && m.Limit != nil {
-		return *m.Limit
-	}
-	return 0
-}
-
-func (m *CachePoolInfoProto) GetMaxRelativeExpiry() int64 {
-	if m != nil && m.MaxRelativeExpiry != nil {
-		return *m.MaxRelativeExpiry
-	}
-	return 0
-}
-
-type CachePoolStatsProto struct {
-	BytesNeeded *int64 `protobuf:"varint,1,req,name=bytesNeeded" json:"bytesNeeded,omitempty"`
-	BytesCached *int64 `protobuf:"varint,2,req,name=bytesCached" json:"bytesCached,omitempty"`
-	BytesOverlimit *int64 `protobuf:"varint,3,req,name=bytesOverlimit" json:"bytesOverlimit,omitempty"`
-	FilesNeeded *int64 `protobuf:"varint,4,req,name=filesNeeded" json:"filesNeeded,omitempty"`
-	FilesCached *int64 `protobuf:"varint,5,req,name=filesCached" json:"filesCached,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CachePoolStatsProto) Reset() { *m = CachePoolStatsProto{} }
-func (m *CachePoolStatsProto) String() string { return proto.CompactTextString(m) }
-func (*CachePoolStatsProto) ProtoMessage() {}
-func (*CachePoolStatsProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{99} }
-
-func (m *CachePoolStatsProto) GetBytesNeeded() int64 {
-	if m != nil && m.BytesNeeded != nil {
-		return *m.BytesNeeded
-	}
-	return 0
-}
-
-func (m *CachePoolStatsProto) GetBytesCached() int64 {
-	if m != nil && m.BytesCached != nil {
-		return *m.BytesCached
-	}
-	return 0
-}
-
-func (m *CachePoolStatsProto) GetBytesOverlimit() int64 {
-	if m != nil && m.BytesOverlimit != nil {
-		return *m.BytesOverlimit
-	}
-	return 0
-}
-
-func (m *CachePoolStatsProto) GetFilesNeeded() int64 {
-	if m != nil && m.FilesNeeded != nil {
-		return *m.FilesNeeded
-	}
-	return 0
-}
-
-func (m *CachePoolStatsProto) GetFilesCached() int64 {
-	if m != nil && m.FilesCached != nil {
-		return *m.FilesCached
-	}
-	return 0
-}
-
-type AddCachePoolRequestProto struct {
-	Info *CachePoolInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddCachePoolRequestProto) Reset() { *m = AddCachePoolRequestProto{} }
-func (m *AddCachePoolRequestProto) String() string { return proto.CompactTextString(m) }
-func (*AddCachePoolRequestProto) ProtoMessage() {}
-func (*AddCachePoolRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{100} }
-
-func (m *AddCachePoolRequestProto) GetInfo() *CachePoolInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-type AddCachePoolResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AddCachePoolResponseProto) Reset() { *m = AddCachePoolResponseProto{} }
-func (m *AddCachePoolResponseProto) String() string { return proto.CompactTextString(m) }
-func (*AddCachePoolResponseProto) ProtoMessage() {}
-func (*AddCachePoolResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{101} }
-
-type ModifyCachePoolRequestProto struct {
-	Info *CachePoolInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ModifyCachePoolRequestProto) Reset() { *m = ModifyCachePoolRequestProto{} }
-func (m *ModifyCachePoolRequestProto) String() string { return proto.CompactTextString(m) }
-func (*ModifyCachePoolRequestProto) ProtoMessage() {}
-func (*ModifyCachePoolRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{102} }
-
-func (m *ModifyCachePoolRequestProto) GetInfo() *CachePoolInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-type ModifyCachePoolResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ModifyCachePoolResponseProto) Reset() { *m = ModifyCachePoolResponseProto{} }
-func (m *ModifyCachePoolResponseProto) String() string { return proto.CompactTextString(m) }
-func (*ModifyCachePoolResponseProto) ProtoMessage() {}
-func (*ModifyCachePoolResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{103} }
-
-type RemoveCachePoolRequestProto struct {
-	PoolName *string `protobuf:"bytes,1,req,name=poolName" json:"poolName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RemoveCachePoolRequestProto) Reset() { *m = RemoveCachePoolRequestProto{} }
-func (m *RemoveCachePoolRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RemoveCachePoolRequestProto) ProtoMessage() {}
-func (*RemoveCachePoolRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{104} }
-
-func (m *RemoveCachePoolRequestProto) GetPoolName() string {
-	if m != nil && m.PoolName != nil {
-		return *m.PoolName
-	}
-	return ""
-}
-
-type RemoveCachePoolResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RemoveCachePoolResponseProto) Reset() { *m = RemoveCachePoolResponseProto{} }
-func (m *RemoveCachePoolResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RemoveCachePoolResponseProto) ProtoMessage() {}
-func (*RemoveCachePoolResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{105} }
-
-type ListCachePoolsRequestProto struct {
-	PrevPoolName *string `protobuf:"bytes,1,req,name=prevPoolName" json:"prevPoolName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCachePoolsRequestProto) Reset() { *m = ListCachePoolsRequestProto{} }
-func (m *ListCachePoolsRequestProto) String() string { return proto.CompactTextString(m) }
-func (*ListCachePoolsRequestProto) ProtoMessage() {}
-func (*ListCachePoolsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{106} }
-
-func (m *ListCachePoolsRequestProto) GetPrevPoolName() string {
-	if m != nil && m.PrevPoolName != nil {
-		return *m.PrevPoolName
-	}
-	return ""
-}
-
-type ListCachePoolsResponseProto struct {
-	Entries []*CachePoolEntryProto `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
-	HasMore *bool `protobuf:"varint,2,req,name=hasMore" json:"hasMore,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ListCachePoolsResponseProto) Reset() { *m = ListCachePoolsResponseProto{} }
-func (m *ListCachePoolsResponseProto) String() string { return proto.CompactTextString(m) }
-func (*ListCachePoolsResponseProto) ProtoMessage() {}
-func (*ListCachePoolsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{107} }
-
-func (m *ListCachePoolsResponseProto) GetEntries() []*CachePoolEntryProto {
-	if m != nil {
-		return m.Entries
-	}
-	return nil
-}
-
-func (m *ListCachePoolsResponseProto) GetHasMore() bool {
-	if m != nil && m.HasMore != nil {
-		return *m.HasMore
-	}
-	return false
-}
-
-type CachePoolEntryProto struct {
-	Info *CachePoolInfoProto `protobuf:"bytes,1,req,name=info" json:"info,omitempty"`
-	Stats *CachePoolStatsProto `protobuf:"bytes,2,req,name=stats" json:"stats,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CachePoolEntryProto) Reset() { *m = CachePoolEntryProto{} }
-func (m *CachePoolEntryProto) String() string { return proto.CompactTextString(m) }
-func (*CachePoolEntryProto) ProtoMessage() {}
-func (*CachePoolEntryProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{108} }
-
-func (m *CachePoolEntryProto) GetInfo() *CachePoolInfoProto {
-	if m != nil {
-		return m.Info
-	}
-	return nil
-}
-
-func (m *CachePoolEntryProto) GetStats() *CachePoolStatsProto {
-	if m != nil {
-		return m.Stats
-	}
-	return nil
-}
-
-type GetFileLinkInfoRequestProto struct {
-	Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetFileLinkInfoRequestProto) Reset() { *m = GetFileLinkInfoRequestProto{} }
-func (m *GetFileLinkInfoRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetFileLinkInfoRequestProto) ProtoMessage() {}
-func (*GetFileLinkInfoRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{109} }
-
-func (m *GetFileLinkInfoRequestProto) GetSrc() string {
-	if m != nil && m.Src != nil {
-		return *m.Src
-	}
-	return ""
-}
-
-type GetFileLinkInfoResponseProto struct {
-	Fs *HdfsFileStatusProto `protobuf:"bytes,1,opt,name=fs" json:"fs,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetFileLinkInfoResponseProto) Reset() { *m = GetFileLinkInfoResponseProto{} }
-func (m *GetFileLinkInfoResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetFileLinkInfoResponseProto) ProtoMessage() {}
-func (*GetFileLinkInfoResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{110} }
-
-func (m *GetFileLinkInfoResponseProto) GetFs() *HdfsFileStatusProto {
-	if m != nil {
-		return m.Fs
-	}
-	return nil
-}
-
-type GetContentSummaryRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetContentSummaryRequestProto) Reset() { *m = GetContentSummaryRequestProto{} }
-func (m *GetContentSummaryRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetContentSummaryRequestProto) ProtoMessage() {}
-func (*GetContentSummaryRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{111} }
-
-func (m *GetContentSummaryRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-type GetContentSummaryResponseProto struct {
-	Summary *ContentSummaryProto `protobuf:"bytes,1,req,name=summary" json:"summary,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetContentSummaryResponseProto) Reset() { *m = GetContentSummaryResponseProto{} }
-func (m *GetContentSummaryResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetContentSummaryResponseProto) ProtoMessage() {}
-func (*GetContentSummaryResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{112}
-}
-
-func (m *GetContentSummaryResponseProto) GetSummary() *ContentSummaryProto {
-	if m != nil {
-		return m.Summary
-	}
-	return nil
-}
-
-type GetQuotaUsageRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetQuotaUsageRequestProto) Reset() { *m = GetQuotaUsageRequestProto{} }
-func (m *GetQuotaUsageRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetQuotaUsageRequestProto) ProtoMessage() {}
-func (*GetQuotaUsageRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{113} }
-
-func (m *GetQuotaUsageRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-type GetQuotaUsageResponseProto struct {
-	Usage *QuotaUsageProto `protobuf:"bytes,1,req,name=usage" json:"usage,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetQuotaUsageResponseProto) Reset() { *m = GetQuotaUsageResponseProto{} }
-func (m *GetQuotaUsageResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetQuotaUsageResponseProto) ProtoMessage() {}
-func (*GetQuotaUsageResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{114} }
-
-func (m *GetQuotaUsageResponseProto) GetUsage() *QuotaUsageProto {
-	if m != nil {
-		return m.Usage
-	}
-	return nil
-}
-
-type SetQuotaRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	NamespaceQuota *uint64 `protobuf:"varint,2,req,name=namespaceQuota" json:"namespaceQuota,omitempty"`
-	StoragespaceQuota *uint64 `protobuf:"varint,3,req,name=storagespaceQuota" json:"storagespaceQuota,omitempty"`
-	StorageType *StorageTypeProto `protobuf:"varint,4,opt,name=storageType,enum=hadoop.hdfs.StorageTypeProto" json:"storageType,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetQuotaRequestProto) Reset() { *m = SetQuotaRequestProto{} }
-func (m *SetQuotaRequestProto) String() string { return proto.CompactTextString(m) }
-func (*SetQuotaRequestProto) ProtoMessage() {}
-func (*SetQuotaRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{115} }
-
-func (m *SetQuotaRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-func (m *SetQuotaRequestProto) GetNamespaceQuota() uint64 {
-	if m != nil && m.NamespaceQuota != nil {
-		return *m.NamespaceQuota
-	}
-	return 0
-}
-
-func (m *SetQuotaRequestProto) GetStoragespaceQuota() uint64 {
-	if m != nil && m.StoragespaceQuota != nil {
-		return *m.StoragespaceQuota
-	}
-	return 0
-}
-
-func (m *SetQuotaRequestProto) GetStorageType() StorageTypeProto {
-	if m != nil && m.StorageType != nil {
-		return *m.StorageType
-	}
-	return StorageTypeProto_DISK
-}
-
-type SetQuotaResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetQuotaResponseProto) Reset() { *m = SetQuotaResponseProto{} }
-func (m *SetQuotaResponseProto) String() string { return proto.CompactTextString(m) }
-func (*SetQuotaResponseProto) ProtoMessage() {}
-func (*SetQuotaResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{116} }
-
-type FsyncRequestProto struct {
-	Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"`
-	Client *string `protobuf:"bytes,2,req,name=client" json:"client,omitempty"`
-	LastBlockLength *int64 `protobuf:"zigzag64,3,opt,name=lastBlockLength,def=-1" json:"lastBlockLength,omitempty"`
-	FileId *uint64 `protobuf:"varint,4,opt,name=fileId,def=0" json:"fileId,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FsyncRequestProto) Reset() { *m = FsyncRequestProto{} }
-func (m *FsyncRequestProto) String() string { return proto.CompactTextString(m) }
-func (*FsyncRequestProto) ProtoMessage() {}
-func (*FsyncRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{117} }
-
-const Default_FsyncRequestProto_LastBlockLength int64 = -1
-const Default_FsyncRequestProto_FileId uint64 = 0
-
-func (m *FsyncRequestProto) GetSrc() string {
-	if m != nil && m.Src != nil {
-		return *m.Src
-	}
-	return ""
-}
-
-func (m *FsyncRequestProto) GetClient() string {
-	if m != nil && m.Client != nil {
-		return *m.Client
-	}
-	return ""
-}
-
-func (m *FsyncRequestProto) GetLastBlockLength() int64 {
-	if m != nil && m.LastBlockLength != nil {
-		return *m.LastBlockLength
-	}
-	return Default_FsyncRequestProto_LastBlockLength
-}
-
-func (m *FsyncRequestProto) GetFileId() uint64 {
-	if m != nil && m.FileId != nil {
-		return *m.FileId
-	}
-	return Default_FsyncRequestProto_FileId
-}
-
-type FsyncResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FsyncResponseProto) Reset() { *m = FsyncResponseProto{} }
-func (m *FsyncResponseProto) String() string { return proto.CompactTextString(m) }
-func (*FsyncResponseProto) ProtoMessage() {}
-func (*FsyncResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{118} }
-
-type SetTimesRequestProto struct {
-	Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"`
-	Mtime *uint64 `protobuf:"varint,2,req,name=mtime" json:"mtime,omitempty"`
-	Atime *uint64 `protobuf:"varint,3,req,name=atime" json:"atime,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetTimesRequestProto) Reset() { *m = SetTimesRequestProto{} }
-func (m *SetTimesRequestProto) String() string { return proto.CompactTextString(m) }
-func (*SetTimesRequestProto) ProtoMessage() {}
-func (*SetTimesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{119} }
-
-func (m *SetTimesRequestProto) GetSrc() string {
-	if m != nil && m.Src != nil {
-		return *m.Src
-	}
-	return ""
-}
-
-func (m *SetTimesRequestProto) GetMtime() uint64 {
-	if m != nil && m.Mtime != nil {
-		return *m.Mtime
-	}
-	return 0
-}
-
-func (m *SetTimesRequestProto) GetAtime() uint64 {
-	if m != nil && m.Atime != nil {
-		return *m.Atime
-	}
-	return 0
-}
-
-type SetTimesResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetTimesResponseProto) Reset() { *m = SetTimesResponseProto{} }
-func (m *SetTimesResponseProto) String() string { return proto.CompactTextString(m) }
-func (*SetTimesResponseProto) ProtoMessage() {}
-func (*SetTimesResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{120} }
-
-type CreateSymlinkRequestProto struct {
-	Target *string `protobuf:"bytes,1,req,name=target" json:"target,omitempty"`
-	Link *string `protobuf:"bytes,2,req,name=link" json:"link,omitempty"`
-	DirPerm *FsPermissionProto `protobuf:"bytes,3,req,name=dirPerm" json:"dirPerm,omitempty"`
-	CreateParent *bool `protobuf:"varint,4,req,name=createParent" json:"createParent,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CreateSymlinkRequestProto) Reset() { *m = CreateSymlinkRequestProto{} }
-func (m *CreateSymlinkRequestProto) String() string { return proto.CompactTextString(m) }
-func (*CreateSymlinkRequestProto) ProtoMessage() {}
-func (*CreateSymlinkRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{121} }
-
-func (m *CreateSymlinkRequestProto) GetTarget() string {
-	if m != nil && m.Target != nil {
-		return *m.Target
-	}
-	return ""
-}
-
-func (m *CreateSymlinkRequestProto) GetLink() string {
-	if m != nil && m.Link != nil {
-		return *m.Link
-	}
-	return ""
-}
-
-func (m *CreateSymlinkRequestProto) GetDirPerm() *FsPermissionProto {
-	if m != nil {
-		return m.DirPerm
-	}
-	return nil
-}
-
-func (m *CreateSymlinkRequestProto) GetCreateParent() bool {
-	if m != nil && m.CreateParent != nil {
-		return *m.CreateParent
-	}
-	return false
-}
-
-type CreateSymlinkResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CreateSymlinkResponseProto) Reset() { *m = CreateSymlinkResponseProto{} }
-func (m *CreateSymlinkResponseProto) String() string { return proto.CompactTextString(m) }
-func (*CreateSymlinkResponseProto) ProtoMessage() {}
-func (*CreateSymlinkResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{122} }
-
-type GetLinkTargetRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetLinkTargetRequestProto) Reset() { *m = GetLinkTargetRequestProto{} }
-func (m *GetLinkTargetRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetLinkTargetRequestProto) ProtoMessage() {}
-func (*GetLinkTargetRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{123} }
-
-func (m *GetLinkTargetRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-type GetLinkTargetResponseProto struct {
-	TargetPath *string `protobuf:"bytes,1,opt,name=targetPath" json:"targetPath,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetLinkTargetResponseProto) Reset() { *m = GetLinkTargetResponseProto{} }
-func (m *GetLinkTargetResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetLinkTargetResponseProto) ProtoMessage() {}
-func (*GetLinkTargetResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{124} }
-
-func (m *GetLinkTargetResponseProto) GetTargetPath() string {
-	if m != nil && m.TargetPath != nil {
-		return *m.TargetPath
-	}
-	return ""
-}
-
-type UpdateBlockForPipelineRequestProto struct {
-	Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"`
-	ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UpdateBlockForPipelineRequestProto) Reset() { *m = UpdateBlockForPipelineRequestProto{} }
-func (m *UpdateBlockForPipelineRequestProto) String() string { return proto.CompactTextString(m) }
-func (*UpdateBlockForPipelineRequestProto) ProtoMessage() {}
-func (*UpdateBlockForPipelineRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{125}
-}
-
-func (m *UpdateBlockForPipelineRequestProto) GetBlock() *ExtendedBlockProto {
-	if m != nil {
-		return m.Block
-	}
-	return nil
-}
-
-func (m *UpdateBlockForPipelineRequestProto) GetClientName() string {
-	if m != nil && m.ClientName != nil {
-		return *m.ClientName
-	}
-	return ""
-}
-
-type UpdateBlockForPipelineResponseProto struct {
-	Block *LocatedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UpdateBlockForPipelineResponseProto) Reset() { *m = UpdateBlockForPipelineResponseProto{} }
-func (m *UpdateBlockForPipelineResponseProto) String() string { return proto.CompactTextString(m) }
-func (*UpdateBlockForPipelineResponseProto) ProtoMessage() {}
-func (*UpdateBlockForPipelineResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{126}
-}
-
-func (m *UpdateBlockForPipelineResponseProto) GetBlock() *LocatedBlockProto {
-	if m != nil {
-		return m.Block
-	}
-	return nil
-}
-
-type UpdatePipelineRequestProto struct {
-	ClientName *string `protobuf:"bytes,1,req,name=clientName" json:"clientName,omitempty"`
-	OldBlock *ExtendedBlockProto `protobuf:"bytes,2,req,name=oldBlock" json:"oldBlock,omitempty"`
-	NewBlock *ExtendedBlockProto `protobuf:"bytes,3,req,name=newBlock" json:"newBlock,omitempty"`
-	NewNodes []*DatanodeIDProto `protobuf:"bytes,4,rep,name=newNodes" json:"newNodes,omitempty"`
-	StorageIDs []string `protobuf:"bytes,5,rep,name=storageIDs" json:"storageIDs,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UpdatePipelineRequestProto) Reset() { *m = UpdatePipelineRequestProto{} }
-func (m *UpdatePipelineRequestProto) String() string { return proto.CompactTextString(m) }
-func (*UpdatePipelineRequestProto) ProtoMessage() {}
-func (*UpdatePipelineRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{127} }
-
-func (m *UpdatePipelineRequestProto) GetClientName() string {
-	if m != nil && m.ClientName != nil {
-		return *m.ClientName
-	}
-	return ""
-}
-
-func (m *UpdatePipelineRequestProto) GetOldBlock() *ExtendedBlockProto {
-	if m != nil {
-		return m.OldBlock
-	}
-	return nil
-}
-
-func (m *UpdatePipelineRequestProto) GetNewBlock() *ExtendedBlockProto {
-	if m != nil {
-		return m.NewBlock
-	}
-	return nil
-}
-
-func (m *UpdatePipelineRequestProto) GetNewNodes() []*DatanodeIDProto {
-	if m != nil {
-		return m.NewNodes
-	}
-	return nil
-}
-
-func (m *UpdatePipelineRequestProto) GetStorageIDs() []string {
-	if m != nil {
-		return m.StorageIDs
-	}
-	return nil
-}
-
-type UpdatePipelineResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *UpdatePipelineResponseProto) Reset() { *m = UpdatePipelineResponseProto{} }
-func (m *UpdatePipelineResponseProto) String() string { return proto.CompactTextString(m) }
-func (*UpdatePipelineResponseProto) ProtoMessage() {}
-func (*UpdatePipelineResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{128} }
-
-type SetBalancerBandwidthRequestProto struct {
-	Bandwidth *int64 `protobuf:"varint,1,req,name=bandwidth" json:"bandwidth,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetBalancerBandwidthRequestProto) Reset() { *m = SetBalancerBandwidthRequestProto{} }
-func (m *SetBalancerBandwidthRequestProto) String() string { return proto.CompactTextString(m) }
-func (*SetBalancerBandwidthRequestProto) ProtoMessage() {}
-func (*SetBalancerBandwidthRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{129}
-}
-
-func (m *SetBalancerBandwidthRequestProto) GetBandwidth() int64 {
-	if m != nil && m.Bandwidth != nil {
-		return *m.Bandwidth
-	}
-	return 0
-}
-
-type SetBalancerBandwidthResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SetBalancerBandwidthResponseProto) Reset() { *m = SetBalancerBandwidthResponseProto{} }
-func (m *SetBalancerBandwidthResponseProto) String() string { return proto.CompactTextString(m) }
-func (*SetBalancerBandwidthResponseProto) ProtoMessage() {}
-func (*SetBalancerBandwidthResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{130}
-}
-
-type GetDataEncryptionKeyRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDataEncryptionKeyRequestProto) Reset() { *m = GetDataEncryptionKeyRequestProto{} }
-func (m *GetDataEncryptionKeyRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetDataEncryptionKeyRequestProto) ProtoMessage() {}
-func (*GetDataEncryptionKeyRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{131}
-}
-
-type GetDataEncryptionKeyResponseProto struct {
-	DataEncryptionKey *DataEncryptionKeyProto `protobuf:"bytes,1,opt,name=dataEncryptionKey" json:"dataEncryptionKey,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetDataEncryptionKeyResponseProto) Reset() { *m = GetDataEncryptionKeyResponseProto{} }
-func (m *GetDataEncryptionKeyResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetDataEncryptionKeyResponseProto) ProtoMessage() {}
-func (*GetDataEncryptionKeyResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{132}
-}
-
-func (m *GetDataEncryptionKeyResponseProto) GetDataEncryptionKey() *DataEncryptionKeyProto {
-	if m != nil {
-		return m.DataEncryptionKey
-	}
-	return nil
-}
-
-type CreateSnapshotRequestProto struct {
-	SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"`
-	SnapshotName *string `protobuf:"bytes,2,opt,name=snapshotName" json:"snapshotName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CreateSnapshotRequestProto) Reset() { *m = CreateSnapshotRequestProto{} }
-func (m *CreateSnapshotRequestProto) String() string { return proto.CompactTextString(m) }
-func (*CreateSnapshotRequestProto) ProtoMessage() {}
-func (*CreateSnapshotRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{133} }
-
-func (m *CreateSnapshotRequestProto) GetSnapshotRoot() string {
-	if m != nil && m.SnapshotRoot != nil {
-		return *m.SnapshotRoot
-	}
-	return ""
-}
-
-func (m *CreateSnapshotRequestProto) GetSnapshotName() string {
-	if m != nil && m.SnapshotName != nil {
-		return *m.SnapshotName
-	}
-	return ""
-}
-
-type CreateSnapshotResponseProto struct {
-	SnapshotPath *string `protobuf:"bytes,1,req,name=snapshotPath" json:"snapshotPath,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CreateSnapshotResponseProto) Reset() { *m = CreateSnapshotResponseProto{} }
-func (m *CreateSnapshotResponseProto) String() string { return proto.CompactTextString(m) }
-func (*CreateSnapshotResponseProto) ProtoMessage() {}
-func (*CreateSnapshotResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{134} }
-
-func (m *CreateSnapshotResponseProto) GetSnapshotPath() string {
-	if m != nil && m.SnapshotPath != nil {
-		return *m.SnapshotPath
-	}
-	return ""
-}
-
-type RenameSnapshotRequestProto struct {
-	SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"`
-	SnapshotOldName *string `protobuf:"bytes,2,req,name=snapshotOldName" json:"snapshotOldName,omitempty"`
-	SnapshotNewName *string `protobuf:"bytes,3,req,name=snapshotNewName" json:"snapshotNewName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RenameSnapshotRequestProto) Reset() { *m = RenameSnapshotRequestProto{} }
-func (m *RenameSnapshotRequestProto) String() string { return proto.CompactTextString(m) }
-func (*RenameSnapshotRequestProto) ProtoMessage() {}
-func (*RenameSnapshotRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{135} }
-
-func (m *RenameSnapshotRequestProto) GetSnapshotRoot() string {
-	if m != nil && m.SnapshotRoot != nil {
-		return *m.SnapshotRoot
-	}
-	return ""
-}
-
-func (m *RenameSnapshotRequestProto) GetSnapshotOldName() string {
-	if m != nil && m.SnapshotOldName != nil {
-		return *m.SnapshotOldName
-	}
-	return ""
-}
-
-func (m *RenameSnapshotRequestProto) GetSnapshotNewName() string {
-	if m != nil && m.SnapshotNewName != nil {
-		return *m.SnapshotNewName
-	}
-	return ""
-}
-
-type RenameSnapshotResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RenameSnapshotResponseProto) Reset() { *m = RenameSnapshotResponseProto{} }
-func (m *RenameSnapshotResponseProto) String() string { return proto.CompactTextString(m) }
-func (*RenameSnapshotResponseProto) ProtoMessage() {}
-func (*RenameSnapshotResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{136} }
-
-type AllowSnapshotRequestProto struct {
-	SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AllowSnapshotRequestProto) Reset() { *m = AllowSnapshotRequestProto{} }
-func (m *AllowSnapshotRequestProto) String() string { return proto.CompactTextString(m) }
-func (*AllowSnapshotRequestProto) ProtoMessage() {}
-func (*AllowSnapshotRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{137} }
-
-func (m *AllowSnapshotRequestProto) GetSnapshotRoot() string {
-	if m != nil && m.SnapshotRoot != nil {
-		return *m.SnapshotRoot
-	}
-	return ""
-}
-
-type AllowSnapshotResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *AllowSnapshotResponseProto) Reset() { *m = AllowSnapshotResponseProto{} }
-func (m *AllowSnapshotResponseProto) String() string { return proto.CompactTextString(m) }
-func (*AllowSnapshotResponseProto) ProtoMessage() {}
-func (*AllowSnapshotResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{138} }
-
-type DisallowSnapshotRequestProto struct {
-	SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DisallowSnapshotRequestProto) Reset() { *m = DisallowSnapshotRequestProto{} }
-func (m *DisallowSnapshotRequestProto) String() string { return proto.CompactTextString(m) }
-func (*DisallowSnapshotRequestProto) ProtoMessage() {}
-func (*DisallowSnapshotRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{139} }
-
-func (m *DisallowSnapshotRequestProto) GetSnapshotRoot() string {
-	if m != nil && m.SnapshotRoot != nil {
-		return *m.SnapshotRoot
-	}
-	return ""
-}
-
-type DisallowSnapshotResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DisallowSnapshotResponseProto) Reset() { *m = DisallowSnapshotResponseProto{} }
-func (m *DisallowSnapshotResponseProto) String() string { return proto.CompactTextString(m) }
-func (*DisallowSnapshotResponseProto) ProtoMessage() {}
-func (*DisallowSnapshotResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{140} }
-
-type DeleteSnapshotRequestProto struct {
-	SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"`
-	SnapshotName *string `protobuf:"bytes,2,req,name=snapshotName" json:"snapshotName,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DeleteSnapshotRequestProto) Reset() { *m = DeleteSnapshotRequestProto{} }
-func (m *DeleteSnapshotRequestProto) String() string { return proto.CompactTextString(m) }
-func (*DeleteSnapshotRequestProto) ProtoMessage() {}
-func (*DeleteSnapshotRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{141} }
-
-func (m *DeleteSnapshotRequestProto) GetSnapshotRoot() string {
-	if m != nil && m.SnapshotRoot != nil {
-		return *m.SnapshotRoot
-	}
-	return ""
-}
-
-func (m *DeleteSnapshotRequestProto) GetSnapshotName() string {
-	if m != nil && m.SnapshotName != nil {
-		return *m.SnapshotName
-	}
-	return ""
-}
-
-type DeleteSnapshotResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DeleteSnapshotResponseProto) Reset() { *m = DeleteSnapshotResponseProto{} }
-func (m *DeleteSnapshotResponseProto) String() string { return proto.CompactTextString(m) }
-func (*DeleteSnapshotResponseProto) ProtoMessage() {}
-func (*DeleteSnapshotResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{142} }
-
-type CheckAccessRequestProto struct {
-	Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"`
-	Mode *AclEntryProto_FsActionProto `protobuf:"varint,2,req,name=mode,enum=hadoop.hdfs.AclEntryProto_FsActionProto" json:"mode,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CheckAccessRequestProto) Reset() { *m = CheckAccessRequestProto{} }
-func (m *CheckAccessRequestProto) String() string { return proto.CompactTextString(m) }
-func (*CheckAccessRequestProto) ProtoMessage() {}
-func (*CheckAccessRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{143} }
-
-func (m *CheckAccessRequestProto) GetPath() string {
-	if m != nil && m.Path != nil {
-		return *m.Path
-	}
-	return ""
-}
-
-func (m *CheckAccessRequestProto) GetMode() AclEntryProto_FsActionProto {
-	if m != nil && m.Mode != nil {
-		return *m.Mode
-	}
-	return AclEntryProto_NONE
-}
-
-type CheckAccessResponseProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *CheckAccessResponseProto) Reset() { *m = CheckAccessResponseProto{} }
-func (m *CheckAccessResponseProto) String() string { return proto.CompactTextString(m) }
-func (*CheckAccessResponseProto) ProtoMessage() {}
-func (*CheckAccessResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{144} }
-
-type GetCurrentEditLogTxidRequestProto struct {
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetCurrentEditLogTxidRequestProto) Reset() { *m = GetCurrentEditLogTxidRequestProto{} }
-func (m *GetCurrentEditLogTxidRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetCurrentEditLogTxidRequestProto) ProtoMessage() {}
-func (*GetCurrentEditLogTxidRequestProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{145}
-}
-
-type GetCurrentEditLogTxidResponseProto struct {
-	Txid *int64 `protobuf:"varint,1,req,name=txid" json:"txid,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetCurrentEditLogTxidResponseProto) Reset() { *m = GetCurrentEditLogTxidResponseProto{} }
-func (m *GetCurrentEditLogTxidResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetCurrentEditLogTxidResponseProto) ProtoMessage() {}
-func (*GetCurrentEditLogTxidResponseProto) Descriptor() ([]byte, []int) {
-	return fileDescriptor4, []int{146}
-}
-
-func (m *GetCurrentEditLogTxidResponseProto) GetTxid() int64 {
-	if m != nil && m.Txid != nil {
-		return *m.Txid
-	}
-	return 0
-}
-
-type GetEditsFromTxidRequestProto struct {
-	Txid *int64 `protobuf:"varint,1,req,name=txid" json:"txid,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetEditsFromTxidRequestProto) Reset() { *m = GetEditsFromTxidRequestProto{} }
-func (m *GetEditsFromTxidRequestProto) String() string { return proto.CompactTextString(m) }
-func (*GetEditsFromTxidRequestProto) ProtoMessage() {}
-func (*GetEditsFromTxidRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{147} }
-
-func (m *GetEditsFromTxidRequestProto) GetTxid() int64 {
-	if m != nil && m.Txid != nil {
-		return *m.Txid
-	}
-	return 0
-}
-
-type GetEditsFromTxidResponseProto struct {
-	EventsList *EventsListProto `protobuf:"bytes,1,req,name=eventsList" json:"eventsList,omitempty"`
-	XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GetEditsFromTxidResponseProto) Reset() { *m = GetEditsFromTxidResponseProto{} }
-func (m *GetEditsFromTxidResponseProto) String() string { return proto.CompactTextString(m) }
-func (*GetEditsFromTxidResponseProto) ProtoMessage() {}
-func (*GetEditsFromTxidResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{148} }
-
-func (m *GetEditsFromTxidResponseProto) GetEventsList() *EventsListProto {
-	if m != nil {
-		return m.EventsList
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*GetBlockLocationsRequestProto)(nil), "hadoop.hdfs.GetBlockLocationsRequestProto")
-	proto.RegisterType((*GetBlockLocationsResponseProto)(nil), "hadoop.hdfs.GetBlockLocationsResponseProto")
-	proto.RegisterType((*GetServerDefaultsRequestProto)(nil), "hadoop.hdfs.GetServerDefaultsRequestProto")
-	proto.RegisterType((*GetServerDefaultsResponseProto)(nil), "hadoop.hdfs.GetServerDefaultsResponseProto")
-	proto.RegisterType((*CreateRequestProto)(nil), "hadoop.hdfs.CreateRequestProto")
-	proto.RegisterType((*CreateResponseProto)(nil), "hadoop.hdfs.CreateResponseProto")
-	proto.RegisterType((*AppendRequestProto)(nil), "hadoop.hdfs.AppendRequestProto")
-	proto.RegisterType((*AppendResponseProto)(nil), "hadoop.hdfs.AppendResponseProto")
-	proto.RegisterType((*SetReplicationRequestProto)(nil), "hadoop.hdfs.SetReplicationRequestProto")
-	proto.RegisterType((*SetReplicationResponseProto)(nil), "hadoop.hdfs.SetReplicationResponseProto")
-	proto.RegisterType((*SetStoragePolicyRequestProto)(nil), "hadoop.hdfs.SetStoragePolicyRequestProto")
-	proto.RegisterType((*SetStoragePolicyResponseProto)(nil), "hadoop.hdfs.SetStoragePolicyResponseProto")
-	proto.RegisterType((*UnsetStoragePolicyRequestProto)(nil), "hadoop.hdfs.UnsetStoragePolicyRequestProto")
-	proto.RegisterType((*UnsetStoragePolicyResponseProto)(nil), "hadoop.hdfs.UnsetStoragePolicyResponseProto")
-	proto.RegisterType((*GetStoragePolicyRequestProto)(nil), "hadoop.hdfs.GetStoragePolicyRequestProto")
-	proto.RegisterType((*GetStoragePolicyResponseProto)(nil), "hadoop.hdfs.GetStoragePolicyResponseProto")
-	proto.RegisterType((*GetStoragePoliciesRequestProto)(nil), "hadoop.hdfs.GetStoragePoliciesRequestProto")
-	proto.RegisterType((*GetStoragePoliciesResponseProto)(nil), "hadoop.hdfs.GetStoragePoliciesResponseProto")
-	proto.RegisterType((*SetPermissionRequestProto)(nil), "hadoop.hdfs.SetPermissionRequestProto")
-	proto.RegisterType((*SetPermissionResponseProto)(nil), "hadoop.hdfs.SetPermissionResponseProto")
-	proto.RegisterType((*SetOwnerRequestProto)(nil), "hadoop.hdfs.SetOwnerRequestProto")
-	proto.RegisterType((*SetOwnerResponseProto)(nil), "hadoop.hdfs.SetOwnerResponseProto")
-	proto.RegisterType((*AbandonBlockRequestProto)(nil), "hadoop.hdfs.AbandonBlockRequestProto")
-	proto.RegisterType((*AbandonBlockResponseProto)(nil), "hadoop.hdfs.AbandonBlockResponseProto")
-	proto.RegisterType((*AddBlockRequestProto)(nil), "hadoop.hdfs.AddBlockRequestProto")
-	proto.RegisterType((*AddBlockResponseProto)(nil), "hadoop.hdfs.AddBlockResponseProto")
-	proto.RegisterType((*GetAdditionalDatanodeRequestProto)(nil), "hadoop.hdfs.GetAdditionalDatanodeRequestProto")
-	proto.RegisterType((*GetAdditionalDatanodeResponseProto)(nil), "hadoop.hdfs.GetAdditionalDatanodeResponseProto")
-	proto.RegisterType((*CompleteRequestProto)(nil), "hadoop.hdfs.CompleteRequestProto")
-	proto.RegisterType((*CompleteResponseProto)(nil), "hadoop.hdfs.CompleteResponseProto")
-	proto.RegisterType((*ReportBadBlocksRequestProto)(nil), "hadoop.hdfs.ReportBadBlocksRequestProto")
-	proto.RegisterType((*ReportBadBlocksResponseProto)(nil), "hadoop.hdfs.ReportBadBlocksResponseProto")
-	proto.RegisterType((*ConcatRequestProto)(nil), "hadoop.hdfs.ConcatRequestProto")
-	proto.RegisterType((*ConcatResponseProto)(nil), "hadoop.hdfs.ConcatResponseProto")
-	proto.RegisterType((*TruncateRequestProto)(nil), "hadoop.hdfs.TruncateRequestProto")
-	proto.RegisterType((*TruncateResponseProto)(nil), "hadoop.hdfs.TruncateResponseProto")
-	proto.RegisterType((*RenameRequestProto)(nil), "hadoop.hdfs.RenameRequestProto")
-	proto.RegisterType((*RenameResponseProto)(nil), "hadoop.hdfs.RenameResponseProto")
-	proto.RegisterType((*Rename2RequestProto)(nil), "hadoop.hdfs.Rename2RequestProto")
-
proto.RegisterType((*Rename2ResponseProto)(nil), "hadoop.hdfs.Rename2ResponseProto") - proto.RegisterType((*DeleteRequestProto)(nil), "hadoop.hdfs.DeleteRequestProto") - proto.RegisterType((*DeleteResponseProto)(nil), "hadoop.hdfs.DeleteResponseProto") - proto.RegisterType((*MkdirsRequestProto)(nil), "hadoop.hdfs.MkdirsRequestProto") - proto.RegisterType((*MkdirsResponseProto)(nil), "hadoop.hdfs.MkdirsResponseProto") - proto.RegisterType((*GetListingRequestProto)(nil), "hadoop.hdfs.GetListingRequestProto") - proto.RegisterType((*GetListingResponseProto)(nil), "hadoop.hdfs.GetListingResponseProto") - proto.RegisterType((*GetSnapshottableDirListingRequestProto)(nil), "hadoop.hdfs.GetSnapshottableDirListingRequestProto") - proto.RegisterType((*GetSnapshottableDirListingResponseProto)(nil), "hadoop.hdfs.GetSnapshottableDirListingResponseProto") - proto.RegisterType((*GetSnapshotDiffReportRequestProto)(nil), "hadoop.hdfs.GetSnapshotDiffReportRequestProto") - proto.RegisterType((*GetSnapshotDiffReportResponseProto)(nil), "hadoop.hdfs.GetSnapshotDiffReportResponseProto") - proto.RegisterType((*RenewLeaseRequestProto)(nil), "hadoop.hdfs.RenewLeaseRequestProto") - proto.RegisterType((*RenewLeaseResponseProto)(nil), "hadoop.hdfs.RenewLeaseResponseProto") - proto.RegisterType((*RecoverLeaseRequestProto)(nil), "hadoop.hdfs.RecoverLeaseRequestProto") - proto.RegisterType((*RecoverLeaseResponseProto)(nil), "hadoop.hdfs.RecoverLeaseResponseProto") - proto.RegisterType((*GetFsStatusRequestProto)(nil), "hadoop.hdfs.GetFsStatusRequestProto") - proto.RegisterType((*GetFsStatsResponseProto)(nil), "hadoop.hdfs.GetFsStatsResponseProto") - proto.RegisterType((*GetDatanodeReportRequestProto)(nil), "hadoop.hdfs.GetDatanodeReportRequestProto") - proto.RegisterType((*GetDatanodeReportResponseProto)(nil), "hadoop.hdfs.GetDatanodeReportResponseProto") - proto.RegisterType((*GetDatanodeStorageReportRequestProto)(nil), "hadoop.hdfs.GetDatanodeStorageReportRequestProto") - proto.RegisterType((*DatanodeStorageReportProto)(nil), "hadoop.hdfs.DatanodeStorageReportProto") - proto.RegisterType((*GetDatanodeStorageReportResponseProto)(nil), "hadoop.hdfs.GetDatanodeStorageReportResponseProto") - proto.RegisterType((*GetPreferredBlockSizeRequestProto)(nil), "hadoop.hdfs.GetPreferredBlockSizeRequestProto") - proto.RegisterType((*GetPreferredBlockSizeResponseProto)(nil), "hadoop.hdfs.GetPreferredBlockSizeResponseProto") - proto.RegisterType((*SetSafeModeRequestProto)(nil), "hadoop.hdfs.SetSafeModeRequestProto") - proto.RegisterType((*SetSafeModeResponseProto)(nil), "hadoop.hdfs.SetSafeModeResponseProto") - proto.RegisterType((*SaveNamespaceRequestProto)(nil), "hadoop.hdfs.SaveNamespaceRequestProto") - proto.RegisterType((*SaveNamespaceResponseProto)(nil), "hadoop.hdfs.SaveNamespaceResponseProto") - proto.RegisterType((*RollEditsRequestProto)(nil), "hadoop.hdfs.RollEditsRequestProto") - proto.RegisterType((*RollEditsResponseProto)(nil), "hadoop.hdfs.RollEditsResponseProto") - proto.RegisterType((*RestoreFailedStorageRequestProto)(nil), "hadoop.hdfs.RestoreFailedStorageRequestProto") - proto.RegisterType((*RestoreFailedStorageResponseProto)(nil), "hadoop.hdfs.RestoreFailedStorageResponseProto") - proto.RegisterType((*RefreshNodesRequestProto)(nil), "hadoop.hdfs.RefreshNodesRequestProto") - proto.RegisterType((*RefreshNodesResponseProto)(nil), "hadoop.hdfs.RefreshNodesResponseProto") - proto.RegisterType((*FinalizeUpgradeRequestProto)(nil), "hadoop.hdfs.FinalizeUpgradeRequestProto") - 
proto.RegisterType((*FinalizeUpgradeResponseProto)(nil), "hadoop.hdfs.FinalizeUpgradeResponseProto") - proto.RegisterType((*RollingUpgradeRequestProto)(nil), "hadoop.hdfs.RollingUpgradeRequestProto") - proto.RegisterType((*RollingUpgradeInfoProto)(nil), "hadoop.hdfs.RollingUpgradeInfoProto") - proto.RegisterType((*RollingUpgradeResponseProto)(nil), "hadoop.hdfs.RollingUpgradeResponseProto") - proto.RegisterType((*ListCorruptFileBlocksRequestProto)(nil), "hadoop.hdfs.ListCorruptFileBlocksRequestProto") - proto.RegisterType((*ListCorruptFileBlocksResponseProto)(nil), "hadoop.hdfs.ListCorruptFileBlocksResponseProto") - proto.RegisterType((*MetaSaveRequestProto)(nil), "hadoop.hdfs.MetaSaveRequestProto") - proto.RegisterType((*MetaSaveResponseProto)(nil), "hadoop.hdfs.MetaSaveResponseProto") - proto.RegisterType((*GetFileInfoRequestProto)(nil), "hadoop.hdfs.GetFileInfoRequestProto") - proto.RegisterType((*GetFileInfoResponseProto)(nil), "hadoop.hdfs.GetFileInfoResponseProto") - proto.RegisterType((*IsFileClosedRequestProto)(nil), "hadoop.hdfs.IsFileClosedRequestProto") - proto.RegisterType((*IsFileClosedResponseProto)(nil), "hadoop.hdfs.IsFileClosedResponseProto") - proto.RegisterType((*CacheDirectiveInfoProto)(nil), "hadoop.hdfs.CacheDirectiveInfoProto") - proto.RegisterType((*CacheDirectiveInfoExpirationProto)(nil), "hadoop.hdfs.CacheDirectiveInfoExpirationProto") - proto.RegisterType((*CacheDirectiveStatsProto)(nil), "hadoop.hdfs.CacheDirectiveStatsProto") - proto.RegisterType((*AddCacheDirectiveRequestProto)(nil), "hadoop.hdfs.AddCacheDirectiveRequestProto") - proto.RegisterType((*AddCacheDirectiveResponseProto)(nil), "hadoop.hdfs.AddCacheDirectiveResponseProto") - proto.RegisterType((*ModifyCacheDirectiveRequestProto)(nil), "hadoop.hdfs.ModifyCacheDirectiveRequestProto") - proto.RegisterType((*ModifyCacheDirectiveResponseProto)(nil), "hadoop.hdfs.ModifyCacheDirectiveResponseProto") - proto.RegisterType((*RemoveCacheDirectiveRequestProto)(nil), "hadoop.hdfs.RemoveCacheDirectiveRequestProto") - proto.RegisterType((*RemoveCacheDirectiveResponseProto)(nil), "hadoop.hdfs.RemoveCacheDirectiveResponseProto") - proto.RegisterType((*ListCacheDirectivesRequestProto)(nil), "hadoop.hdfs.ListCacheDirectivesRequestProto") - proto.RegisterType((*CacheDirectiveEntryProto)(nil), "hadoop.hdfs.CacheDirectiveEntryProto") - proto.RegisterType((*ListCacheDirectivesResponseProto)(nil), "hadoop.hdfs.ListCacheDirectivesResponseProto") - proto.RegisterType((*CachePoolInfoProto)(nil), "hadoop.hdfs.CachePoolInfoProto") - proto.RegisterType((*CachePoolStatsProto)(nil), "hadoop.hdfs.CachePoolStatsProto") - proto.RegisterType((*AddCachePoolRequestProto)(nil), "hadoop.hdfs.AddCachePoolRequestProto") - proto.RegisterType((*AddCachePoolResponseProto)(nil), "hadoop.hdfs.AddCachePoolResponseProto") - proto.RegisterType((*ModifyCachePoolRequestProto)(nil), "hadoop.hdfs.ModifyCachePoolRequestProto") - proto.RegisterType((*ModifyCachePoolResponseProto)(nil), "hadoop.hdfs.ModifyCachePoolResponseProto") - proto.RegisterType((*RemoveCachePoolRequestProto)(nil), "hadoop.hdfs.RemoveCachePoolRequestProto") - proto.RegisterType((*RemoveCachePoolResponseProto)(nil), "hadoop.hdfs.RemoveCachePoolResponseProto") - proto.RegisterType((*ListCachePoolsRequestProto)(nil), "hadoop.hdfs.ListCachePoolsRequestProto") - proto.RegisterType((*ListCachePoolsResponseProto)(nil), "hadoop.hdfs.ListCachePoolsResponseProto") - proto.RegisterType((*CachePoolEntryProto)(nil), "hadoop.hdfs.CachePoolEntryProto") - 
proto.RegisterType((*GetFileLinkInfoRequestProto)(nil), "hadoop.hdfs.GetFileLinkInfoRequestProto") - proto.RegisterType((*GetFileLinkInfoResponseProto)(nil), "hadoop.hdfs.GetFileLinkInfoResponseProto") - proto.RegisterType((*GetContentSummaryRequestProto)(nil), "hadoop.hdfs.GetContentSummaryRequestProto") - proto.RegisterType((*GetContentSummaryResponseProto)(nil), "hadoop.hdfs.GetContentSummaryResponseProto") - proto.RegisterType((*GetQuotaUsageRequestProto)(nil), "hadoop.hdfs.GetQuotaUsageRequestProto") - proto.RegisterType((*GetQuotaUsageResponseProto)(nil), "hadoop.hdfs.GetQuotaUsageResponseProto") - proto.RegisterType((*SetQuotaRequestProto)(nil), "hadoop.hdfs.SetQuotaRequestProto") - proto.RegisterType((*SetQuotaResponseProto)(nil), "hadoop.hdfs.SetQuotaResponseProto") - proto.RegisterType((*FsyncRequestProto)(nil), "hadoop.hdfs.FsyncRequestProto") - proto.RegisterType((*FsyncResponseProto)(nil), "hadoop.hdfs.FsyncResponseProto") - proto.RegisterType((*SetTimesRequestProto)(nil), "hadoop.hdfs.SetTimesRequestProto") - proto.RegisterType((*SetTimesResponseProto)(nil), "hadoop.hdfs.SetTimesResponseProto") - proto.RegisterType((*CreateSymlinkRequestProto)(nil), "hadoop.hdfs.CreateSymlinkRequestProto") - proto.RegisterType((*CreateSymlinkResponseProto)(nil), "hadoop.hdfs.CreateSymlinkResponseProto") - proto.RegisterType((*GetLinkTargetRequestProto)(nil), "hadoop.hdfs.GetLinkTargetRequestProto") - proto.RegisterType((*GetLinkTargetResponseProto)(nil), "hadoop.hdfs.GetLinkTargetResponseProto") - proto.RegisterType((*UpdateBlockForPipelineRequestProto)(nil), "hadoop.hdfs.UpdateBlockForPipelineRequestProto") - proto.RegisterType((*UpdateBlockForPipelineResponseProto)(nil), "hadoop.hdfs.UpdateBlockForPipelineResponseProto") - proto.RegisterType((*UpdatePipelineRequestProto)(nil), "hadoop.hdfs.UpdatePipelineRequestProto") - proto.RegisterType((*UpdatePipelineResponseProto)(nil), "hadoop.hdfs.UpdatePipelineResponseProto") - proto.RegisterType((*SetBalancerBandwidthRequestProto)(nil), "hadoop.hdfs.SetBalancerBandwidthRequestProto") - proto.RegisterType((*SetBalancerBandwidthResponseProto)(nil), "hadoop.hdfs.SetBalancerBandwidthResponseProto") - proto.RegisterType((*GetDataEncryptionKeyRequestProto)(nil), "hadoop.hdfs.GetDataEncryptionKeyRequestProto") - proto.RegisterType((*GetDataEncryptionKeyResponseProto)(nil), "hadoop.hdfs.GetDataEncryptionKeyResponseProto") - proto.RegisterType((*CreateSnapshotRequestProto)(nil), "hadoop.hdfs.CreateSnapshotRequestProto") - proto.RegisterType((*CreateSnapshotResponseProto)(nil), "hadoop.hdfs.CreateSnapshotResponseProto") - proto.RegisterType((*RenameSnapshotRequestProto)(nil), "hadoop.hdfs.RenameSnapshotRequestProto") - proto.RegisterType((*RenameSnapshotResponseProto)(nil), "hadoop.hdfs.RenameSnapshotResponseProto") - proto.RegisterType((*AllowSnapshotRequestProto)(nil), "hadoop.hdfs.AllowSnapshotRequestProto") - proto.RegisterType((*AllowSnapshotResponseProto)(nil), "hadoop.hdfs.AllowSnapshotResponseProto") - proto.RegisterType((*DisallowSnapshotRequestProto)(nil), "hadoop.hdfs.DisallowSnapshotRequestProto") - proto.RegisterType((*DisallowSnapshotResponseProto)(nil), "hadoop.hdfs.DisallowSnapshotResponseProto") - proto.RegisterType((*DeleteSnapshotRequestProto)(nil), "hadoop.hdfs.DeleteSnapshotRequestProto") - proto.RegisterType((*DeleteSnapshotResponseProto)(nil), "hadoop.hdfs.DeleteSnapshotResponseProto") - proto.RegisterType((*CheckAccessRequestProto)(nil), "hadoop.hdfs.CheckAccessRequestProto") - proto.RegisterType((*CheckAccessResponseProto)(nil), 
"hadoop.hdfs.CheckAccessResponseProto") - proto.RegisterType((*GetCurrentEditLogTxidRequestProto)(nil), "hadoop.hdfs.GetCurrentEditLogTxidRequestProto") - proto.RegisterType((*GetCurrentEditLogTxidResponseProto)(nil), "hadoop.hdfs.GetCurrentEditLogTxidResponseProto") - proto.RegisterType((*GetEditsFromTxidRequestProto)(nil), "hadoop.hdfs.GetEditsFromTxidRequestProto") - proto.RegisterType((*GetEditsFromTxidResponseProto)(nil), "hadoop.hdfs.GetEditsFromTxidResponseProto") - proto.RegisterEnum("hadoop.hdfs.CreateFlagProto", CreateFlagProto_name, CreateFlagProto_value) - proto.RegisterEnum("hadoop.hdfs.DatanodeReportTypeProto", DatanodeReportTypeProto_name, DatanodeReportTypeProto_value) - proto.RegisterEnum("hadoop.hdfs.SafeModeActionProto", SafeModeActionProto_name, SafeModeActionProto_value) - proto.RegisterEnum("hadoop.hdfs.RollingUpgradeActionProto", RollingUpgradeActionProto_name, RollingUpgradeActionProto_value) - proto.RegisterEnum("hadoop.hdfs.CacheFlagProto", CacheFlagProto_name, CacheFlagProto_value) -} - -func init() { proto.RegisterFile("ClientNamenodeProtocol.proto", fileDescriptor4) } - -var fileDescriptor4 = []byte{ - // 5410 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5c, 0x5b, 0x73, 0xdc, 0x46, - 0x76, 0xae, 0x19, 0x0e, 0x29, 0xf2, 0x48, 0xa2, 0xc6, 0x10, 0x29, 0x8e, 0x86, 0x94, 0x34, 0x82, - 0x2e, 0xa6, 0xb5, 0x36, 0x65, 0xd3, 0x5e, 0x97, 0x22, 0x3b, 0xeb, 0x1d, 0x91, 0x43, 0x2e, 0x57, - 0x14, 0x49, 0x63, 0x28, 0x6b, 0xad, 0x5d, 0x17, 0x17, 0x02, 0x7a, 0x86, 0x08, 0x31, 0xc0, 0x04, - 0xc0, 0xf0, 0xe2, 0xdd, 0xaa, 0xd4, 0xee, 0x43, 0xe2, 0xaa, 0x54, 0xa5, 0x92, 0xaa, 0x3c, 0xe4, - 0x31, 0x2f, 0x79, 0xc8, 0xcf, 0xc8, 0xed, 0x07, 0xe4, 0x27, 0xe4, 0x3d, 0x7f, 0x20, 0x6f, 0xa9, - 0xbe, 0x60, 0xd0, 0x37, 0x00, 0x63, 0xcb, 0xa9, 0x3c, 0x71, 0x70, 0xfa, 0x9c, 0xd3, 0xa7, 0x6f, - 0xa7, 0x4f, 0xf7, 0xf9, 0x9a, 0xb0, 0xb2, 0xe1, 0x7b, 0x28, 0x48, 0xf6, 0xec, 0x01, 0x0a, 0x42, - 0x17, 0x1d, 0x44, 0x61, 0x12, 0x3a, 0xa1, 0xbf, 0x36, 0xc4, 0x3f, 0x8c, 0xcb, 0xc7, 0xb6, 0x1b, - 0x86, 0xc3, 0xb5, 0x63, 0xb7, 0x17, 0x37, 0xe7, 0xbb, 0xc8, 0x19, 0x45, 0x5e, 0x72, 0x41, 0x0b, - 0x9b, 0x80, 0xa9, 0xec, 0xf7, 0x9c, 0xed, 0x30, 0x99, 0xe6, 0xe5, 0x73, 0x3b, 0x49, 0x22, 0xf6, - 0x51, 0x47, 0x81, 0x13, 0x5d, 0x0c, 0x13, 0x2f, 0x0c, 0x18, 0xe5, 0xaa, 0x17, 0x84, 0x89, 0xd7, - 0x4b, 0x95, 0x5c, 0x47, 0x91, 0x1d, 0x8f, 0x22, 0xe4, 0x84, 0xae, 0x17, 0xf4, 0x29, 0xd1, 0xb4, - 0xe1, 0xd6, 0x36, 0x4a, 0x9e, 0xf9, 0xa1, 0x73, 0xb2, 0x1b, 0x3a, 0x36, 0x96, 0x8e, 0x2d, 0xf4, - 0xe7, 0x23, 0x14, 0x27, 0xc4, 0x40, 0xa3, 0x0e, 0x53, 0x71, 0xe4, 0x34, 0x2a, 0xad, 0xea, 0xea, - 0x9c, 0x85, 0x7f, 0x1a, 0x37, 0x60, 0x26, 0xec, 0xf5, 0x62, 0x94, 0x34, 0xaa, 0xad, 0xea, 0x6a, - 0xcd, 0x62, 0x5f, 0x98, 0xee, 0xa3, 0xa0, 0x9f, 0x1c, 0x37, 0xa6, 0x28, 0x9d, 0x7e, 0x99, 0x47, - 0x70, 0x5b, 0x53, 0x45, 0x3c, 0x0c, 0x83, 0x98, 0x76, 0x82, 0xf1, 0xa7, 0x30, 0xe7, 0xa7, 0x25, - 0x8d, 0x4a, 0xab, 0xb2, 0x7a, 0x79, 0xfd, 0xce, 0x1a, 0xd7, 0x1f, 0x6b, 0x44, 0x0e, 0xb9, 0x44, - 0x47, 0x4c, 0x64, 0xac, 0x4c, 0xc2, 0xbc, 0x43, 0xda, 0xd0, 0x45, 0xd1, 0x29, 0x8a, 0x36, 0x51, - 0xcf, 0x1e, 0xf9, 0x89, 0xd0, 0x06, 0xd3, 0x27, 0x16, 0xc8, 0x0c, 0xbc, 0x05, 0xbf, 0x84, 0xf9, - 0x58, 0x28, 0x26, 0x0d, 0xbe, 0xbc, 0x6e, 0x0a, 0x66, 0x6c, 0xc5, 0xa2, 0x0e, 0x6a, 0x89, 0x24, - 0x69, 0xfe, 0x57, 0x15, 0x8c, 0x8d, 0x08, 0xd9, 0x09, 0x2a, 0xe9, 0xc8, 0x4f, 0x61, 0x66, 0x60, - 0xc7, 0x27, 0xc8, 0x25, 0x1d, 0x79, 0x79, 0xfd, 0xb6, 0x54, 0xd9, 0x01, 0x8a, 0x06, 0x5e, 0x1c, - 
0x7b, 0x61, 0x40, 0x2b, 0x62, 0xdc, 0xc6, 0x6d, 0x00, 0x67, 0x3c, 0x95, 0x48, 0x67, 0xcf, 0x59, - 0x1c, 0x85, 0x94, 0x93, 0xfa, 0xb7, 0x7c, 0xbb, 0xdf, 0xa8, 0xb5, 0xaa, 0xab, 0x57, 0x2d, 0x8e, - 0x62, 0x98, 0x70, 0x85, 0x7e, 0x1d, 0xd8, 0x11, 0x0a, 0x92, 0xc6, 0x74, 0xab, 0xba, 0x3a, 0x6b, - 0x09, 0x34, 0xa3, 0x05, 0x97, 0x23, 0x34, 0xf4, 0x3d, 0xda, 0xc7, 0x8d, 0x19, 0xa2, 0x84, 0x27, - 0x19, 0x2b, 0x30, 0xf7, 0x06, 0x8f, 0x47, 0xd7, 0xfb, 0x16, 0x35, 0x2e, 0x91, 0x11, 0xcf, 0x08, - 0xc6, 0x37, 0xb0, 0x48, 0x66, 0x63, 0x98, 0x4e, 0xf3, 0xaf, 0x50, 0x84, 0x5b, 0xd2, 0x98, 0x6d, - 0x4d, 0xad, 0xce, 0xaf, 0xbf, 0x2b, 0x34, 0x75, 0x43, 0xc7, 0x49, 0xdb, 0xac, 0xd7, 0x62, 0x6e, - 0xc3, 0xf5, 0xb4, 0x8b, 0xf9, 0x61, 0xfc, 0x10, 0xaa, 0xbd, 0x74, 0x06, 0xb5, 0x84, 0x2a, 0x7e, - 0xe1, 0xf6, 0xe2, 0x2d, 0xcf, 0x47, 0xdd, 0xc4, 0x4e, 0x46, 0x6c, 0xe0, 0xaa, 0xbd, 0xd8, 0x7c, - 0x0d, 0x46, 0x7b, 0x38, 0x44, 0x81, 0x5b, 0x32, 0x56, 0x62, 0x9f, 0x57, 0x95, 0x3e, 0x37, 0xa0, - 0xd6, 0xc3, 0xbd, 0x3d, 0xd5, 0xaa, 0xac, 0x5e, 0xb5, 0xc8, 0x6f, 0xf3, 0x0f, 0x15, 0xb8, 0x9e, - 0x2a, 0xe7, 0xad, 0xfc, 0x04, 0xa6, 0x49, 0x47, 0x31, 0x43, 0x6f, 0xe7, 0x4e, 0x75, 0x6a, 0x26, - 0x65, 0x36, 0x3e, 0x81, 0x5a, 0x9c, 0xd8, 0x78, 0xd1, 0x4d, 0xd6, 0x3a, 0xc2, 0x6d, 0x1e, 0x40, - 0xb3, 0x8b, 0x12, 0x2b, 0x1b, 0xb7, 0x92, 0x76, 0x4a, 0xe3, 0x5e, 0x55, 0xc6, 0xdd, 0xfc, 0x29, - 0x2c, 0xcb, 0x1a, 0xf9, 0xc6, 0xdd, 0x80, 0x99, 0x08, 0xc5, 0x23, 0x3f, 0x21, 0x5a, 0x67, 0x2d, - 0xf6, 0x65, 0x1e, 0xc0, 0x4a, 0x17, 0x25, 0xdd, 0x24, 0x8c, 0xec, 0x3e, 0x3a, 0x08, 0x7d, 0xcf, - 0xb9, 0x28, 0xef, 0xf2, 0x21, 0xe1, 0xe3, 0xbb, 0x3c, 0xa3, 0xe0, 0x65, 0xaf, 0x6a, 0xe4, 0x4c, - 0x31, 0xd7, 0xe1, 0xf6, 0xcb, 0x20, 0xfe, 0x5e, 0x95, 0x9a, 0x77, 0xe1, 0x8e, 0x4e, 0x46, 0x54, - 0xbb, 0xb2, 0x5d, 0xa4, 0xd4, 0x80, 0xda, 0xd0, 0x4e, 0x8e, 0x99, 0x56, 0xf2, 0xdb, 0x3c, 0xa1, - 0x2e, 0x2a, 0x57, 0xa9, 0xf1, 0x4b, 0xb8, 0x1a, 0xf3, 0xa5, 0xcc, 0xff, 0xdc, 0x17, 0x86, 0x99, - 0x4c, 0x0a, 0x41, 0x09, 0x1d, 0x6a, 0x51, 0xd4, 0x6c, 0x51, 0x77, 0xc7, 0xd1, 0x3c, 0x24, 0x3a, - 0x44, 0x07, 0xee, 0xe8, 0x38, 0x78, 0x83, 0x7e, 0x0e, 0xb3, 0x43, 0x56, 0xd0, 0xa8, 0xb4, 0xa6, - 0x26, 0xb6, 0x65, 0x2c, 0x65, 0x0e, 0xe0, 0x66, 0x17, 0x25, 0x99, 0x13, 0x2b, 0x19, 0xee, 0x9f, - 0x01, 0x0c, 0xc7, 0xbc, 0x13, 0x7a, 0x44, 0x4e, 0xc2, 0x5c, 0x21, 0x33, 0x9d, 0xaf, 0x8e, 0x1f, - 0xb4, 0x37, 0xb0, 0xd0, 0x45, 0xc9, 0xfe, 0x59, 0x80, 0xa2, 0x12, 0x3b, 0x9a, 0x30, 0x3b, 0x8a, - 0x51, 0x14, 0xd0, 0x49, 0x57, 0x59, 0x9d, 0xb3, 0xc6, 0xdf, 0xd8, 0xe7, 0xf5, 0xa3, 0x70, 0x34, - 0x0c, 0xa8, 0xe3, 0xc5, 0x85, 0x19, 0xc1, 0x5c, 0x82, 0xc5, 0xac, 0x0e, 0xbe, 0xf2, 0xbf, 0xa9, - 0x40, 0xa3, 0xfd, 0xc6, 0x0e, 0xdc, 0x30, 0x20, 0xdd, 0x26, 0x58, 0xf0, 0x01, 0x54, 0xde, 0xb0, - 0xd1, 0x16, 0x37, 0xbd, 0xce, 0x79, 0x82, 0x02, 0x57, 0x70, 0x05, 0x95, 0x37, 0xa9, 0xc1, 0x55, - 0x61, 0x3f, 0x3e, 0x0e, 0x7d, 0x17, 0x45, 0x6c, 0x2b, 0x60, 0x5f, 0xc6, 0x4d, 0x98, 0xe9, 0x79, - 0x3e, 0xda, 0x71, 0x1b, 0xb5, 0x56, 0x65, 0xb5, 0xf6, 0xb4, 0xf2, 0xa1, 0xc5, 0x08, 0xe6, 0x32, - 0xdc, 0x14, 0xed, 0xe1, 0xad, 0xfd, 0xae, 0x0a, 0x0b, 0x6d, 0xd7, 0x55, 0x2d, 0xfd, 0xfe, 0x5e, - 0xf1, 0x33, 0x98, 0x1d, 0x46, 0xe8, 0xd4, 0x0b, 0x47, 0x31, 0xe9, 0xae, 0x09, 0x9a, 0x38, 0x16, - 0x30, 0x9e, 0xc1, 0x15, 0x74, 0xee, 0xf8, 0x23, 0x17, 0xed, 0x85, 0x2e, 0x8a, 0x1b, 0x35, 0x32, - 0x0b, 0xc5, 0x29, 0xb1, 0x69, 0x27, 0x36, 0x0e, 0xa6, 0x76, 0x82, 0x1e, 0xdd, 0x23, 0x2c, 0x41, - 0x86, 0xeb, 0x83, 0x69, 0xa9, 0x0f, 0xf0, 0x2e, 0xd8, 0xb3, 0x4f, 0xc3, 0x08, 0xb9, 0x54, 0xfd, - 0x4c, 0x6b, 0x6a, 0x75, 
0xce, 0x12, 0x68, 0xe6, 0x0b, 0x58, 0xcc, 0x7a, 0x22, 0xc7, 0x85, 0x57, - 0x27, 0x76, 0xe1, 0xe6, 0x1f, 0xa6, 0xe0, 0xee, 0x36, 0x4a, 0xda, 0xae, 0xeb, 0x61, 0xc7, 0x69, - 0xfb, 0xa9, 0xf9, 0x25, 0xdd, 0xfc, 0x11, 0x4c, 0xbd, 0xf1, 0x4f, 0xd8, 0x9a, 0x28, 0xed, 0x41, - 0xcc, 0x6b, 0x7c, 0x0e, 0x73, 0xe8, 0xdc, 0x8b, 0x13, 0x2f, 0xe8, 0xe3, 0xae, 0x9f, 0xa4, 0xe7, - 0x32, 0x01, 0xe3, 0x29, 0xcc, 0xb2, 0x6e, 0x9c, 0xb4, 0xdb, 0xc7, 0xfc, 0xc6, 0x1a, 0x18, 0xc1, - 0x68, 0x90, 0xb5, 0x91, 0xf6, 0xee, 0x34, 0xd9, 0x48, 0x34, 0x25, 0xd2, 0x1c, 0x9a, 0x51, 0xe6, - 0xd0, 0x3a, 0x2c, 0xa4, 0x86, 0x31, 0x77, 0xf3, 0x72, 0xe4, 0xb9, 0x71, 0xe3, 0x12, 0x19, 0x2f, - 0x6d, 0x19, 0x37, 0xec, 0xb3, 0xf2, 0xd4, 0x7f, 0x0d, 0x66, 0xce, 0x10, 0xbc, 0xfd, 0xf8, 0xfe, - 0x43, 0x05, 0x16, 0x36, 0xc2, 0xc1, 0xd0, 0x47, 0xa5, 0xb1, 0x5f, 0xd9, 0xca, 0xf9, 0x18, 0x6a, - 0xbe, 0x1d, 0x27, 0x93, 0xae, 0x1a, 0xc2, 0x5c, 0xb4, 0xe2, 0x1f, 0xc3, 0x62, 0x66, 0xd9, 0x24, - 0xfb, 0xf5, 0x4b, 0x58, 0xb6, 0xd0, 0x30, 0x8c, 0x92, 0x67, 0x36, 0x8b, 0xbb, 0x85, 0x16, 0x7d, - 0x0a, 0x33, 0xa4, 0xcd, 0xe9, 0xe6, 0x50, 0xd6, 0x43, 0x8c, 0xdb, 0xbc, 0x0d, 0x2b, 0x8a, 0x5a, - 0xde, 0xf9, 0x3c, 0x05, 0x63, 0x23, 0x0c, 0x1c, 0x3b, 0x91, 0xfb, 0x2f, 0x89, 0xfa, 0x69, 0xff, - 0x25, 0x51, 0x1f, 0x6f, 0xb2, 0x71, 0xe4, 0xc4, 0x8d, 0x2a, 0x99, 0x05, 0xe4, 0xb7, 0xb9, 0x08, - 0xd7, 0x53, 0x59, 0x5e, 0x65, 0x0f, 0x16, 0x0e, 0xa3, 0x11, 0xa6, 0x97, 0x0d, 0xca, 0x0a, 0xcc, - 0x05, 0xe8, 0x6c, 0x97, 0x1e, 0x62, 0xe8, 0xe1, 0x26, 0x23, 0x94, 0x85, 0xdd, 0xb8, 0x8b, 0xb3, - 0x7a, 0x26, 0xe9, 0xe2, 0x27, 0x60, 0x58, 0x08, 0xef, 0x1c, 0x25, 0x66, 0xd5, 0x61, 0xca, 0x8d, - 0x93, 0xd4, 0xe5, 0xbb, 0x71, 0x62, 0x7e, 0x00, 0xd7, 0x53, 0xc9, 0x49, 0x2a, 0x3a, 0x4a, 0xd9, - 0xd7, 0xbf, 0x6f, 0x4d, 0xc6, 0x7d, 0xb8, 0x1a, 0x9e, 0xa2, 0xe8, 0x2c, 0xf2, 0x12, 0xb4, 0x89, - 0xc8, 0x84, 0xc4, 0x9a, 0x45, 0xa2, 0x79, 0x03, 0x16, 0xc6, 0x15, 0xf0, 0x5d, 0xbf, 0x09, 0xc6, - 0x26, 0x9a, 0x60, 0x35, 0xac, 0xc0, 0x5c, 0x84, 0x4f, 0xbc, 0xb1, 0x77, 0x4a, 0x17, 0xc3, 0xac, - 0x95, 0x11, 0x70, 0x6b, 0x53, 0x2d, 0x93, 0xb4, 0xf6, 0x8f, 0x15, 0x30, 0x5e, 0x9c, 0xb8, 0x5e, - 0x14, 0xff, 0x1f, 0x9d, 0xbf, 0xe4, 0xf3, 0xd3, 0x94, 0x7a, 0x7e, 0xc2, 0x36, 0xa7, 0x36, 0x4c, - 0x62, 0x73, 0x00, 0x37, 0xb6, 0x51, 0xb2, 0x4b, 0x5d, 0x59, 0xb9, 0xeb, 0x88, 0x13, 0x3b, 0x4a, - 0xda, 0xbd, 0x04, 0x45, 0xc4, 0xf4, 0x2b, 0x16, 0x47, 0xc1, 0xe6, 0x05, 0x08, 0xb9, 0xe9, 0x59, - 0x3b, 0x35, 0x8f, 0xa7, 0x99, 0xaf, 0x60, 0x89, 0xaf, 0x8f, 0x37, 0xf1, 0x73, 0xb8, 0xe4, 0x7a, - 0x11, 0x2e, 0x62, 0xe7, 0x13, 0xf1, 0x0c, 0xbc, 0xe9, 0x45, 0xc8, 0x49, 0xc2, 0xe8, 0x82, 0x09, - 0xd3, 0xae, 0x49, 0x45, 0xcc, 0x55, 0x78, 0x88, 0x23, 0xcb, 0xc0, 0x1e, 0xc6, 0xc7, 0x61, 0x92, - 0xd8, 0x6f, 0x7c, 0xb4, 0x49, 0x8b, 0xa4, 0x86, 0xe1, 0xa0, 0xe8, 0xdd, 0x22, 0x56, 0xde, 0x26, - 0x07, 0x16, 0x62, 0x0d, 0x1f, 0x33, 0xf0, 0xb1, 0x60, 0xa0, 0xac, 0x50, 0x63, 0xad, 0x56, 0x99, - 0xf9, 0xd7, 0x15, 0xb2, 0x3b, 0xa7, 0xf2, 0x9b, 0x5e, 0xaf, 0x47, 0x5d, 0x95, 0x30, 0x1e, 0x26, - 0x5c, 0x49, 0xa5, 0xad, 0x30, 0x4c, 0xd8, 0xc0, 0x08, 0x34, 0x12, 0x5a, 0x44, 0xe1, 0x20, 0xd5, - 0xc4, 0xd6, 0x93, 0x40, 0xc3, 0xa3, 0x98, 0x84, 0x63, 0x0e, 0xe6, 0x4d, 0x32, 0x8a, 0xf9, 0x67, - 0x64, 0x9f, 0xd2, 0x19, 0xc3, 0x77, 0xcc, 0x26, 0x80, 0x3b, 0x2e, 0xd2, 0x9e, 0x19, 0x54, 0x0d, - 0x2c, 0x74, 0xce, 0xe4, 0xcc, 0x27, 0x70, 0xc3, 0x42, 0xc4, 0xd1, 0xd9, 0xb1, 0xb8, 0x54, 0x45, - 0x9f, 0x57, 0x51, 0x7c, 0xde, 0x4d, 0x58, 0xe2, 0x25, 0xf9, 0xb5, 0xbf, 0x0b, 0x0d, 0x0b, 0x39, - 0xd8, 0x4f, 0xa8, 0x6a, 0xbf, 0xf7, 0x7e, 0x68, 
0x7e, 0x0c, 0x37, 0x45, 0x6d, 0x93, 0xac, 0xaa, - 0x9b, 0x64, 0x96, 0x6f, 0xc5, 0xf4, 0x58, 0x2c, 0xcc, 0xbe, 0xff, 0xa9, 0x72, 0x65, 0xd2, 0x22, - 0x6d, 0xc2, 0xac, 0x63, 0x0f, 0x6d, 0xc7, 0x4b, 0xe8, 0x31, 0xac, 0x66, 0x8d, 0xbf, 0xf1, 0xbe, - 0x33, 0x8a, 0x99, 0xc7, 0xa8, 0x59, 0xe4, 0x37, 0xf5, 0x5e, 0x03, 0xdb, 0x0b, 0xbc, 0xa0, 0xcf, - 0xee, 0xbe, 0x32, 0x82, 0xf1, 0x1e, 0xd4, 0x47, 0x81, 0x8b, 0xa2, 0xa3, 0xf4, 0x10, 0x8d, 0x5c, - 0x72, 0x27, 0x53, 0xb3, 0xae, 0x11, 0xba, 0x35, 0x26, 0x1b, 0x0f, 0x60, 0xde, 0x09, 0xa3, 0x68, - 0x34, 0x4c, 0x8e, 0xd8, 0xe6, 0x3a, 0x4d, 0x18, 0xaf, 0x32, 0x2a, 0xdd, 0x30, 0x31, 0x1b, 0xf1, - 0x4b, 0x41, 0x3f, 0x65, 0x9b, 0xa1, 0x6c, 0x8c, 0xca, 0xd8, 0x7e, 0x0a, 0x4b, 0x29, 0x1b, 0xae, - 0xfa, 0x28, 0x0c, 0x50, 0xca, 0x7f, 0x09, 0x87, 0x07, 0xd6, 0x02, 0x2b, 0xc6, 0x16, 0xec, 0x07, - 0x88, 0x89, 0xad, 0x42, 0x9d, 0x72, 0x1d, 0x79, 0xc1, 0x51, 0x6f, 0x94, 0x8c, 0x22, 0x44, 0xa3, - 0x28, 0x6b, 0x9e, 0xd2, 0x77, 0x82, 0x2d, 0x42, 0x35, 0x3e, 0x85, 0xa5, 0x21, 0x0a, 0x5c, 0x5c, - 0x81, 0x8b, 0xfd, 0xb3, 0x17, 0x06, 0x69, 0x05, 0x73, 0x44, 0x60, 0x91, 0x15, 0x6f, 0xb2, 0x52, - 0x5a, 0x83, 0xf9, 0x35, 0x39, 0x0c, 0x67, 0x81, 0x97, 0xb2, 0xc6, 0x9e, 0x40, 0x2d, 0xb9, 0x18, - 0xd2, 0xf9, 0x36, 0x2f, 0xcd, 0x67, 0x51, 0xec, 0xf0, 0x62, 0x88, 0x58, 0x04, 0x84, 0x25, 0xcc, - 0x03, 0x72, 0xf4, 0x95, 0x55, 0xf3, 0x83, 0xbb, 0x06, 0x55, 0xd7, 0xd3, 0x06, 0x2d, 0x6a, 0x50, - 0x5b, 0x75, 0x3d, 0xf3, 0xb7, 0x70, 0x9f, 0xd3, 0xc8, 0xa2, 0xcc, 0x1f, 0xd5, 0xe6, 0x7f, 0xae, - 0x40, 0x53, 0xab, 0x9f, 0x2a, 0x7e, 0x06, 0x57, 0x5c, 0xce, 0x32, 0x6d, 0x44, 0xaa, 0x39, 0x06, - 0xf1, 0x32, 0xc6, 0x36, 0xcc, 0xc7, 0xbc, 0x66, 0x1a, 0x37, 0xc9, 0x71, 0xa5, 0x5a, 0xb9, 0x25, - 0x89, 0x99, 0xdf, 0x55, 0xe0, 0x41, 0x7e, 0x77, 0xf0, 0xfd, 0x7c, 0x04, 0x37, 0x5c, 0x1d, 0x57, - 0x1a, 0x30, 0xbe, 0xab, 0x6d, 0x80, 0xc6, 0x84, 0x1c, 0x35, 0xe6, 0x17, 0xc4, 0x5b, 0x1f, 0x44, - 0xa8, 0x87, 0xa2, 0x88, 0xc5, 0x9a, 0x5d, 0xef, 0x5b, 0xd1, 0xd1, 0x34, 0x61, 0x16, 0x07, 0xc0, - 0x41, 0xe6, 0xbd, 0xc6, 0xdf, 0xe6, 0x53, 0xe2, 0x61, 0x75, 0x0a, 0xf8, 0x76, 0x2c, 0xc0, 0xf4, - 0x9b, 0xd8, 0xfb, 0x16, 0x31, 0x4f, 0x40, 0x3f, 0xcc, 0x04, 0x96, 0xba, 0x28, 0xe9, 0xda, 0x3d, - 0xf4, 0x42, 0x3e, 0xbe, 0x3d, 0x81, 0x19, 0xdb, 0x21, 0x1b, 0x2f, 0x9d, 0x0a, 0xe2, 0x4d, 0x5d, - 0x2a, 0xd2, 0x26, 0x2c, 0x2c, 0xae, 0xa0, 0xfc, 0xc6, 0x1d, 0xb8, 0xe4, 0x1c, 0x23, 0x87, 0x06, - 0x24, 0x95, 0xd5, 0xd9, 0xa7, 0xd3, 0x3d, 0xdb, 0x8f, 0x91, 0x95, 0x52, 0xcd, 0x75, 0x68, 0x08, - 0xb5, 0x4e, 0xe2, 0x03, 0x5f, 0xc1, 0xcd, 0xae, 0x7d, 0x8a, 0xb0, 0x13, 0x8d, 0x87, 0xb6, 0x23, - 0xda, 0x7a, 0x17, 0x20, 0xf1, 0x06, 0xe8, 0x95, 0x17, 0xb8, 0xe1, 0x19, 0xd9, 0x4d, 0xc9, 0xa1, - 0x81, 0x23, 0x1a, 0x4b, 0x30, 0x9d, 0x9c, 0x6f, 0xdb, 0x43, 0x62, 0x12, 0x29, 0xa5, 0xdf, 0xe6, - 0x13, 0x68, 0x4a, 0x8a, 0x45, 0x1f, 0x3a, 0x1d, 0xdb, 0xa7, 0xc8, 0x25, 0x4a, 0x67, 0x9f, 0xd6, - 0x92, 0x68, 0x84, 0x2c, 0x4a, 0x32, 0x97, 0x60, 0xd1, 0x0a, 0x7d, 0xbf, 0xe3, 0x7a, 0xd2, 0x3d, - 0xfd, 0xcf, 0xe1, 0x06, 0x57, 0xc0, 0xab, 0x7b, 0x08, 0xf3, 0x01, 0x3a, 0xeb, 0xa2, 0xfe, 0x00, - 0x05, 0xc9, 0xe1, 0xf9, 0x8e, 0xcb, 0x86, 0x43, 0xa2, 0x9a, 0x9f, 0x40, 0xcb, 0x42, 0x78, 0xce, - 0xa2, 0x2d, 0xdb, 0xf3, 0x91, 0x3b, 0x9e, 0x33, 0xe2, 0xe6, 0x63, 0x67, 0x87, 0x09, 0x3b, 0xea, - 0x9b, 0x9f, 0xc1, 0x5d, 0xbd, 0xd4, 0x24, 0x1d, 0xdc, 0xc4, 0xfb, 0x5c, 0x2f, 0x42, 0xf1, 0x31, - 0x39, 0xcf, 0x0a, 0x0d, 0x5a, 0xc6, 0xbb, 0x16, 0x5f, 0xc6, 0x6f, 0x90, 0xb7, 0x60, 0x79, 0xcb, - 0x0b, 0x6c, 0xdf, 0xfb, 0x16, 0xbd, 0x1c, 0xf6, 0x23, 0x5b, 0x9c, 0x47, 
0xf8, 0xa4, 0xa4, 0x14, - 0xf3, 0xe2, 0xbf, 0x81, 0x26, 0xee, 0x2c, 0x2f, 0xe8, 0x6b, 0xa4, 0x8d, 0x9f, 0x49, 0xb3, 0xf0, - 0xa1, 0x30, 0x0b, 0x45, 0x41, 0xcd, 0x5c, 0x34, 0xff, 0xb3, 0x02, 0x4b, 0x22, 0xd7, 0xd8, 0xb7, - 0x60, 0xdd, 0x31, 0xd9, 0x51, 0x99, 0x2f, 0x2a, 0xd2, 0xcd, 0xdf, 0x48, 0x33, 0x29, 0xbc, 0x5f, - 0x92, 0x70, 0xf5, 0xd0, 0x63, 0x5b, 0x7d, 0xcd, 0xca, 0x08, 0x24, 0x78, 0x62, 0xed, 0x26, 0x0c, - 0x74, 0x43, 0x15, 0x68, 0xc6, 0x27, 0xb0, 0x48, 0xa3, 0x6d, 0x17, 0xd7, 0xf6, 0xc6, 0x76, 0x4e, - 0x76, 0x06, 0x76, 0x9f, 0x5c, 0x56, 0xe0, 0xa1, 0xd1, 0x17, 0x9a, 0x31, 0x2c, 0xcb, 0x3d, 0xc6, - 0x0f, 0xf0, 0x21, 0x18, 0x91, 0xd2, 0x62, 0x16, 0x62, 0xde, 0x2f, 0x68, 0x62, 0xe6, 0x74, 0x35, - 0xf2, 0xe6, 0x3e, 0xdc, 0xc5, 0xd1, 0xe5, 0x06, 0xdd, 0xc1, 0xb7, 0x3c, 0x1f, 0x69, 0x4e, 0xd3, - 0x9a, 0x2b, 0x63, 0x3c, 0xdf, 0x9c, 0x30, 0x3c, 0xf1, 0xd2, 0x5b, 0x48, 0xf6, 0x65, 0x3a, 0x60, - 0xe6, 0x28, 0x14, 0x53, 0x6a, 0x97, 0x58, 0xd0, 0xc0, 0x06, 0xe9, 0x9e, 0x98, 0x71, 0x91, 0xa5, - 0x59, 0x18, 0xcf, 0x64, 0xcc, 0x75, 0x58, 0x78, 0x81, 0x12, 0x1b, 0x2f, 0xf0, 0x89, 0xfd, 0xe9, - 0x12, 0x2c, 0x66, 0x32, 0xfc, 0x4c, 0xfd, 0x09, 0x0d, 0xb5, 0x3c, 0x9f, 0xf4, 0x48, 0xc9, 0x05, - 0xfc, 0x2e, 0x34, 0x04, 0xe6, 0xb7, 0x4b, 0xef, 0xbc, 0x0f, 0x8d, 0x1d, 0x52, 0xb0, 0xe1, 0x87, - 0x31, 0x2a, 0x49, 0xf2, 0xe0, 0x20, 0x53, 0xe4, 0x9e, 0x64, 0xfd, 0xff, 0x5b, 0x05, 0x96, 0x36, - 0x6c, 0xe7, 0x98, 0x9d, 0x35, 0xbc, 0x53, 0x6e, 0xa5, 0xcc, 0x43, 0xd5, 0xa3, 0x2e, 0x70, 0xca, - 0xaa, 0x7a, 0xee, 0x78, 0x9c, 0xe9, 0x88, 0xd2, 0x71, 0x96, 0x32, 0x2e, 0x34, 0x81, 0x24, 0x64, - 0xda, 0xb0, 0x54, 0x18, 0xfa, 0xe4, 0x52, 0x07, 0x4b, 0x85, 0xa1, 0x6f, 0xec, 0x01, 0xa0, 0xf3, - 0xa1, 0x17, 0x51, 0xa1, 0x69, 0xd2, 0x25, 0x6b, 0xe2, 0x10, 0x2b, 0x36, 0x75, 0xc6, 0x02, 0xec, - 0x08, 0x90, 0x69, 0x30, 0x7f, 0x0d, 0x77, 0x4b, 0x05, 0x70, 0x17, 0x0c, 0x3c, 0xdf, 0xf7, 0xe8, - 0xc2, 0x9f, 0xb2, 0xd8, 0x17, 0x0e, 0xde, 0xbd, 0xd8, 0x42, 0xbe, 0x9d, 0x64, 0xe7, 0x77, 0x8e, - 0x62, 0xfe, 0x6b, 0x05, 0x1a, 0xa2, 0x76, 0x12, 0x75, 0x53, 0xa5, 0x2d, 0xb8, 0xfc, 0xe6, 0x22, - 0x41, 0xf1, 0x1e, 0x42, 0x2e, 0x72, 0x99, 0x66, 0x9e, 0x34, 0xe6, 0x20, 0x2a, 0x68, 0xe8, 0x9d, - 0x72, 0x50, 0x12, 0xe6, 0xc0, 0xd3, 0x30, 0xd5, 0x31, 0x45, 0x39, 0x38, 0xd2, 0x98, 0x83, 0xe9, - 0xa8, 0x71, 0x1c, 0x4c, 0xc7, 0x6d, 0x80, 0x63, 0x3b, 0x26, 0x4d, 0x46, 0x2e, 0xcb, 0x89, 0x72, - 0x14, 0xf3, 0x02, 0x6e, 0xb5, 0x5d, 0x57, 0x6c, 0x86, 0x1c, 0x01, 0x7a, 0x59, 0x80, 0x76, 0xbf, - 0x64, 0x30, 0x58, 0x04, 0x88, 0x25, 0xc8, 0xe1, 0x07, 0x33, 0x6c, 0xf9, 0x76, 0x3f, 0x26, 0x93, - 0xe3, 0xaa, 0xc5, 0x51, 0xcc, 0x0f, 0xe1, 0xb6, 0xa6, 0x6a, 0x7e, 0x72, 0xa6, 0x13, 0xad, 0x4a, - 0x27, 0x9a, 0xf9, 0x7b, 0x68, 0xbd, 0x08, 0x5d, 0xaf, 0x77, 0xf1, 0xff, 0x62, 0xef, 0x3d, 0xb8, - 0xab, 0xaf, 0x5d, 0x4c, 0xa3, 0xb5, 0x2c, 0x34, 0x08, 0x4f, 0x51, 0x81, 0x89, 0x72, 0xb3, 0xee, - 0xe1, 0x8d, 0x5a, 0x27, 0xc3, 0x2b, 0x3e, 0x83, 0x3b, 0xc4, 0x41, 0x0a, 0x2c, 0xa2, 0xbf, 0xbd, - 0x01, 0x33, 0xc3, 0x08, 0x9d, 0xee, 0xa4, 0xba, 0xd9, 0x97, 0xf1, 0x39, 0xb9, 0x40, 0x4d, 0xaf, - 0x55, 0x26, 0xed, 0x14, 0x26, 0x63, 0xfe, 0x9d, 0x32, 0xcd, 0x3b, 0x41, 0x12, 0x5d, 0xbc, 0x6d, - 0x6f, 0x7f, 0x06, 0xd3, 0x78, 0xe3, 0x8c, 0x99, 0x4d, 0x0f, 0x0a, 0x44, 0xb3, 0x65, 0x65, 0x51, - 0x19, 0xf3, 0x2f, 0xa0, 0xa5, 0xed, 0x0c, 0x7e, 0xf2, 0xb4, 0x61, 0x16, 0xf9, 0x08, 0xc7, 0x50, - 0x69, 0x70, 0x5e, 0x54, 0x47, 0xd6, 0x26, 0x6b, 0x2c, 0x66, 0x34, 0xe0, 0xd2, 0xb1, 0x1d, 0xbf, - 0x08, 0xa3, 0x74, 0xf9, 0xa7, 0x9f, 0xe6, 0xbf, 0x57, 0xc0, 0x20, 0x0a, 0x0e, 0xc2, 0xd0, 0xcf, - 
0x3c, 0x63, 0x13, 0x66, 0xb1, 0x1f, 0x63, 0xd7, 0x0a, 0x24, 0xcb, 0x96, 0x7e, 0xe3, 0xf8, 0x20, - 0x3c, 0x0b, 0x50, 0xb4, 0x97, 0xa5, 0xe0, 0x32, 0xc2, 0x38, 0x07, 0xb7, 0x27, 0xe7, 0xe0, 0xd2, - 0x3c, 0xfc, 0x20, 0x74, 0x11, 0xf1, 0x95, 0xd3, 0x16, 0xf9, 0x8d, 0x43, 0x78, 0xdf, 0x1b, 0x78, - 0x09, 0x71, 0x93, 0x53, 0x16, 0xfd, 0x30, 0xde, 0x87, 0x77, 0x06, 0xf6, 0x79, 0xea, 0xa3, 0xc8, - 0x2a, 0xbf, 0x68, 0xcc, 0x10, 0x0e, 0xb5, 0xc0, 0xfc, 0x8f, 0x0a, 0x5c, 0x1f, 0x37, 0xe3, 0x47, - 0xf6, 0x5e, 0x0f, 0x61, 0x9e, 0x7c, 0xee, 0x9f, 0xa2, 0x88, 0x1a, 0x4a, 0x1d, 0x98, 0x44, 0x95, - 0xbd, 0x5c, 0xad, 0xd4, 0xcb, 0x4d, 0x2b, 0x5e, 0xce, 0xdc, 0x87, 0x46, 0xea, 0x4a, 0x70, 0x4b, - 0x84, 0x55, 0xf1, 0xb1, 0x30, 0x45, 0xef, 0xa8, 0x73, 0x40, 0x18, 0x42, 0x3a, 0x3b, 0x49, 0x2a, - 0x51, 0x50, 0xc8, 0x2f, 0x45, 0x0b, 0x96, 0x39, 0x47, 0xf0, 0xe3, 0x54, 0x78, 0x1b, 0x56, 0x14, - 0x9d, 0x7c, 0x9d, 0x7f, 0x02, 0xcb, 0x9c, 0x8f, 0x50, 0xea, 0x14, 0x27, 0x5e, 0x95, 0x9f, 0x78, - 0x34, 0x39, 0x21, 0x89, 0xf2, 0xaa, 0x7f, 0x0e, 0xcd, 0xf1, 0x62, 0xc2, 0xa5, 0xb1, 0x7c, 0x33, - 0x88, 0xdd, 0xc8, 0x81, 0xa8, 0x5d, 0xa0, 0xe1, 0x10, 0x54, 0xd6, 0xc0, 0xaf, 0xc4, 0xa7, 0x70, - 0x09, 0x05, 0x49, 0x94, 0xe5, 0xdc, 0x5b, 0xfa, 0x3e, 0xe1, 0xd6, 0x60, 0x2a, 0x50, 0xb0, 0x04, - 0xff, 0xc8, 0xcf, 0x5d, 0xce, 0x25, 0xfd, 0x90, 0xee, 0x37, 0x3e, 0x15, 0xbd, 0x51, 0x8e, 0x81, - 0xaa, 0x23, 0x7a, 0x0c, 0xcb, 0x2c, 0xae, 0xdb, 0xf5, 0x82, 0x93, 0x09, 0x02, 0xc1, 0x03, 0x02, - 0xb3, 0x10, 0x05, 0xde, 0x2e, 0x18, 0xfc, 0x98, 0xdc, 0x3b, 0x6d, 0x84, 0x41, 0x82, 0x82, 0xa4, - 0x3b, 0x1a, 0x0c, 0xec, 0xa8, 0x1c, 0xb9, 0xf1, 0x1b, 0x72, 0xa3, 0x24, 0x0b, 0x49, 0x83, 0x16, - 0x53, 0x3a, 0xeb, 0x49, 0xa9, 0x4f, 0x04, 0x51, 0x36, 0x68, 0x4c, 0xc0, 0x7c, 0x0c, 0x37, 0xb7, - 0x51, 0xf2, 0xe5, 0x28, 0x4c, 0xec, 0x97, 0xb1, 0x7c, 0x50, 0xd5, 0x99, 0x73, 0x00, 0x4d, 0x49, - 0x80, 0x37, 0x65, 0x1d, 0xa6, 0x47, 0x98, 0xca, 0x0c, 0x59, 0x11, 0x0c, 0xc9, 0x84, 0xd8, 0xc0, - 0x10, 0x56, 0xf3, 0x5f, 0x2a, 0x04, 0x1a, 0x41, 0x4a, 0x4b, 0x0f, 0x25, 0xf8, 0x1c, 0x9e, 0x1e, - 0xf8, 0x89, 0x04, 0x3b, 0xbf, 0x49, 0x54, 0xec, 0x5c, 0xd9, 0xcd, 0x11, 0xc7, 0x4a, 0x4f, 0x72, - 0x6a, 0x81, 0xf1, 0x05, 0x5c, 0x66, 0xc4, 0xc3, 0x8b, 0x21, 0xf5, 0xdd, 0xf3, 0xeb, 0xb7, 0x74, - 0x77, 0x53, 0xd9, 0xdd, 0x19, 0x2f, 0xc1, 0x90, 0x17, 0xac, 0x09, 0xfc, 0x8a, 0xfd, 0xcb, 0x0a, - 0xbc, 0xb3, 0x15, 0x5f, 0x04, 0x4e, 0x39, 0xa6, 0x91, 0x5e, 0x36, 0xb3, 0xab, 0x67, 0xf6, 0x65, - 0xbc, 0x0f, 0xd7, 0x7c, 0x3b, 0x66, 0xe0, 0xc5, 0x14, 0xdc, 0x58, 0x59, 0x35, 0x9e, 0x56, 0x3f, - 0xf8, 0xc8, 0x92, 0x8b, 0x8a, 0xf2, 0xaf, 0x0b, 0x60, 0x30, 0x3b, 0x78, 0xf3, 0x0e, 0x49, 0xd7, - 0xe3, 0x23, 0x6d, 0x59, 0xae, 0x6a, 0x01, 0xa6, 0x07, 0x49, 0x76, 0x5e, 0xa6, 0x1f, 0x98, 0x6a, - 0x27, 0xd9, 0x21, 0x99, 0x7e, 0xb0, 0xde, 0x60, 0x5a, 0xf9, 0xea, 0xfe, 0xa9, 0x02, 0x37, 0x29, - 0x6c, 0xae, 0x7b, 0x31, 0xf0, 0xbd, 0xe0, 0x44, 0x0e, 0x8a, 0x12, 0x3b, 0xea, 0xa3, 0x34, 0xa7, - 0xc1, 0xbe, 0xf0, 0x3c, 0xc0, 0xbc, 0xac, 0x67, 0xc8, 0x6f, 0xe3, 0x09, 0x49, 0x12, 0x1d, 0xa0, - 0x68, 0x40, 0xaa, 0x2e, 0xcf, 0x9d, 0xa5, 0xec, 0x4a, 0xf2, 0xac, 0xa6, 0x49, 0x9e, 0xad, 0x40, - 0x53, 0x32, 0x93, 0x6f, 0x05, 0x5d, 0x33, 0xd8, 0x29, 0x1c, 0x12, 0x03, 0x4b, 0xd7, 0xcc, 0xe7, - 0x64, 0xcd, 0xf0, 0x02, 0xfc, 0x9a, 0xb9, 0x0d, 0x40, 0x1b, 0x7a, 0x40, 0xe5, 0x2a, 0x24, 0x11, - 0x33, 0xa6, 0x98, 0xbf, 0x03, 0xf3, 0xe5, 0xd0, 0xb5, 0x13, 0x7a, 0x50, 0xde, 0x0a, 0xa3, 0x03, - 0x6f, 0x88, 0x7c, 0x2f, 0x10, 0xd7, 0xea, 0x4f, 0x45, 0xc0, 0x40, 0x69, 0xc2, 0x9e, 0x81, 0xfa, - 0xca, 0xd2, 0x1e, 0xbf, 
0x86, 0x7b, 0x79, 0x95, 0xbf, 0x3d, 0x5c, 0xe1, 0x6f, 0xab, 0xd0, 0xa4, - 0xda, 0xb5, 0x4d, 0x2a, 0xc9, 0xfd, 0x18, 0x9f, 0xc1, 0x6c, 0xe8, 0x53, 0xb5, 0x93, 0x42, 0x53, - 0xc6, 0x02, 0x58, 0x38, 0x40, 0x67, 0x54, 0x78, 0x6a, 0x42, 0xe1, 0x54, 0xc0, 0x78, 0x42, 0x84, - 0x79, 0x54, 0xd0, 0x8a, 0xfe, 0x3a, 0x7c, 0x33, 0x93, 0x1c, 0x83, 0x4d, 0x98, 0xdf, 0xd8, 0xd9, - 0x8c, 0x1b, 0xd3, 0x04, 0x3c, 0xc0, 0x51, 0xcc, 0x5b, 0xb0, 0x2c, 0xf7, 0x88, 0x18, 0x00, 0xb4, - 0xba, 0x28, 0x79, 0x66, 0xfb, 0x76, 0xe0, 0xa0, 0xe8, 0x99, 0x1d, 0xb8, 0x67, 0x9e, 0x9b, 0x1c, - 0x0b, 0xdd, 0xb6, 0x02, 0x73, 0x6f, 0xd2, 0x02, 0x16, 0x0f, 0x66, 0x04, 0x7c, 0x82, 0xd1, 0x6b, - 0xe0, 0xab, 0x31, 0xa1, 0xc5, 0x2e, 0xd9, 0x3b, 0x63, 0x4c, 0xf7, 0x73, 0x24, 0xec, 0x55, 0xe6, - 0x29, 0xb9, 0xfe, 0xd6, 0xf0, 0xf0, 0xf3, 0xe2, 0x4b, 0x78, 0xc7, 0x95, 0x39, 0xd8, 0x96, 0x79, - 0x4f, 0xe9, 0x31, 0x81, 0x8b, 0x76, 0x9c, 0x2a, 0x6d, 0xba, 0xe3, 0xb5, 0x99, 0x66, 0x3c, 0x7f, - 0x40, 0x76, 0x34, 0xfd, 0xe6, 0x22, 0x7c, 0x81, 0x66, 0xb6, 0x61, 0x59, 0xae, 0x85, 0x6f, 0x17, - 0xa7, 0xe2, 0x20, 0x5b, 0xed, 0x02, 0xcd, 0xfc, 0xfb, 0x0a, 0x34, 0x29, 0x28, 0xe1, 0x07, 0x5b, - 0xba, 0x0a, 0xd7, 0xd2, 0xef, 0x7d, 0xdf, 0xe5, 0x96, 0xa8, 0x4c, 0xe6, 0x39, 0xf7, 0xd0, 0x19, - 0x07, 0x10, 0x91, 0xc9, 0x78, 0x86, 0xc9, 0x56, 0xf1, 0x43, 0xff, 0x05, 0xdc, 0x6c, 0xfb, 0x7e, - 0x78, 0xf6, 0x43, 0x6d, 0xc6, 0xbe, 0x53, 0x52, 0xc0, 0xab, 0x7f, 0x06, 0x2b, 0x9b, 0x5e, 0x6c, - 0xbf, 0x55, 0x0d, 0x77, 0xe0, 0x96, 0xaa, 0x83, 0xaf, 0xc4, 0x85, 0x26, 0xc5, 0x6b, 0xfc, 0x88, - 0x53, 0xa4, 0xaa, 0x4c, 0x91, 0x5b, 0xb0, 0x2c, 0xd7, 0xc2, 0x1b, 0x71, 0x02, 0x4b, 0x1b, 0xc7, - 0xc8, 0x39, 0x69, 0x3b, 0x0e, 0x8a, 0xcb, 0x6f, 0x5b, 0x3f, 0x67, 0xe7, 0xc6, 0x2a, 0xb9, 0x2d, - 0x5f, 0x15, 0x16, 0x47, 0xdb, 0xe1, 0xa2, 0xe6, 0xb5, 0xad, 0x98, 0xbf, 0x2f, 0x27, 0x52, 0x66, - 0x13, 0x1a, 0x42, 0x65, 0xbc, 0x21, 0xf7, 0xc8, 0x42, 0xdd, 0x18, 0x45, 0x78, 0x6b, 0xeb, 0xb8, - 0x5e, 0xb2, 0x1b, 0xf6, 0x0f, 0xcf, 0x3d, 0xe1, 0x2e, 0xd2, 0x7c, 0x42, 0x72, 0x51, 0x3a, 0x26, - 0x7e, 0xda, 0x1b, 0x50, 0x4b, 0xce, 0xc7, 0x17, 0x22, 0xe4, 0x37, 0x43, 0x23, 0x93, 0x94, 0xc9, - 0x56, 0x14, 0x0e, 0x64, 0xcd, 0x5a, 0x99, 0x6f, 0x48, 0x20, 0x2c, 0xc9, 0x88, 0x18, 0x10, 0x40, - 0xa7, 0xf8, 0x60, 0xcf, 0x50, 0x16, 0x6a, 0x30, 0xd9, 0x19, 0x17, 0xa7, 0x77, 0x89, 0x63, 0xc2, - 0xa3, 0xaf, 0xe1, 0xda, 0xc6, 0xf8, 0xb5, 0x01, 0x55, 0x08, 0x30, 0xb3, 0x61, 0x75, 0xda, 0x87, - 0x9d, 0x7a, 0xc5, 0xb8, 0x0a, 0x73, 0xfb, 0x5f, 0x75, 0xac, 0x57, 0xd6, 0xce, 0x61, 0xa7, 0x5e, - 0xc5, 0x45, 0xed, 0x83, 0x83, 0xce, 0xde, 0x66, 0xbd, 0x66, 0xd4, 0xe1, 0xca, 0x6e, 0xfb, 0xf5, - 0xd7, 0x47, 0x07, 0x1d, 0xab, 0xbb, 0xd3, 0x3d, 0xac, 0xd7, 0x31, 0xf3, 0x5e, 0xe7, 0xd5, 0xd1, - 0xb3, 0xdd, 0xfd, 0x8d, 0xe7, 0xf5, 0xd6, 0xa3, 0xe7, 0xb0, 0x94, 0x93, 0x4c, 0x35, 0x2e, 0xc1, - 0x54, 0x7b, 0x77, 0xb7, 0x5e, 0x31, 0x66, 0xa1, 0xb6, 0xbb, 0xf3, 0x15, 0x56, 0x3d, 0x0b, 0xb5, - 0xcd, 0x4e, 0x7b, 0xb3, 0x3e, 0x65, 0x5c, 0x87, 0x6b, 0x9b, 0x9d, 0x8d, 0xfd, 0x17, 0x2f, 0x76, - 0xba, 0xdd, 0x9d, 0xfd, 0xbd, 0x9d, 0xbd, 0xed, 0x7a, 0xed, 0xd1, 0x31, 0x5c, 0xd7, 0xa4, 0xe3, - 0x0c, 0x03, 0xe6, 0xbb, 0xed, 0xad, 0xce, 0x8b, 0xfd, 0xcd, 0xce, 0xd1, 0x6e, 0xa7, 0xfd, 0x15, - 0xb6, 0x99, 0xa7, 0x75, 0xf6, 0x0e, 0x3b, 0x56, 0xbd, 0x8a, 0x8d, 0x1d, 0xd3, 0xb6, 0x3b, 0x87, - 0xf5, 0x29, 0x63, 0x09, 0xae, 0x8f, 0x29, 0x5b, 0xfb, 0xd6, 0x46, 0xe7, 0xa8, 0xf3, 0xab, 0x9d, - 0xc3, 0x7a, 0xed, 0xd1, 0x17, 0x70, 0x33, 0x37, 0xe5, 0x62, 0xcc, 0xc1, 0xf4, 0x97, 0x2f, 0x3b, - 0xd6, 0xd7, 0xf5, 0x0a, 0xfe, 0xd9, 0x3d, 0x6c, 
0x5b, 0x87, 0xf5, 0xaa, 0x71, 0x05, 0x66, 0xb7, - 0x76, 0xf6, 0xda, 0xbb, 0x3b, 0xaf, 0x3b, 0xf5, 0xa9, 0x47, 0xcb, 0x30, 0xbf, 0x91, 0xde, 0xaf, - 0x8d, 0xa5, 0x48, 0x15, 0xf5, 0xca, 0xfa, 0x7f, 0x6f, 0xc1, 0x0d, 0xfd, 0xdb, 0x22, 0xc3, 0x87, - 0x77, 0xfa, 0xf2, 0xdb, 0x1b, 0xe3, 0x91, 0x30, 0x92, 0x85, 0xcf, 0x7f, 0x9a, 0x3f, 0x29, 0xe3, - 0xe5, 0xa7, 0x0d, 0xad, 0x4d, 0x7c, 0x23, 0xa3, 0xd6, 0x96, 0xff, 0x50, 0x47, 0xad, 0xad, 0xe8, - 0xcd, 0xce, 0x73, 0x98, 0xa1, 0x51, 0xa3, 0x21, 0x1d, 0x5d, 0x95, 0xb7, 0x37, 0xcd, 0x96, 0x96, - 0x41, 0x52, 0x66, 0x93, 0xa7, 0x1a, 0x92, 0x32, 0xf5, 0x71, 0x88, 0xa4, 0x4c, 0xf7, 0xc0, 0xc3, - 0x81, 0xf9, 0x58, 0x78, 0x22, 0x61, 0x88, 0xd9, 0xee, 0xfc, 0x17, 0x19, 0xcd, 0xd5, 0x42, 0x46, - 0xbe, 0x12, 0x0f, 0xea, 0xf2, 0x3b, 0x05, 0xe3, 0x3d, 0x59, 0x3a, 0xf7, 0x95, 0x42, 0xf3, 0x51, - 0x09, 0x2b, 0x5f, 0x55, 0x08, 0xc6, 0x48, 0x79, 0x14, 0x61, 0x88, 0x83, 0x55, 0xfc, 0xd2, 0xa2, - 0xf9, 0x7e, 0x29, 0xb3, 0xd4, 0xb6, 0x7e, 0x71, 0xdb, 0xb6, 0x27, 0x6f, 0xdb, 0x76, 0x59, 0xdb, - 0xfa, 0xca, 0x53, 0x08, 0xe3, 0x27, 0x45, 0x1a, 0xa4, 0xd7, 0x14, 0x52, 0xdb, 0xca, 0x1e, 0x56, - 0xfc, 0x16, 0xae, 0xc6, 0xfc, 0x3b, 0x05, 0xe3, 0xa1, 0x3c, 0x12, 0xfa, 0x27, 0x13, 0xcd, 0x77, - 0x8b, 0xf8, 0xc4, 0xa8, 0x6f, 0x36, 0x66, 0xef, 0x10, 0x8c, 0xbb, 0xb2, 0x90, 0xf2, 0x04, 0xa2, - 0x69, 0xe6, 0xb0, 0xf0, 0x2a, 0xbf, 0x81, 0x2b, 0x36, 0xf7, 0x60, 0xc0, 0x10, 0x2f, 0x88, 0xf3, - 0xde, 0x36, 0x34, 0x1f, 0x16, 0xb0, 0x49, 0x16, 0xdb, 0x0c, 0x67, 0x2f, 0x59, 0xac, 0x7b, 0x88, - 0x20, 0x59, 0xac, 0x47, 0xe8, 0x9f, 0xc3, 0x62, 0x5f, 0x87, 0xf3, 0x36, 0xd6, 0xe4, 0xd1, 0x2a, - 0x86, 0xe3, 0x37, 0x1f, 0x4f, 0xc2, 0x2f, 0x35, 0xc6, 0x61, 0x50, 0x6b, 0xa9, 0x31, 0x3a, 0x6c, - 0xb8, 0xd4, 0x18, 0x3d, 0x48, 0xbb, 0x07, 0xd7, 0x22, 0x11, 0x35, 0x6d, 0x88, 0x8e, 0xa2, 0x00, - 0xaa, 0xdd, 0x7c, 0xaf, 0x98, 0x53, 0x76, 0xa9, 0x04, 0x41, 0x2d, 0xbb, 0x54, 0x05, 0x92, 0xdd, - 0x6c, 0x69, 0x19, 0xa4, 0x7e, 0x48, 0x18, 0x1e, 0x5a, 0xea, 0x07, 0x1d, 0x1c, 0x5b, 0xea, 0x07, - 0x3d, 0x92, 0xfa, 0x39, 0xcc, 0x44, 0x24, 0x78, 0x96, 0xec, 0x53, 0x61, 0xd4, 0x92, 0x7d, 0x3a, - 0xb4, 0xf4, 0x1e, 0x5c, 0xa2, 0xca, 0xd6, 0x0d, 0x1d, 0xb3, 0x80, 0x95, 0x6e, 0xde, 0xd5, 0x73, - 0x48, 0xc6, 0x11, 0x18, 0x9c, 0x6c, 0x9c, 0x8a, 0x80, 0x96, 0x8c, 0xd3, 0x81, 0x9b, 0x9f, 0xc3, - 0xcc, 0x80, 0xe0, 0x87, 0x25, 0x65, 0x2a, 0xb0, 0x59, 0x52, 0xa6, 0x43, 0x1d, 0xbf, 0x02, 0xe8, - 0x8f, 0xd1, 0xbe, 0xc6, 0x3d, 0x79, 0x42, 0x6b, 0xd0, 0xb9, 0xcd, 0xfb, 0xb9, 0x4c, 0x92, 0xe2, - 0x68, 0x0c, 0xff, 0x94, 0x14, 0xeb, 0x11, 0xa5, 0x92, 0xe2, 0x1c, 0xf0, 0x28, 0xf6, 0x37, 0x11, - 0x07, 0xf7, 0x94, 0xfc, 0x4d, 0x1e, 0xae, 0x54, 0xf2, 0x37, 0xf9, 0x80, 0xd1, 0x5f, 0x91, 0x0e, - 0x61, 0xe0, 0x4f, 0x43, 0x69, 0xab, 0x0e, 0x31, 0xda, 0xcc, 0xe1, 0xd2, 0x86, 0x40, 0x62, 0x8c, - 0xaa, 0x86, 0x40, 0xf9, 0xd8, 0x47, 0x35, 0x04, 0x2a, 0x02, 0x33, 0xfe, 0xa1, 0x02, 0x8d, 0x7e, - 0x0e, 0x1c, 0xcf, 0xf8, 0x28, 0x4f, 0x53, 0x2e, 0x88, 0xb1, 0xb9, 0x3e, 0xa1, 0x88, 0xea, 0x68, - 0x55, 0x18, 0x9d, 0xea, 0x68, 0x8b, 0xb1, 0x7a, 0xaa, 0xa3, 0x2d, 0x83, 0xe6, 0xbd, 0x86, 0xcb, - 0x71, 0x06, 0x87, 0x93, 0x86, 0x31, 0x07, 0x9e, 0xd7, 0x7c, 0x90, 0xcf, 0x25, 0xef, 0xd2, 0x3c, - 0xba, 0x4d, 0xde, 0xa5, 0xf3, 0x20, 0x75, 0xf2, 0x2e, 0x9d, 0x8f, 0x90, 0x3b, 0x84, 0xb9, 0x28, - 0x05, 0xbb, 0x19, 0xa6, 0x82, 0x2f, 0x52, 0xd0, 0x71, 0xcd, 0x7b, 0x79, 0x3c, 0xbc, 0xd6, 0x11, - 0x2c, 0x44, 0x1a, 0x28, 0x9b, 0xf1, 0x81, 0xb4, 0x32, 0x8a, 0x31, 0x72, 0xcd, 0xb5, 0x09, 0xd8, - 0x95, 0xf5, 0x9a, 0x01, 0xdd, 0x94, 0xf5, 0xaa, 0xc7, 0xc7, 0x29, 0xeb, 
0x35, 0x07, 0x2a, 0x87, - 0xf7, 0xbf, 0x9e, 0x88, 0x85, 0x93, 0xf6, 0xbf, 0x02, 0x20, 0x9d, 0xb4, 0xff, 0x15, 0x61, 0xea, - 0x70, 0xe0, 0x2e, 0x42, 0xb8, 0xa4, 0xc0, 0x3d, 0x1f, 0x70, 0x27, 0x05, 0xee, 0x45, 0x38, 0xb3, - 0x73, 0x58, 0xf4, 0x75, 0x00, 0x2e, 0x69, 0xc1, 0x94, 0xa2, 0xc6, 0xa4, 0x05, 0x33, 0x01, 0x28, - 0xec, 0x4b, 0x98, 0x1d, 0x30, 0x84, 0x96, 0xb4, 0x23, 0xeb, 0xc0, 0x5e, 0xd2, 0x8e, 0xac, 0xc5, - 0x76, 0xe1, 0x35, 0xd8, 0xcf, 0xe0, 0x5a, 0x1a, 0x57, 0xaa, 0x41, 0x7d, 0x49, 0x6b, 0x30, 0x17, - 0xee, 0xe5, 0xc3, 0x3b, 0xb6, 0x0c, 0x7b, 0x91, 0x7c, 0x69, 0x21, 0x22, 0x47, 0xf2, 0xa5, 0x25, - 0x10, 0x9a, 0x11, 0x2c, 0x0c, 0x34, 0xa0, 0x15, 0x69, 0xe5, 0x94, 0xa1, 0x6a, 0xa4, 0x95, 0x53, - 0x0a, 0x83, 0xa1, 0x0b, 0x56, 0x85, 0xb4, 0x28, 0x0b, 0xb6, 0x18, 0x29, 0xa3, 0x2c, 0xd8, 0x12, - 0x90, 0x8c, 0x11, 0xc1, 0x75, 0x5f, 0xc5, 0x85, 0x18, 0xef, 0xab, 0x53, 0x2a, 0x1f, 0x46, 0xd3, - 0xfc, 0xa0, 0x9c, 0x5b, 0x3e, 0x44, 0x70, 0x50, 0x01, 0xf9, 0x10, 0x91, 0x03, 0x4b, 0x90, 0x0f, - 0x11, 0x79, 0x60, 0x03, 0xec, 0x24, 0x06, 0x22, 0x30, 0x40, 0x72, 0x12, 0x05, 0x50, 0x04, 0xc9, - 0x49, 0x14, 0x01, 0x0c, 0x68, 0x30, 0x2e, 0xa0, 0x04, 0x94, 0x60, 0x3c, 0x17, 0x7e, 0xa0, 0x04, - 0xe3, 0xf9, 0x68, 0x03, 0xec, 0x8c, 0x7c, 0x01, 0x2b, 0x20, 0x39, 0xa3, 0x7c, 0x28, 0x82, 0xe4, - 0x8c, 0x8a, 0x10, 0x07, 0x3d, 0xb8, 0xd6, 0x17, 0xb3, 0xec, 0x52, 0x63, 0x0a, 0x92, 0xf6, 0xcd, - 0xf7, 0x8a, 0x39, 0xd5, 0xb8, 0x48, 0xcc, 0x85, 0xab, 0x71, 0x51, 0x7e, 0x6e, 0x5e, 0x8d, 0x8b, - 0x8a, 0x52, 0xf2, 0xf4, 0x04, 0x4c, 0x93, 0xcb, 0xca, 0x09, 0x58, 0xc9, 0x74, 0xab, 0x27, 0x60, - 0x35, 0x93, 0x6c, 0xfc, 0x02, 0xa6, 0x7b, 0xf1, 0x45, 0xe0, 0x18, 0x72, 0xa6, 0x53, 0x4a, 0x2e, - 0x37, 0xef, 0xe8, 0xca, 0x55, 0xe3, 0x48, 0x7a, 0x56, 0x35, 0x4e, 0xc9, 0x05, 0xab, 0xc6, 0xa9, - 0x89, 0x5d, 0x1c, 0xad, 0x38, 0x7c, 0xc2, 0x54, 0x8a, 0x56, 0x72, 0x73, 0xbe, 0xcd, 0x77, 0x8b, - 0xf8, 0xa4, 0x1a, 0xfa, 0x7c, 0x0e, 0x55, 0xaa, 0x21, 0x37, 0x21, 0x2b, 0xd5, 0x50, 0x90, 0x87, - 0xfd, 0x1d, 0xdc, 0x18, 0x69, 0x53, 0x9d, 0x86, 0xb8, 0xcf, 0x95, 0x27, 0x63, 0x9b, 0x1f, 0x4e, - 0x24, 0x20, 0xad, 0xb5, 0x91, 0x90, 0xf7, 0x93, 0xd6, 0x5a, 0x7e, 0x9a, 0x54, 0x5a, 0x6b, 0x05, - 0xd9, 0x43, 0x23, 0x26, 0x57, 0x4d, 0xf8, 0xb4, 0xd7, 0x27, 0xf7, 0x79, 0x87, 0xe1, 0x09, 0x0a, - 0x32, 0x47, 0xef, 0x84, 0x83, 0x41, 0x18, 0x90, 0xa8, 0x5b, 0x64, 0xd1, 0x3b, 0xfa, 0x02, 0x76, - 0xbe, 0xd2, 0x0b, 0xbc, 0xbf, 0x04, 0xe8, 0x4c, 0xae, 0xf6, 0xb1, 0xa4, 0xc7, 0xd2, 0x30, 0xe9, - 0x3b, 0xb5, 0x50, 0x80, 0xaf, 0xfa, 0xf7, 0xb0, 0xe8, 0xd8, 0x81, 0x83, 0x7c, 0xb9, 0x6e, 0x59, - 0xd5, 0x86, 0x8e, 0x4b, 0xa8, 0xfc, 0xa3, 0xc9, 0x24, 0xa4, 0x8d, 0x35, 0xd6, 0x64, 0x5a, 0xa5, - 0x8d, 0xb5, 0x2c, 0x9d, 0x2b, 0x6d, 0xac, 0xa5, 0xb9, 0x5b, 0x5c, 0x6d, 0x5f, 0x93, 0x97, 0x95, - 0xaa, 0x2d, 0x4b, 0xef, 0x36, 0xd7, 0x26, 0x60, 0x97, 0x26, 0xb0, 0x23, 0x24, 0x4c, 0x0d, 0xed, - 0xd2, 0xd6, 0x24, 0xe4, 0xa4, 0x09, 0x5c, 0x94, 0x76, 0xc5, 0xe1, 0xb1, 0x90, 0xbb, 0x94, 0xc3, - 0xe3, 0xdc, 0x74, 0xab, 0x1c, 0x1e, 0xe7, 0x67, 0x40, 0xb1, 0xa7, 0x11, 0x72, 0x8b, 0x92, 0xa7, - 0xc9, 0xcd, 0x8e, 0x4a, 0x9e, 0x26, 0x3f, 0x09, 0x6a, 0x78, 0x50, 0x77, 0xa5, 0x04, 0xa6, 0x74, - 0xbb, 0x5c, 0x94, 0x23, 0x95, 0x6e, 0x97, 0x0b, 0x53, 0xa1, 0xc6, 0x5f, 0x55, 0xa0, 0xd9, 0xcf, - 0x7d, 0xe4, 0x6c, 0x7c, 0xac, 0xdc, 0x1c, 0x97, 0x3f, 0x9c, 0x6e, 0x7e, 0x32, 0xb1, 0x90, 0x34, - 0x76, 0xae, 0x90, 0x2e, 0x95, 0xc6, 0x2e, 0x3f, 0x63, 0x2b, 0x8d, 0x5d, 0x41, 0xd2, 0x95, 0xdd, - 0x05, 0xa8, 0x4f, 0x8e, 0xd5, 0xbb, 0x80, 0xe2, 0x57, 0xd6, 0xea, 0x5d, 0x40, 0xd9, 0x43, 0xe8, - 
0x6f, 0xe0, 0x8a, 0xc7, 0x3d, 0xdd, 0x90, 0x62, 0xcb, 0xbc, 0x37, 0x20, 0x52, 0x6c, 0x99, 0xff, - 0xf8, 0xc3, 0x83, 0x3a, 0x8d, 0x2d, 0x59, 0x2e, 0xd8, 0x43, 0xb1, 0xa1, 0x0b, 0x19, 0xb3, 0xe2, - 0x82, 0x29, 0xa3, 0xb2, 0x4a, 0x55, 0xd1, 0xf0, 0x32, 0xb7, 0x2a, 0x4b, 0x2a, 0x2e, 0xa8, 0x4a, - 0x65, 0xd5, 0x56, 0xc5, 0x12, 0x6c, 0x6d, 0xc7, 0xd7, 0x56, 0x95, 0x15, 0x97, 0x56, 0xc5, 0xb3, - 0xca, 0xb7, 0x1d, 0xa9, 0x2d, 0xf2, 0x6d, 0x47, 0x4a, 0x2f, 0xba, 0xed, 0xc8, 0x78, 0xa4, 0x5b, - 0xd2, 0x18, 0x11, 0xb3, 0xef, 0xc8, 0x6e, 0x5a, 0xd6, 0xd7, 0xd2, 0x32, 0x48, 0x53, 0xa8, 0x4f, - 0xc8, 0xf4, 0xf2, 0xcf, 0x50, 0x4e, 0xa9, 0xe3, 0xa2, 0x82, 0x29, 0x24, 0xb2, 0xa9, 0x61, 0xdf, - 0xaf, 0xda, 0x49, 0xa2, 0xc9, 0xca, 0x10, 0x72, 0x71, 0xd8, 0xc7, 0x58, 0xa4, 0x4e, 0xed, 0xb3, - 0x02, 0xf9, 0x0a, 0x69, 0x3b, 0xa5, 0x17, 0x74, 0x2a, 0xc7, 0x23, 0x5d, 0xea, 0xe2, 0x73, 0x07, - 0x53, 0x7b, 0x4f, 0x39, 0x4a, 0x68, 0xf4, 0xde, 0xcf, 0x65, 0x92, 0xee, 0x0a, 0xe8, 0x1c, 0xa0, - 0x9d, 0x70, 0x5f, 0x33, 0xc2, 0x6a, 0x3f, 0x3c, 0xc8, 0xe7, 0x92, 0x74, 0x3b, 0x19, 0x02, 0x43, - 0xd2, 0x9d, 0x03, 0x04, 0x91, 0x74, 0xe7, 0x21, 0x38, 0xf0, 0x96, 0x4e, 0xf7, 0xd6, 0x6c, 0xff, - 0x7d, 0x1d, 0x06, 0xf2, 0x11, 0x7d, 0x43, 0xc3, 0x52, 0xb0, 0xa5, 0xeb, 0xd9, 0x35, 0x47, 0x74, - 0x91, 0x45, 0x77, 0x44, 0x97, 0x38, 0x4a, 0x8e, 0xe8, 0x0a, 0xb7, 0xba, 0x06, 0x3a, 0xaf, 0x71, - 0xa0, 0x6c, 0x27, 0xc7, 0xea, 0x1a, 0x18, 0x17, 0x15, 0xaf, 0x01, 0x8e, 0x4d, 0x8a, 0xf1, 0x63, - 0x94, 0x74, 0xe8, 0xff, 0x21, 0xdd, 0x20, 0xff, 0x87, 0x94, 0x65, 0x77, 0x1f, 0xcb, 0xd3, 0x5d, - 0xc3, 0x54, 0x10, 0xe3, 0xe7, 0x09, 0xa8, 0x9b, 0x93, 0x8a, 0xb1, 0x51, 0x37, 0xa7, 0x62, 0xb0, - 0x8e, 0xba, 0x39, 0x95, 0xe1, 0x76, 0x68, 0x3a, 0x5b, 0xc0, 0xdb, 0xa8, 0xe9, 0xec, 0x5c, 0x08, - 0x8f, 0x9a, 0xce, 0x2e, 0x40, 0xee, 0xb0, 0x8c, 0x80, 0xda, 0x19, 0x78, 0x1b, 0x51, 0x32, 0x02, - 0x5a, 0xb6, 0xe2, 0x8c, 0x40, 0x8e, 0x88, 0x34, 0xca, 0xfd, 0x49, 0x46, 0x79, 0xfb, 0xfb, 0x8e, - 0xf2, 0xf6, 0x04, 0xa3, 0x4c, 0x0f, 0xaa, 0x19, 0xd6, 0x5d, 0x3d, 0xa8, 0xea, 0xd1, 0xf6, 0xea, - 0x41, 0x35, 0x07, 0x64, 0xff, 0xec, 0x39, 0x3c, 0x08, 0xa3, 0xfe, 0x9a, 0x3d, 0xb4, 0x9d, 0x63, - 0x24, 0x08, 0x0d, 0x85, 0x7f, 0xe9, 0xfb, 0x2c, 0xe7, 0x1f, 0xfe, 0x92, 0xbf, 0xf1, 0x77, 0x95, - 0xca, 0x3f, 0x56, 0x2a, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x97, 0x20, 0x5c, 0x2c, 0x15, 0x58, - 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.proto deleted file mode 100644 index 30732efc21d..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ClientNamenodeProtocol.proto +++ /dev/null @@ -1,902 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "ClientNamenodeProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "Security.proto"; -import "hdfs.proto"; -import "acl.proto"; -import "xattr.proto"; -import "encryption.proto"; -import "inotify.proto"; -import "erasurecoding.proto"; - -/** - * The ClientNamenodeProtocol Service defines the interface between a client - * (as running inside an MR Task) and the Namenode. - * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc - * for each of the methods. - * The exceptions declared in the above class also apply to this protocol. - * Exceptions are unwrapped and thrown by the PB libraries. - */ - -message GetBlockLocationsRequestProto { - required string src = 1; // file name - required uint64 offset = 2; // range start offset - required uint64 length = 3; // range length -} - -message GetBlockLocationsResponseProto { - optional LocatedBlocksProto locations = 1; -} - -message GetServerDefaultsRequestProto { // No parameters -} - -message GetServerDefaultsResponseProto { - required FsServerDefaultsProto serverDefaults = 1; -} - -enum CreateFlagProto { - CREATE = 0x01; // Create a file - OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC - APPEND = 0x04; // Append to a file - LAZY_PERSIST = 0x10; // File with reduced durability guarantees. - NEW_BLOCK = 0x20; // Write data to a new block when appending -} - -message CreateRequestProto { - required string src = 1; - required FsPermissionProto masked = 2; - required string clientName = 3; - required uint32 createFlag = 4; // bits set using CreateFlag - required bool createParent = 5; - required uint32 replication = 6; // Short: Only 16 bits used - required uint64 blockSize = 7; - repeated CryptoProtocolVersionProto cryptoProtocolVersion = 8; -} - -message CreateResponseProto { - optional HdfsFileStatusProto fs = 1; -} - -message AppendRequestProto { - required string src = 1; - required string clientName = 2; - optional uint32 flag = 3; // bits set using CreateFlag -} - -message AppendResponseProto { - optional LocatedBlockProto block = 1; - optional HdfsFileStatusProto stat = 2; -} - -message SetReplicationRequestProto { - required string src = 1; - required uint32 replication = 2; // Short: Only 16 bits used -} - -message SetReplicationResponseProto { - required bool result = 1; -} - -message SetStoragePolicyRequestProto { - required string src = 1; - required string policyName = 2; -} - -message SetStoragePolicyResponseProto { // void response -} - -message UnsetStoragePolicyRequestProto { - required string src = 1; -} - -message UnsetStoragePolicyResponseProto { -} - -message GetStoragePolicyRequestProto { - required string path = 1; -} - -message GetStoragePolicyResponseProto { - required BlockStoragePolicyProto storagePolicy = 1; -} - -message GetStoragePoliciesRequestProto { // void request -} - -message GetStoragePoliciesResponseProto { - repeated BlockStoragePolicyProto policies = 1; -} - -message SetPermissionRequestProto { - required string src = 1; - required FsPermissionProto permission = 2; -} - -message SetPermissionResponseProto { // void response -} - -message SetOwnerRequestProto { - required string src = 1; - optional string 
username = 2; - optional string groupname = 3; -} - -message SetOwnerResponseProto { // void response -} - -message AbandonBlockRequestProto { - required ExtendedBlockProto b = 1; - required string src = 2; - required string holder = 3; - optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID -} - -message AbandonBlockResponseProto { // void response -} - -message AddBlockRequestProto { - required string src = 1; - required string clientName = 2; - optional ExtendedBlockProto previous = 3; - repeated DatanodeInfoProto excludeNodes = 4; - optional uint64 fileId = 5 [default = 0]; // default as a bogus id - repeated string favoredNodes = 6; //the set of datanodes to use for the block -} - -message AddBlockResponseProto { - required LocatedBlockProto block = 1; -} - -message GetAdditionalDatanodeRequestProto { - required string src = 1; - required ExtendedBlockProto blk = 2; - repeated DatanodeInfoProto existings = 3; - repeated DatanodeInfoProto excludes = 4; - required uint32 numAdditionalNodes = 5; - required string clientName = 6; - repeated string existingStorageUuids = 7; - optional uint64 fileId = 8 [default = 0]; // default to GRANDFATHER_INODE_ID -} - -message GetAdditionalDatanodeResponseProto { - required LocatedBlockProto block = 1; -} - -message CompleteRequestProto { - required string src = 1; - required string clientName = 2; - optional ExtendedBlockProto last = 3; - optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID -} - -message CompleteResponseProto { - required bool result = 1; -} - -message ReportBadBlocksRequestProto { - repeated LocatedBlockProto blocks = 1; -} - -message ReportBadBlocksResponseProto { // void response -} - -message ConcatRequestProto { - required string trg = 1; - repeated string srcs = 2; -} - -message ConcatResponseProto { // void response -} - -message TruncateRequestProto { - required string src = 1; - required uint64 newLength = 2; - required string clientName = 3; -} - -message TruncateResponseProto { - required bool result = 1; -} - -message RenameRequestProto { - required string src = 1; - required string dst = 2; -} - -message RenameResponseProto { - required bool result = 1; -} - - -message Rename2RequestProto { - required string src = 1; - required string dst = 2; - required bool overwriteDest = 3; -} - -message Rename2ResponseProto { // void response -} - -message DeleteRequestProto { - required string src = 1; - required bool recursive = 2; -} - -message DeleteResponseProto { - required bool result = 1; -} - -message MkdirsRequestProto { - required string src = 1; - required FsPermissionProto masked = 2; - required bool createParent = 3; -} -message MkdirsResponseProto { - required bool result = 1; -} - -message GetListingRequestProto { - required string src = 1; - required bytes startAfter = 2; - required bool needLocation = 3; -} -message GetListingResponseProto { - optional DirectoryListingProto dirList = 1; -} - -message GetSnapshottableDirListingRequestProto { // no input parameters -} -message GetSnapshottableDirListingResponseProto { - optional SnapshottableDirectoryListingProto snapshottableDirList = 1; -} - -message GetSnapshotDiffReportRequestProto { - required string snapshotRoot = 1; - required string fromSnapshot = 2; - required string toSnapshot = 3; -} -message GetSnapshotDiffReportResponseProto { - required SnapshotDiffReportProto diffReport = 1; -} - -message RenewLeaseRequestProto { - required string clientName = 1; -} - -message RenewLeaseResponseProto { //void response -} - 
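
For orientation while reading this removal: the `createFlag` fields above are plain uint32 bitmasks built from CreateFlagProto values ("bits set using CreateFlag"), and proto2 required/optional scalars surface as pointer fields in the generated Go. Below is a minimal, illustrative sketch (not part of this patch) of how a caller would have populated CreateRequestProto against the vendored bindings being deleted here. It assumes the vendored import path github.com/colinmarc/hdfs/protocol/hadoop_hdfs and protoc-gen-go's usual field naming; the shape of FsPermissionProto (a single perm field from the sibling hdfs.proto) is an assumption, as that file is not shown in this diff.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
)

func main() {
	// "bits set using CreateFlag": OR the enum values into one uint32.
	flags := uint32(hdfs.CreateFlagProto_CREATE) | uint32(hdfs.CreateFlagProto_OVERWRITE)

	// proto2 required/optional scalars are pointers in the generated Go, so
	// the proto.String/Uint32/Bool/Uint64 helpers are used to set them.
	req := &hdfs.CreateRequestProto{
		Src:          proto.String("/tmp/example"),
		Masked:       &hdfs.FsPermissionProto{Perm: proto.Uint32(0644)}, // assumed shape, defined in hdfs.proto
		ClientName:   proto.String("example-client"),
		CreateFlag:   proto.Uint32(flags),
		CreateParent: proto.Bool(true),
		Replication:  proto.Uint32(3),
		BlockSize:    proto.Uint64(128 * 1024 * 1024),
	}

	// With the legacy proto2 API, Marshal reports an error if any
	// required field is left unset.
	if _, err := proto.Marshal(req); err != nil {
		panic(err)
	}
	fmt.Printf("createFlag = %#x\n", req.GetCreateFlag()) // 0x3 = CREATE|OVERWRITE
}
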
-message RecoverLeaseRequestProto { - required string src = 1; - required string clientName = 2; -} -message RecoverLeaseResponseProto { - required bool result = 1; -} - -message GetFsStatusRequestProto { // no input paramters -} - -message GetFsStatsResponseProto { - required uint64 capacity = 1; - required uint64 used = 2; - required uint64 remaining = 3; - required uint64 under_replicated = 4; - required uint64 corrupt_blocks = 5; - required uint64 missing_blocks = 6; - optional uint64 missing_repl_one_blocks = 7; - optional uint64 blocks_in_future = 8; - optional uint64 pending_deletion_blocks = 9; -} - -enum DatanodeReportTypeProto { // type of the datanode report - ALL = 1; - LIVE = 2; - DEAD = 3; - DECOMMISSIONING = 4; -} - -message GetDatanodeReportRequestProto { - required DatanodeReportTypeProto type = 1; -} - -message GetDatanodeReportResponseProto { - repeated DatanodeInfoProto di = 1; -} - -message GetDatanodeStorageReportRequestProto { - required DatanodeReportTypeProto type = 1; -} - -message DatanodeStorageReportProto { - required DatanodeInfoProto datanodeInfo = 1; - repeated StorageReportProto storageReports = 2; -} - -message GetDatanodeStorageReportResponseProto { - repeated DatanodeStorageReportProto datanodeStorageReports = 1; -} - -message GetPreferredBlockSizeRequestProto { - required string filename = 1; -} - -message GetPreferredBlockSizeResponseProto { - required uint64 bsize = 1; -} - -enum SafeModeActionProto { - SAFEMODE_LEAVE = 1; - SAFEMODE_ENTER = 2; - SAFEMODE_GET = 3; - SAFEMODE_FORCE_EXIT = 4; -} - -message SetSafeModeRequestProto { - required SafeModeActionProto action = 1; - optional bool checked = 2 [default = false]; -} - -message SetSafeModeResponseProto { - required bool result = 1; -} - -message SaveNamespaceRequestProto { - optional uint64 timeWindow = 1 [default = 0]; - optional uint64 txGap = 2 [default = 0]; -} - -message SaveNamespaceResponseProto { // void response - optional bool saved = 1 [default = true]; -} - -message RollEditsRequestProto { // no parameters -} - -message RollEditsResponseProto { // response - required uint64 newSegmentTxId = 1; -} - -message RestoreFailedStorageRequestProto { - required string arg = 1; -} - -message RestoreFailedStorageResponseProto { - required bool result = 1; -} - -message RefreshNodesRequestProto { // no parameters -} - -message RefreshNodesResponseProto { // void response -} - -message FinalizeUpgradeRequestProto { // no parameters -} - -message FinalizeUpgradeResponseProto { // void response -} - -enum RollingUpgradeActionProto { - QUERY = 1; - START = 2; - FINALIZE = 3; -} - -message RollingUpgradeRequestProto { - required RollingUpgradeActionProto action = 1; -} - -message RollingUpgradeInfoProto { - required RollingUpgradeStatusProto status = 1; - required uint64 startTime = 2; - required uint64 finalizeTime = 3; - required bool createdRollbackImages = 4; -} - -message RollingUpgradeResponseProto { - optional RollingUpgradeInfoProto rollingUpgradeInfo= 1; -} - -message ListCorruptFileBlocksRequestProto { - required string path = 1; - optional string cookie = 2; -} - -message ListCorruptFileBlocksResponseProto { - required CorruptFileBlocksProto corrupt = 1; -} - -message MetaSaveRequestProto { - required string filename = 1; -} - -message MetaSaveResponseProto { // void response -} - -message GetFileInfoRequestProto { - required string src = 1; -} - -message GetFileInfoResponseProto { - optional HdfsFileStatusProto fs = 1; -} - -message IsFileClosedRequestProto { - required string src = 1; -} - 
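SafeModeActionProto above shows how enums surface in the generated code: typed int32 constants plus an Enum() helper that returns a pointer, matching proto2 field semantics. A short sketch against the same pre-removal bindings; respBytes stands in for a payload obtained from the client's Hadoop RPC transport, which is outside this file:

    package main

    import (
        "fmt"

        hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
        "github.com/golang/protobuf/proto"
    )

    // printFsStats decodes a GetFsStatsResponseProto payload; respBytes is
    // assumed to come from the RPC layer, which is not shown here.
    func printFsStats(respBytes []byte) error {
        var stats hdfs.GetFsStatsResponseProto
        if err := proto.Unmarshal(respBytes, &stats); err != nil {
            return err // also fails if a required field is absent
        }
        // Generated getters are nil-safe and return zero values when unset.
        if total := stats.GetCapacity(); total > 0 {
            fmt.Printf("capacity used: %.1f%%\n",
                100*float64(stats.GetUsed())/float64(total))
        }
        fmt.Printf("missing blocks: %d\n", stats.GetMissingBlocks())
        return nil
    }

    func main() {
        // SAFEMODE_GET with checked=false mirrors `hdfs dfsadmin -safemode get`.
        req := &hdfs.SetSafeModeRequestProto{
            Action:  hdfs.SafeModeActionProto_SAFEMODE_GET.Enum(),
            Checked: proto.Bool(false),
        }
        fmt.Println("safe-mode query:", req.String())
    }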
-message IsFileClosedResponseProto { - required bool result = 1; -} - -message CacheDirectiveInfoProto { - optional int64 id = 1; - optional string path = 2; - optional uint32 replication = 3; - optional string pool = 4; - optional CacheDirectiveInfoExpirationProto expiration = 5; -} - -message CacheDirectiveInfoExpirationProto { - required int64 millis = 1; - required bool isRelative = 2; -} - -message CacheDirectiveStatsProto { - required int64 bytesNeeded = 1; - required int64 bytesCached = 2; - required int64 filesNeeded = 3; - required int64 filesCached = 4; - required bool hasExpired = 5; -} - -enum CacheFlagProto { - FORCE = 0x01; // Ignore pool resource limits -} - -message AddCacheDirectiveRequestProto { - required CacheDirectiveInfoProto info = 1; - optional uint32 cacheFlags = 2; // bits set using CacheFlag -} - -message AddCacheDirectiveResponseProto { - required int64 id = 1; -} - -message ModifyCacheDirectiveRequestProto { - required CacheDirectiveInfoProto info = 1; - optional uint32 cacheFlags = 2; // bits set using CacheFlag -} - -message ModifyCacheDirectiveResponseProto { -} - -message RemoveCacheDirectiveRequestProto { - required int64 id = 1; -} - -message RemoveCacheDirectiveResponseProto { -} - -message ListCacheDirectivesRequestProto { - required int64 prevId = 1; - required CacheDirectiveInfoProto filter = 2; -} - -message CacheDirectiveEntryProto { - required CacheDirectiveInfoProto info = 1; - required CacheDirectiveStatsProto stats = 2; -} - -message ListCacheDirectivesResponseProto { - repeated CacheDirectiveEntryProto elements = 1; - required bool hasMore = 2; -} - -message CachePoolInfoProto { - optional string poolName = 1; - optional string ownerName = 2; - optional string groupName = 3; - optional int32 mode = 4; - optional int64 limit = 5; - optional int64 maxRelativeExpiry = 6; -} - -message CachePoolStatsProto { - required int64 bytesNeeded = 1; - required int64 bytesCached = 2; - required int64 bytesOverlimit = 3; - required int64 filesNeeded = 4; - required int64 filesCached = 5; -} - -message AddCachePoolRequestProto { - required CachePoolInfoProto info = 1; -} - -message AddCachePoolResponseProto { // void response -} - -message ModifyCachePoolRequestProto { - required CachePoolInfoProto info = 1; -} - -message ModifyCachePoolResponseProto { // void response -} - -message RemoveCachePoolRequestProto { - required string poolName = 1; -} - -message RemoveCachePoolResponseProto { // void response -} - -message ListCachePoolsRequestProto { - required string prevPoolName = 1; -} - -message ListCachePoolsResponseProto { - repeated CachePoolEntryProto entries = 1; - required bool hasMore = 2; -} - -message CachePoolEntryProto { - required CachePoolInfoProto info = 1; - required CachePoolStatsProto stats = 2; -} - -message GetFileLinkInfoRequestProto { - required string src = 1; -} - -message GetFileLinkInfoResponseProto { - optional HdfsFileStatusProto fs = 1; -} - -message GetContentSummaryRequestProto { - required string path = 1; -} - -message GetContentSummaryResponseProto { - required ContentSummaryProto summary = 1; -} - -message GetQuotaUsageRequestProto { - required string path = 1; -} - -message GetQuotaUsageResponseProto { - required QuotaUsageProto usage = 1; -} - -message SetQuotaRequestProto { - required string path = 1; - required uint64 namespaceQuota = 2; - required uint64 storagespaceQuota = 3; - optional StorageTypeProto storageType = 4; -} - -message SetQuotaResponseProto { // void response -} - -message FsyncRequestProto { - required 
string src = 1; - required string client = 2; - optional sint64 lastBlockLength = 3 [default = -1]; - optional uint64 fileId = 4 [default = 0]; // default to GRANDFATHER_INODE_ID -} - -message FsyncResponseProto { // void response -} - -message SetTimesRequestProto { - required string src = 1; - required uint64 mtime = 2; - required uint64 atime = 3; -} - -message SetTimesResponseProto { // void response -} - -message CreateSymlinkRequestProto { - required string target = 1; - required string link = 2; - required FsPermissionProto dirPerm = 3; - required bool createParent = 4; -} - -message CreateSymlinkResponseProto { // void response -} - -message GetLinkTargetRequestProto { - required string path = 1; -} -message GetLinkTargetResponseProto { - optional string targetPath = 1; -} - -message UpdateBlockForPipelineRequestProto { - required ExtendedBlockProto block = 1; - required string clientName = 2; -} - -message UpdateBlockForPipelineResponseProto { - required LocatedBlockProto block = 1; -} - -message UpdatePipelineRequestProto { - required string clientName = 1; - required ExtendedBlockProto oldBlock = 2; - required ExtendedBlockProto newBlock = 3; - repeated DatanodeIDProto newNodes = 4; - repeated string storageIDs = 5; -} - -message UpdatePipelineResponseProto { // void response -} - -message SetBalancerBandwidthRequestProto { - required int64 bandwidth = 1; -} - -message SetBalancerBandwidthResponseProto { // void response -} - -message GetDataEncryptionKeyRequestProto { // no parameters -} - -message GetDataEncryptionKeyResponseProto { - optional DataEncryptionKeyProto dataEncryptionKey = 1; -} - -message CreateSnapshotRequestProto { - required string snapshotRoot = 1; - optional string snapshotName = 2; -} - -message CreateSnapshotResponseProto { - required string snapshotPath = 1; -} - -message RenameSnapshotRequestProto { - required string snapshotRoot = 1; - required string snapshotOldName = 2; - required string snapshotNewName = 3; -} - -message RenameSnapshotResponseProto { // void response -} - -message AllowSnapshotRequestProto { - required string snapshotRoot = 1; -} - -message AllowSnapshotResponseProto { -} - -message DisallowSnapshotRequestProto { - required string snapshotRoot = 1; -} - -message DisallowSnapshotResponseProto { -} - -message DeleteSnapshotRequestProto { - required string snapshotRoot = 1; - required string snapshotName = 2; -} - -message DeleteSnapshotResponseProto { // void response -} - -message CheckAccessRequestProto { - required string path = 1; - required AclEntryProto.FsActionProto mode = 2; -} - -message CheckAccessResponseProto { // void response -} - -message GetCurrentEditLogTxidRequestProto { -} - -message GetCurrentEditLogTxidResponseProto { - required int64 txid = 1; -} - -message GetEditsFromTxidRequestProto { - required int64 txid = 1; -} - -message GetEditsFromTxidResponseProto { - required EventsListProto eventsList = 1; -} - -service ClientNamenodeProtocol { - rpc getBlockLocations(GetBlockLocationsRequestProto) - returns(GetBlockLocationsResponseProto); - rpc getServerDefaults(GetServerDefaultsRequestProto) - returns(GetServerDefaultsResponseProto); - rpc create(CreateRequestProto)returns(CreateResponseProto); - rpc append(AppendRequestProto) returns(AppendResponseProto); - rpc setReplication(SetReplicationRequestProto) - returns(SetReplicationResponseProto); - rpc setStoragePolicy(SetStoragePolicyRequestProto) - returns(SetStoragePolicyResponseProto); - rpc unsetStoragePolicy(UnsetStoragePolicyRequestProto) - 
returns(UnsetStoragePolicyResponseProto); - rpc getStoragePolicy(GetStoragePolicyRequestProto) - returns(GetStoragePolicyResponseProto); - rpc getStoragePolicies(GetStoragePoliciesRequestProto) - returns(GetStoragePoliciesResponseProto); - rpc setPermission(SetPermissionRequestProto) - returns(SetPermissionResponseProto); - rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto); - rpc abandonBlock(AbandonBlockRequestProto) returns(AbandonBlockResponseProto); - rpc addBlock(AddBlockRequestProto) returns(AddBlockResponseProto); - rpc getAdditionalDatanode(GetAdditionalDatanodeRequestProto) - returns(GetAdditionalDatanodeResponseProto); - rpc complete(CompleteRequestProto) returns(CompleteResponseProto); - rpc reportBadBlocks(ReportBadBlocksRequestProto) - returns(ReportBadBlocksResponseProto); - rpc concat(ConcatRequestProto) returns(ConcatResponseProto); - rpc truncate(TruncateRequestProto) returns(TruncateResponseProto); - rpc rename(RenameRequestProto) returns(RenameResponseProto); - rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto); - rpc delete(DeleteRequestProto) returns(DeleteResponseProto); - rpc mkdirs(MkdirsRequestProto) returns(MkdirsResponseProto); - rpc getListing(GetListingRequestProto) returns(GetListingResponseProto); - rpc renewLease(RenewLeaseRequestProto) returns(RenewLeaseResponseProto); - rpc recoverLease(RecoverLeaseRequestProto) - returns(RecoverLeaseResponseProto); - rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto); - rpc getDatanodeReport(GetDatanodeReportRequestProto) - returns(GetDatanodeReportResponseProto); - rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto) - returns(GetDatanodeStorageReportResponseProto); - rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto) - returns(GetPreferredBlockSizeResponseProto); - rpc setSafeMode(SetSafeModeRequestProto) - returns(SetSafeModeResponseProto); - rpc saveNamespace(SaveNamespaceRequestProto) - returns(SaveNamespaceResponseProto); - rpc rollEdits(RollEditsRequestProto) - returns(RollEditsResponseProto); - rpc restoreFailedStorage(RestoreFailedStorageRequestProto) - returns(RestoreFailedStorageResponseProto); - rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto); - rpc finalizeUpgrade(FinalizeUpgradeRequestProto) - returns(FinalizeUpgradeResponseProto); - rpc rollingUpgrade(RollingUpgradeRequestProto) - returns(RollingUpgradeResponseProto); - rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto) - returns(ListCorruptFileBlocksResponseProto); - rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto); - rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto); - rpc addCacheDirective(AddCacheDirectiveRequestProto) - returns (AddCacheDirectiveResponseProto); - rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto) - returns (ModifyCacheDirectiveResponseProto); - rpc removeCacheDirective(RemoveCacheDirectiveRequestProto) - returns (RemoveCacheDirectiveResponseProto); - rpc listCacheDirectives(ListCacheDirectivesRequestProto) - returns (ListCacheDirectivesResponseProto); - rpc addCachePool(AddCachePoolRequestProto) - returns(AddCachePoolResponseProto); - rpc modifyCachePool(ModifyCachePoolRequestProto) - returns(ModifyCachePoolResponseProto); - rpc removeCachePool(RemoveCachePoolRequestProto) - returns(RemoveCachePoolResponseProto); - rpc listCachePools(ListCachePoolsRequestProto) - returns(ListCachePoolsResponseProto); - rpc getFileLinkInfo(GetFileLinkInfoRequestProto) - 
returns(GetFileLinkInfoResponseProto); - rpc getContentSummary(GetContentSummaryRequestProto) - returns(GetContentSummaryResponseProto); - rpc setQuota(SetQuotaRequestProto) returns(SetQuotaResponseProto); - rpc fsync(FsyncRequestProto) returns(FsyncResponseProto); - rpc setTimes(SetTimesRequestProto) returns(SetTimesResponseProto); - rpc createSymlink(CreateSymlinkRequestProto) - returns(CreateSymlinkResponseProto); - rpc getLinkTarget(GetLinkTargetRequestProto) - returns(GetLinkTargetResponseProto); - rpc updateBlockForPipeline(UpdateBlockForPipelineRequestProto) - returns(UpdateBlockForPipelineResponseProto); - rpc updatePipeline(UpdatePipelineRequestProto) - returns(UpdatePipelineResponseProto); - rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto) - returns(hadoop.common.GetDelegationTokenResponseProto); - rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto) - returns(hadoop.common.RenewDelegationTokenResponseProto); - rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto) - returns(hadoop.common.CancelDelegationTokenResponseProto); - rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto) - returns(SetBalancerBandwidthResponseProto); - rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto) - returns(GetDataEncryptionKeyResponseProto); - rpc createSnapshot(CreateSnapshotRequestProto) - returns(CreateSnapshotResponseProto); - rpc renameSnapshot(RenameSnapshotRequestProto) - returns(RenameSnapshotResponseProto); - rpc allowSnapshot(AllowSnapshotRequestProto) - returns(AllowSnapshotResponseProto); - rpc disallowSnapshot(DisallowSnapshotRequestProto) - returns(DisallowSnapshotResponseProto); - rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto) - returns(GetSnapshottableDirListingResponseProto); - rpc deleteSnapshot(DeleteSnapshotRequestProto) - returns(DeleteSnapshotResponseProto); - rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto) - returns(GetSnapshotDiffReportResponseProto); - rpc isFileClosed(IsFileClosedRequestProto) - returns(IsFileClosedResponseProto); - rpc modifyAclEntries(ModifyAclEntriesRequestProto) - returns(ModifyAclEntriesResponseProto); - rpc removeAclEntries(RemoveAclEntriesRequestProto) - returns(RemoveAclEntriesResponseProto); - rpc removeDefaultAcl(RemoveDefaultAclRequestProto) - returns(RemoveDefaultAclResponseProto); - rpc removeAcl(RemoveAclRequestProto) - returns(RemoveAclResponseProto); - rpc setAcl(SetAclRequestProto) - returns(SetAclResponseProto); - rpc getAclStatus(GetAclStatusRequestProto) - returns(GetAclStatusResponseProto); - rpc setXAttr(SetXAttrRequestProto) - returns(SetXAttrResponseProto); - rpc getXAttrs(GetXAttrsRequestProto) - returns(GetXAttrsResponseProto); - rpc listXAttrs(ListXAttrsRequestProto) - returns(ListXAttrsResponseProto); - rpc removeXAttr(RemoveXAttrRequestProto) - returns(RemoveXAttrResponseProto); - rpc checkAccess(CheckAccessRequestProto) - returns(CheckAccessResponseProto); - rpc createEncryptionZone(CreateEncryptionZoneRequestProto) - returns(CreateEncryptionZoneResponseProto); - rpc listEncryptionZones(ListEncryptionZonesRequestProto) - returns(ListEncryptionZonesResponseProto); - rpc getEZForPath(GetEZForPathRequestProto) - returns(GetEZForPathResponseProto); - rpc setErasureCodingPolicy(SetErasureCodingPolicyRequestProto) - returns(SetErasureCodingPolicyResponseProto); - rpc getCurrentEditLogTxid(GetCurrentEditLogTxidRequestProto) - returns(GetCurrentEditLogTxidResponseProto); - rpc getEditsFromTxid(GetEditsFromTxidRequestProto) - 
returns(GetEditsFromTxidResponseProto); - rpc getErasureCodingPolicies(GetErasureCodingPoliciesRequestProto) - returns(GetErasureCodingPoliciesResponseProto); - rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto) - returns(GetErasureCodingPolicyResponseProto); - rpc getQuotaUsage(GetQuotaUsageRequestProto) - returns(GetQuotaUsageResponseProto); -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.pb.go deleted file mode 100644 index 32301a0890a..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.pb.go +++ /dev/null @@ -1,513 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: ReconfigurationProtocol.proto - -/* -Package hadoop_hdfs is a generated protocol buffer package. - -It is generated from these files: - ReconfigurationProtocol.proto - xattr.proto - encryption.proto - erasurecoding.proto - ClientNamenodeProtocol.proto - datatransfer.proto - ClientDatanodeProtocol.proto - inotify.proto - hdfs.proto - acl.proto - -It has these top-level messages: - StartReconfigurationRequestProto - StartReconfigurationResponseProto - GetReconfigurationStatusRequestProto - GetReconfigurationStatusConfigChangeProto - GetReconfigurationStatusResponseProto - ListReconfigurablePropertiesRequestProto - ListReconfigurablePropertiesResponseProto - XAttrProto - SetXAttrRequestProto - SetXAttrResponseProto - GetXAttrsRequestProto - GetXAttrsResponseProto - ListXAttrsRequestProto - ListXAttrsResponseProto - RemoveXAttrRequestProto - RemoveXAttrResponseProto - CreateEncryptionZoneRequestProto - CreateEncryptionZoneResponseProto - ListEncryptionZonesRequestProto - EncryptionZoneProto - ListEncryptionZonesResponseProto - GetEZForPathRequestProto - GetEZForPathResponseProto - SetErasureCodingPolicyRequestProto - SetErasureCodingPolicyResponseProto - GetErasureCodingPoliciesRequestProto - GetErasureCodingPoliciesResponseProto - GetErasureCodingPolicyRequestProto - GetErasureCodingPolicyResponseProto - BlockECReconstructionInfoProto - GetBlockLocationsRequestProto - GetBlockLocationsResponseProto - GetServerDefaultsRequestProto - GetServerDefaultsResponseProto - CreateRequestProto - CreateResponseProto - AppendRequestProto - AppendResponseProto - SetReplicationRequestProto - SetReplicationResponseProto - SetStoragePolicyRequestProto - SetStoragePolicyResponseProto - UnsetStoragePolicyRequestProto - UnsetStoragePolicyResponseProto - GetStoragePolicyRequestProto - GetStoragePolicyResponseProto - GetStoragePoliciesRequestProto - GetStoragePoliciesResponseProto - SetPermissionRequestProto - SetPermissionResponseProto - SetOwnerRequestProto - SetOwnerResponseProto - AbandonBlockRequestProto - AbandonBlockResponseProto - AddBlockRequestProto - AddBlockResponseProto - GetAdditionalDatanodeRequestProto - GetAdditionalDatanodeResponseProto - CompleteRequestProto - CompleteResponseProto - ReportBadBlocksRequestProto - ReportBadBlocksResponseProto - ConcatRequestProto - ConcatResponseProto - TruncateRequestProto - TruncateResponseProto - RenameRequestProto - RenameResponseProto - Rename2RequestProto - Rename2ResponseProto - DeleteRequestProto - DeleteResponseProto - MkdirsRequestProto - MkdirsResponseProto - GetListingRequestProto - GetListingResponseProto - GetSnapshottableDirListingRequestProto - GetSnapshottableDirListingResponseProto - GetSnapshotDiffReportRequestProto - GetSnapshotDiffReportResponseProto - 
RenewLeaseRequestProto - RenewLeaseResponseProto - RecoverLeaseRequestProto - RecoverLeaseResponseProto - GetFsStatusRequestProto - GetFsStatsResponseProto - GetDatanodeReportRequestProto - GetDatanodeReportResponseProto - GetDatanodeStorageReportRequestProto - DatanodeStorageReportProto - GetDatanodeStorageReportResponseProto - GetPreferredBlockSizeRequestProto - GetPreferredBlockSizeResponseProto - SetSafeModeRequestProto - SetSafeModeResponseProto - SaveNamespaceRequestProto - SaveNamespaceResponseProto - RollEditsRequestProto - RollEditsResponseProto - RestoreFailedStorageRequestProto - RestoreFailedStorageResponseProto - RefreshNodesRequestProto - RefreshNodesResponseProto - FinalizeUpgradeRequestProto - FinalizeUpgradeResponseProto - RollingUpgradeRequestProto - RollingUpgradeInfoProto - RollingUpgradeResponseProto - ListCorruptFileBlocksRequestProto - ListCorruptFileBlocksResponseProto - MetaSaveRequestProto - MetaSaveResponseProto - GetFileInfoRequestProto - GetFileInfoResponseProto - IsFileClosedRequestProto - IsFileClosedResponseProto - CacheDirectiveInfoProto - CacheDirectiveInfoExpirationProto - CacheDirectiveStatsProto - AddCacheDirectiveRequestProto - AddCacheDirectiveResponseProto - ModifyCacheDirectiveRequestProto - ModifyCacheDirectiveResponseProto - RemoveCacheDirectiveRequestProto - RemoveCacheDirectiveResponseProto - ListCacheDirectivesRequestProto - CacheDirectiveEntryProto - ListCacheDirectivesResponseProto - CachePoolInfoProto - CachePoolStatsProto - AddCachePoolRequestProto - AddCachePoolResponseProto - ModifyCachePoolRequestProto - ModifyCachePoolResponseProto - RemoveCachePoolRequestProto - RemoveCachePoolResponseProto - ListCachePoolsRequestProto - ListCachePoolsResponseProto - CachePoolEntryProto - GetFileLinkInfoRequestProto - GetFileLinkInfoResponseProto - GetContentSummaryRequestProto - GetContentSummaryResponseProto - GetQuotaUsageRequestProto - GetQuotaUsageResponseProto - SetQuotaRequestProto - SetQuotaResponseProto - FsyncRequestProto - FsyncResponseProto - SetTimesRequestProto - SetTimesResponseProto - CreateSymlinkRequestProto - CreateSymlinkResponseProto - GetLinkTargetRequestProto - GetLinkTargetResponseProto - UpdateBlockForPipelineRequestProto - UpdateBlockForPipelineResponseProto - UpdatePipelineRequestProto - UpdatePipelineResponseProto - SetBalancerBandwidthRequestProto - SetBalancerBandwidthResponseProto - GetDataEncryptionKeyRequestProto - GetDataEncryptionKeyResponseProto - CreateSnapshotRequestProto - CreateSnapshotResponseProto - RenameSnapshotRequestProto - RenameSnapshotResponseProto - AllowSnapshotRequestProto - AllowSnapshotResponseProto - DisallowSnapshotRequestProto - DisallowSnapshotResponseProto - DeleteSnapshotRequestProto - DeleteSnapshotResponseProto - CheckAccessRequestProto - CheckAccessResponseProto - GetCurrentEditLogTxidRequestProto - GetCurrentEditLogTxidResponseProto - GetEditsFromTxidRequestProto - GetEditsFromTxidResponseProto - DataTransferEncryptorMessageProto - BaseHeaderProto - DataTransferTraceInfoProto - ClientOperationHeaderProto - CachingStrategyProto - OpReadBlockProto - ChecksumProto - OpWriteBlockProto - OpTransferBlockProto - OpReplaceBlockProto - OpCopyBlockProto - OpBlockChecksumProto - OpBlockGroupChecksumProto - ShortCircuitShmIdProto - ShortCircuitShmSlotProto - OpRequestShortCircuitAccessProto - ReleaseShortCircuitAccessRequestProto - ReleaseShortCircuitAccessResponseProto - ShortCircuitShmRequestProto - ShortCircuitShmResponseProto - PacketHeaderProto - PipelineAckProto - ReadOpChecksumInfoProto - 
BlockOpResponseProto - ClientReadStatusProto - DNTransferAckProto - OpBlockChecksumResponseProto - OpCustomProto - GetReplicaVisibleLengthRequestProto - GetReplicaVisibleLengthResponseProto - RefreshNamenodesRequestProto - RefreshNamenodesResponseProto - DeleteBlockPoolRequestProto - DeleteBlockPoolResponseProto - GetBlockLocalPathInfoRequestProto - GetBlockLocalPathInfoResponseProto - ShutdownDatanodeRequestProto - ShutdownDatanodeResponseProto - EvictWritersRequestProto - EvictWritersResponseProto - GetDatanodeInfoRequestProto - GetDatanodeInfoResponseProto - TriggerBlockReportRequestProto - TriggerBlockReportResponseProto - GetBalancerBandwidthRequestProto - GetBalancerBandwidthResponseProto - EventProto - EventBatchProto - CreateEventProto - CloseEventProto - TruncateEventProto - AppendEventProto - RenameEventProto - MetadataUpdateEventProto - UnlinkEventProto - EventsListProto - ExtendedBlockProto - DatanodeIDProto - DatanodeLocalInfoProto - DatanodeInfosProto - DatanodeInfoProto - DatanodeStorageProto - StorageReportProto - ContentSummaryProto - QuotaUsageProto - StorageTypeQuotaInfosProto - StorageTypeQuotaInfoProto - CorruptFileBlocksProto - FsPermissionProto - StorageTypesProto - BlockStoragePolicyProto - LocatedBlockProto - DataEncryptionKeyProto - FileEncryptionInfoProto - PerFileEncryptionInfoProto - ZoneEncryptionInfoProto - CipherOptionProto - LocatedBlocksProto - ECSchemaOptionEntryProto - ECSchemaProto - ErasureCodingPolicyProto - HdfsFileStatusProto - FsServerDefaultsProto - DirectoryListingProto - SnapshottableDirectoryStatusProto - SnapshottableDirectoryListingProto - SnapshotDiffReportEntryProto - SnapshotDiffReportProto - BlockProto - SnapshotInfoProto - RollingUpgradeStatusProto - StorageUuidsProto - AclEntryProto - AclStatusProto - ModifyAclEntriesRequestProto - ModifyAclEntriesResponseProto - RemoveAclRequestProto - RemoveAclResponseProto - RemoveAclEntriesRequestProto - RemoveAclEntriesResponseProto - RemoveDefaultAclRequestProto - RemoveDefaultAclResponseProto - SetAclRequestProto - SetAclResponseProto - GetAclStatusRequestProto - GetAclStatusResponseProto -*/ -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// * Asks NN/DN to reload configuration file. 
-type StartReconfigurationRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartReconfigurationRequestProto) Reset() { *m = StartReconfigurationRequestProto{} } -func (m *StartReconfigurationRequestProto) String() string { return proto.CompactTextString(m) } -func (*StartReconfigurationRequestProto) ProtoMessage() {} -func (*StartReconfigurationRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{0} -} - -type StartReconfigurationResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *StartReconfigurationResponseProto) Reset() { *m = StartReconfigurationResponseProto{} } -func (m *StartReconfigurationResponseProto) String() string { return proto.CompactTextString(m) } -func (*StartReconfigurationResponseProto) ProtoMessage() {} -func (*StartReconfigurationResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{1} -} - -// * Query the running status of reconfiguration process -type GetReconfigurationStatusRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetReconfigurationStatusRequestProto) Reset() { *m = GetReconfigurationStatusRequestProto{} } -func (m *GetReconfigurationStatusRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetReconfigurationStatusRequestProto) ProtoMessage() {} -func (*GetReconfigurationStatusRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2} -} - -type GetReconfigurationStatusConfigChangeProto struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - OldValue *string `protobuf:"bytes,2,req,name=oldValue" json:"oldValue,omitempty"` - NewValue *string `protobuf:"bytes,3,opt,name=newValue" json:"newValue,omitempty"` - ErrorMessage *string `protobuf:"bytes,4,opt,name=errorMessage" json:"errorMessage,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetReconfigurationStatusConfigChangeProto) Reset() { - *m = GetReconfigurationStatusConfigChangeProto{} -} -func (m *GetReconfigurationStatusConfigChangeProto) String() string { return proto.CompactTextString(m) } -func (*GetReconfigurationStatusConfigChangeProto) ProtoMessage() {} -func (*GetReconfigurationStatusConfigChangeProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{3} -} - -func (m *GetReconfigurationStatusConfigChangeProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *GetReconfigurationStatusConfigChangeProto) GetOldValue() string { - if m != nil && m.OldValue != nil { - return *m.OldValue - } - return "" -} - -func (m *GetReconfigurationStatusConfigChangeProto) GetNewValue() string { - if m != nil && m.NewValue != nil { - return *m.NewValue - } - return "" -} - -func (m *GetReconfigurationStatusConfigChangeProto) GetErrorMessage() string { - if m != nil && m.ErrorMessage != nil { - return *m.ErrorMessage - } - return "" -} - -type GetReconfigurationStatusResponseProto struct { - StartTime *int64 `protobuf:"varint,1,req,name=startTime" json:"startTime,omitempty"` - EndTime *int64 `protobuf:"varint,2,opt,name=endTime" json:"endTime,omitempty"` - Changes []*GetReconfigurationStatusConfigChangeProto `protobuf:"bytes,3,rep,name=changes" json:"changes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetReconfigurationStatusResponseProto) Reset() { *m = GetReconfigurationStatusResponseProto{} } -func (m *GetReconfigurationStatusResponseProto) String() string { return proto.CompactTextString(m) } -func 
(*GetReconfigurationStatusResponseProto) ProtoMessage() {} -func (*GetReconfigurationStatusResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{4} -} - -func (m *GetReconfigurationStatusResponseProto) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *GetReconfigurationStatusResponseProto) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *GetReconfigurationStatusResponseProto) GetChanges() []*GetReconfigurationStatusConfigChangeProto { - if m != nil { - return m.Changes - } - return nil -} - -// * Query the reconfigurable properties on NN/DN. -type ListReconfigurablePropertiesRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListReconfigurablePropertiesRequestProto) Reset() { - *m = ListReconfigurablePropertiesRequestProto{} -} -func (m *ListReconfigurablePropertiesRequestProto) String() string { return proto.CompactTextString(m) } -func (*ListReconfigurablePropertiesRequestProto) ProtoMessage() {} -func (*ListReconfigurablePropertiesRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{5} -} - -type ListReconfigurablePropertiesResponseProto struct { - Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListReconfigurablePropertiesResponseProto) Reset() { - *m = ListReconfigurablePropertiesResponseProto{} -} -func (m *ListReconfigurablePropertiesResponseProto) String() string { return proto.CompactTextString(m) } -func (*ListReconfigurablePropertiesResponseProto) ProtoMessage() {} -func (*ListReconfigurablePropertiesResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{6} -} - -func (m *ListReconfigurablePropertiesResponseProto) GetName() []string { - if m != nil { - return m.Name - } - return nil -} - -func init() { - proto.RegisterType((*StartReconfigurationRequestProto)(nil), "hadoop.hdfs.StartReconfigurationRequestProto") - proto.RegisterType((*StartReconfigurationResponseProto)(nil), "hadoop.hdfs.StartReconfigurationResponseProto") - proto.RegisterType((*GetReconfigurationStatusRequestProto)(nil), "hadoop.hdfs.GetReconfigurationStatusRequestProto") - proto.RegisterType((*GetReconfigurationStatusConfigChangeProto)(nil), "hadoop.hdfs.GetReconfigurationStatusConfigChangeProto") - proto.RegisterType((*GetReconfigurationStatusResponseProto)(nil), "hadoop.hdfs.GetReconfigurationStatusResponseProto") - proto.RegisterType((*ListReconfigurablePropertiesRequestProto)(nil), "hadoop.hdfs.ListReconfigurablePropertiesRequestProto") - proto.RegisterType((*ListReconfigurablePropertiesResponseProto)(nil), "hadoop.hdfs.ListReconfigurablePropertiesResponseProto") -} - -func init() { proto.RegisterFile("ReconfigurationProtocol.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xb1, 0x8e, 0xd3, 0x40, - 0x10, 0xd5, 0xc6, 0x91, 0x42, 0x26, 0x54, 0x2b, 0x0a, 0xcb, 0x0a, 0xc8, 0x18, 0x82, 0x1c, 0x24, - 0x2c, 0x11, 0x89, 0xb4, 0x48, 0x49, 0x41, 0x13, 0xa4, 0xc8, 0x41, 0xf4, 0x8b, 0x3d, 0xb1, 0x2d, - 0x19, 0xaf, 0xd9, 0x5d, 0x43, 0x4d, 0x47, 0xc3, 0x17, 0x5c, 0x73, 0xfd, 0x55, 0xf7, 0x87, 0x27, - 0x6f, 0xce, 0x97, 0x75, 0x14, 0xe7, 0x7c, 0x95, 0x77, 0x66, 0xde, 0x3c, 0xcf, 0x7b, 0x3b, 0x5a, - 0x78, 0x19, 0x62, 0xc4, 0x8b, 0x7d, 0x96, 0x54, 0x82, 0xa9, 0x8c, 0x17, 0x5b, 0xc1, 0x15, 0x8f, - 0x78, 0x1e, 
0x94, 0xf5, 0x81, 0x4e, 0x52, 0x16, 0x73, 0x5e, 0x06, 0x69, 0xbc, 0x97, 0x9e, 0x07, - 0xee, 0x4e, 0x31, 0xa1, 0x4e, 0x5a, 0x42, 0xfc, 0x55, 0xa1, 0x54, 0xba, 0xd3, 0x7b, 0x03, 0xaf, - 0xcf, 0x63, 0x64, 0xc9, 0x0b, 0x89, 0x07, 0xd0, 0x3b, 0x78, 0xfb, 0x05, 0x4f, 0x21, 0x3b, 0xc5, - 0x54, 0x25, 0x5b, 0x64, 0x57, 0x04, 0xe6, 0x5d, 0xc0, 0xb5, 0x4e, 0xad, 0x53, 0x56, 0x24, 0x07, - 0x56, 0x4a, 0x61, 0x58, 0xb0, 0x9f, 0x68, 0x13, 0x77, 0xe0, 0x8f, 0x43, 0x7d, 0xa6, 0x0e, 0x3c, - 0xe3, 0x79, 0xfc, 0x9d, 0xe5, 0x15, 0xda, 0x03, 0x9d, 0x7f, 0x88, 0xeb, 0x5a, 0x81, 0x7f, 0x0e, - 0x35, 0xcb, 0x25, 0x75, 0xad, 0x89, 0xa9, 0x07, 0xcf, 0x51, 0x08, 0x2e, 0xbe, 0xa2, 0x94, 0x2c, - 0x41, 0x7b, 0xa8, 0xeb, 0xad, 0x9c, 0x77, 0x4b, 0x60, 0xd6, 0x2d, 0xc3, 0xd0, 0x4b, 0xa7, 0x30, - 0x96, 0xb5, 0x29, 0xdf, 0xb2, 0xfb, 0xf1, 0xac, 0xf0, 0x98, 0xa0, 0x36, 0x8c, 0xb0, 0x88, 0x75, - 0x6d, 0xe0, 0x12, 0xdf, 0x0a, 0x9b, 0x90, 0x6e, 0x61, 0x14, 0x69, 0x81, 0xd2, 0xb6, 0x5c, 0xcb, - 0x9f, 0x2c, 0x96, 0x81, 0x71, 0x1f, 0x41, 0x6f, 0x6b, 0xc2, 0x86, 0xc6, 0x7b, 0x0f, 0xfe, 0x26, - 0x93, 0x66, 0xdb, 0x8f, 0xbc, 0xc6, 0x94, 0x28, 0x54, 0x86, 0x6d, 0xf7, 0x3f, 0xc3, 0xfc, 0x32, - 0xd6, 0x94, 0x78, 0x34, 0xdf, 0x6a, 0xcc, 0x5f, 0xdc, 0x58, 0xf0, 0xaa, 0x63, 0xbd, 0x76, 0x28, - 0x7e, 0x67, 0x11, 0xd2, 0xbf, 0x04, 0xec, 0xa4, 0x43, 0x06, 0xfd, 0xd8, 0x4b, 0xad, 0x39, 0xb3, - 0xb3, 0xe8, 0xd9, 0x62, 0x8e, 0x5e, 0xc1, 0x0b, 0x79, 0x66, 0x65, 0xe9, 0x87, 0x16, 0xd7, 0x63, - 0x9b, 0xef, 0x04, 0x3d, 0xe0, 0xe6, 0x6f, 0xff, 0x13, 0x98, 0xe6, 0x17, 0xfc, 0xa5, 0x9f, 0x5a, - 0x84, 0x7d, 0xaf, 0xcd, 0x59, 0x3e, 0xa1, 0xcd, 0x98, 0x67, 0xb5, 0x81, 0x19, 0x17, 0x49, 0xc0, - 0x4a, 0x16, 0xa5, 0xd8, 0xe2, 0x28, 0x5b, 0x6f, 0xc2, 0xaa, 0xeb, 0xc9, 0xd0, 0x5f, 0xf9, 0x8f, - 0x90, 0x6b, 0x42, 0xee, 0x02, 0x00, 0x00, 0xff, 0xff, 0x93, 0x3f, 0x1a, 0x33, 0x58, 0x04, 0x00, - 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.proto deleted file mode 100644 index 12a38b110fe..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/ReconfigurationProtocol.proto +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - // This file contains protocol buffers that are used to reconfigure NameNode - // and DataNode by HDFS admin. - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "ReconfigurationProtocolProtos"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -/** Asks NN/DN to reload configuration file. 
*/ -message StartReconfigurationRequestProto { -} - -message StartReconfigurationResponseProto { -} - -/** Query the running status of reconfiguration process */ -message GetReconfigurationStatusRequestProto { -} - -message GetReconfigurationStatusConfigChangeProto { - required string name = 1; - required string oldValue = 2; - optional string newValue = 3; - optional string errorMessage = 4; // It is empty if success. -} - -message GetReconfigurationStatusResponseProto { - required int64 startTime = 1; - optional int64 endTime = 2; - repeated GetReconfigurationStatusConfigChangeProto changes = 3; -} - -/** Query the reconfigurable properties on NN/DN. */ -message ListReconfigurablePropertiesRequestProto { -} - -message ListReconfigurablePropertiesResponseProto { - repeated string name = 1; -} - -/** - * Protocol used from client to the NN/DN. - * See the request and response for details of rpc call. - */ -service ReconfigurationProtocolService { - rpc getReconfigurationStatus(GetReconfigurationStatusRequestProto) - returns(GetReconfigurationStatusResponseProto); - - rpc startReconfiguration(StartReconfigurationRequestProto) - returns(StartReconfigurationResponseProto); - - rpc listReconfigurableProperties( - ListReconfigurablePropertiesRequestProto) - returns(ListReconfigurablePropertiesResponseProto); -} \ No newline at end of file diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.pb.go deleted file mode 100644 index 5d766a35217..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.pb.go +++ /dev/null @@ -1,487 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: acl.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AclEntryProto_AclEntryScopeProto int32 - -const ( - AclEntryProto_ACCESS AclEntryProto_AclEntryScopeProto = 0 - AclEntryProto_DEFAULT AclEntryProto_AclEntryScopeProto = 1 -) - -var AclEntryProto_AclEntryScopeProto_name = map[int32]string{ - 0: "ACCESS", - 1: "DEFAULT", -} -var AclEntryProto_AclEntryScopeProto_value = map[string]int32{ - "ACCESS": 0, - "DEFAULT": 1, -} - -func (x AclEntryProto_AclEntryScopeProto) Enum() *AclEntryProto_AclEntryScopeProto { - p := new(AclEntryProto_AclEntryScopeProto) - *p = x - return p -} -func (x AclEntryProto_AclEntryScopeProto) String() string { - return proto.EnumName(AclEntryProto_AclEntryScopeProto_name, int32(x)) -} -func (x *AclEntryProto_AclEntryScopeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AclEntryProto_AclEntryScopeProto_value, data, "AclEntryProto_AclEntryScopeProto") - if err != nil { - return err - } - *x = AclEntryProto_AclEntryScopeProto(value) - return nil -} -func (AclEntryProto_AclEntryScopeProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor9, []int{0, 0} -} - -type AclEntryProto_AclEntryTypeProto int32 - -const ( - AclEntryProto_USER AclEntryProto_AclEntryTypeProto = 0 - AclEntryProto_GROUP AclEntryProto_AclEntryTypeProto = 1 - AclEntryProto_MASK AclEntryProto_AclEntryTypeProto = 2 - AclEntryProto_OTHER AclEntryProto_AclEntryTypeProto = 3 -) - -var AclEntryProto_AclEntryTypeProto_name = map[int32]string{ - 0: "USER", - 1: "GROUP", - 2: "MASK", - 3: "OTHER", -} -var AclEntryProto_AclEntryTypeProto_value = map[string]int32{ - "USER": 0, - "GROUP": 1, - "MASK": 2, - "OTHER": 3, -} - -func (x AclEntryProto_AclEntryTypeProto) Enum() *AclEntryProto_AclEntryTypeProto { - p := new(AclEntryProto_AclEntryTypeProto) - *p = x - return p -} -func (x AclEntryProto_AclEntryTypeProto) String() string { - return proto.EnumName(AclEntryProto_AclEntryTypeProto_name, int32(x)) -} -func (x *AclEntryProto_AclEntryTypeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AclEntryProto_AclEntryTypeProto_value, data, "AclEntryProto_AclEntryTypeProto") - if err != nil { - return err - } - *x = AclEntryProto_AclEntryTypeProto(value) - return nil -} -func (AclEntryProto_AclEntryTypeProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor9, []int{0, 1} -} - -type AclEntryProto_FsActionProto int32 - -const ( - AclEntryProto_NONE AclEntryProto_FsActionProto = 0 - AclEntryProto_EXECUTE AclEntryProto_FsActionProto = 1 - AclEntryProto_WRITE AclEntryProto_FsActionProto = 2 - AclEntryProto_WRITE_EXECUTE AclEntryProto_FsActionProto = 3 - AclEntryProto_READ AclEntryProto_FsActionProto = 4 - AclEntryProto_READ_EXECUTE AclEntryProto_FsActionProto = 5 - AclEntryProto_READ_WRITE AclEntryProto_FsActionProto = 6 - AclEntryProto_PERM_ALL AclEntryProto_FsActionProto = 7 -) - -var AclEntryProto_FsActionProto_name = map[int32]string{ - 0: "NONE", - 1: "EXECUTE", - 2: "WRITE", - 3: "WRITE_EXECUTE", - 4: "READ", - 5: "READ_EXECUTE", - 6: "READ_WRITE", - 7: "PERM_ALL", -} -var AclEntryProto_FsActionProto_value = map[string]int32{ - "NONE": 0, - "EXECUTE": 1, - "WRITE": 2, - "WRITE_EXECUTE": 3, - "READ": 4, - "READ_EXECUTE": 5, - "READ_WRITE": 6, - "PERM_ALL": 7, -} - -func (x AclEntryProto_FsActionProto) Enum() *AclEntryProto_FsActionProto { - p := new(AclEntryProto_FsActionProto) - *p = x - return p -} -func (x AclEntryProto_FsActionProto) String() string { - return proto.EnumName(AclEntryProto_FsActionProto_name, int32(x)) -} 
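The three nested enums carry the scope/type/permission triple of a POSIX-style ACL entry, and their *_name/*_value maps back the String() and UnmarshalJSON() methods above; the AclEntryProto struct defined just below combines them. A brief sketch of composing a SetAclRequestProto from these pieces, again assuming the vendored bindings prior to this deletion; the user name and path are hypothetical:

    package main

    import (
        "fmt"

        hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
        "github.com/golang/protobuf/proto"
    )

    func main() {
        // Grant a hypothetical named user "alice" rw- access on /data.
        entry := &hdfs.AclEntryProto{
            Type:        hdfs.AclEntryProto_USER.Enum(),
            Scope:       hdfs.AclEntryProto_ACCESS.Enum(),
            Permissions: hdfs.AclEntryProto_READ_WRITE.Enum(),
            Name:        proto.String("alice"), // only named entries carry a name
        }
        req := &hdfs.SetAclRequestProto{
            Src:     proto.String("/data"),
            AclSpec: []*hdfs.AclEntryProto{entry},
        }
        fmt.Println(proto.MarshalTextString(req))
    }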
-func (x *AclEntryProto_FsActionProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AclEntryProto_FsActionProto_value, data, "AclEntryProto_FsActionProto") - if err != nil { - return err - } - *x = AclEntryProto_FsActionProto(value) - return nil -} -func (AclEntryProto_FsActionProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor9, []int{0, 2} -} - -type AclEntryProto struct { - Type *AclEntryProto_AclEntryTypeProto `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.AclEntryProto_AclEntryTypeProto" json:"type,omitempty"` - Scope *AclEntryProto_AclEntryScopeProto `protobuf:"varint,2,req,name=scope,enum=hadoop.hdfs.AclEntryProto_AclEntryScopeProto" json:"scope,omitempty"` - Permissions *AclEntryProto_FsActionProto `protobuf:"varint,3,req,name=permissions,enum=hadoop.hdfs.AclEntryProto_FsActionProto" json:"permissions,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AclEntryProto) Reset() { *m = AclEntryProto{} } -func (m *AclEntryProto) String() string { return proto.CompactTextString(m) } -func (*AclEntryProto) ProtoMessage() {} -func (*AclEntryProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } - -func (m *AclEntryProto) GetType() AclEntryProto_AclEntryTypeProto { - if m != nil && m.Type != nil { - return *m.Type - } - return AclEntryProto_USER -} - -func (m *AclEntryProto) GetScope() AclEntryProto_AclEntryScopeProto { - if m != nil && m.Scope != nil { - return *m.Scope - } - return AclEntryProto_ACCESS -} - -func (m *AclEntryProto) GetPermissions() AclEntryProto_FsActionProto { - if m != nil && m.Permissions != nil { - return *m.Permissions - } - return AclEntryProto_NONE -} - -func (m *AclEntryProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type AclStatusProto struct { - Owner *string `protobuf:"bytes,1,req,name=owner" json:"owner,omitempty"` - Group *string `protobuf:"bytes,2,req,name=group" json:"group,omitempty"` - Sticky *bool `protobuf:"varint,3,req,name=sticky" json:"sticky,omitempty"` - Entries []*AclEntryProto `protobuf:"bytes,4,rep,name=entries" json:"entries,omitempty"` - Permission *FsPermissionProto `protobuf:"bytes,5,opt,name=permission" json:"permission,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AclStatusProto) Reset() { *m = AclStatusProto{} } -func (m *AclStatusProto) String() string { return proto.CompactTextString(m) } -func (*AclStatusProto) ProtoMessage() {} -func (*AclStatusProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } - -func (m *AclStatusProto) GetOwner() string { - if m != nil && m.Owner != nil { - return *m.Owner - } - return "" -} - -func (m *AclStatusProto) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *AclStatusProto) GetSticky() bool { - if m != nil && m.Sticky != nil { - return *m.Sticky - } - return false -} - -func (m *AclStatusProto) GetEntries() []*AclEntryProto { - if m != nil { - return m.Entries - } - return nil -} - -func (m *AclStatusProto) GetPermission() *FsPermissionProto { - if m != nil { - return m.Permission - } - return nil -} - -type ModifyAclEntriesRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - AclSpec []*AclEntryProto `protobuf:"bytes,2,rep,name=aclSpec" json:"aclSpec,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModifyAclEntriesRequestProto) Reset() { *m = 
ModifyAclEntriesRequestProto{} } -func (m *ModifyAclEntriesRequestProto) String() string { return proto.CompactTextString(m) } -func (*ModifyAclEntriesRequestProto) ProtoMessage() {} -func (*ModifyAclEntriesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} } - -func (m *ModifyAclEntriesRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *ModifyAclEntriesRequestProto) GetAclSpec() []*AclEntryProto { - if m != nil { - return m.AclSpec - } - return nil -} - -type ModifyAclEntriesResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ModifyAclEntriesResponseProto) Reset() { *m = ModifyAclEntriesResponseProto{} } -func (m *ModifyAclEntriesResponseProto) String() string { return proto.CompactTextString(m) } -func (*ModifyAclEntriesResponseProto) ProtoMessage() {} -func (*ModifyAclEntriesResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{3} } - -type RemoveAclRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveAclRequestProto) Reset() { *m = RemoveAclRequestProto{} } -func (m *RemoveAclRequestProto) String() string { return proto.CompactTextString(m) } -func (*RemoveAclRequestProto) ProtoMessage() {} -func (*RemoveAclRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } - -func (m *RemoveAclRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type RemoveAclResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveAclResponseProto) Reset() { *m = RemoveAclResponseProto{} } -func (m *RemoveAclResponseProto) String() string { return proto.CompactTextString(m) } -func (*RemoveAclResponseProto) ProtoMessage() {} -func (*RemoveAclResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{5} } - -type RemoveAclEntriesRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - AclSpec []*AclEntryProto `protobuf:"bytes,2,rep,name=aclSpec" json:"aclSpec,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveAclEntriesRequestProto) Reset() { *m = RemoveAclEntriesRequestProto{} } -func (m *RemoveAclEntriesRequestProto) String() string { return proto.CompactTextString(m) } -func (*RemoveAclEntriesRequestProto) ProtoMessage() {} -func (*RemoveAclEntriesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } - -func (m *RemoveAclEntriesRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *RemoveAclEntriesRequestProto) GetAclSpec() []*AclEntryProto { - if m != nil { - return m.AclSpec - } - return nil -} - -type RemoveAclEntriesResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveAclEntriesResponseProto) Reset() { *m = RemoveAclEntriesResponseProto{} } -func (m *RemoveAclEntriesResponseProto) String() string { return proto.CompactTextString(m) } -func (*RemoveAclEntriesResponseProto) ProtoMessage() {} -func (*RemoveAclEntriesResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{7} } - -type RemoveDefaultAclRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveDefaultAclRequestProto) Reset() { *m = RemoveDefaultAclRequestProto{} } -func (m *RemoveDefaultAclRequestProto) String() string { return 
proto.CompactTextString(m) } -func (*RemoveDefaultAclRequestProto) ProtoMessage() {} -func (*RemoveDefaultAclRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{8} } - -func (m *RemoveDefaultAclRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type RemoveDefaultAclResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveDefaultAclResponseProto) Reset() { *m = RemoveDefaultAclResponseProto{} } -func (m *RemoveDefaultAclResponseProto) String() string { return proto.CompactTextString(m) } -func (*RemoveDefaultAclResponseProto) ProtoMessage() {} -func (*RemoveDefaultAclResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{9} } - -type SetAclRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - AclSpec []*AclEntryProto `protobuf:"bytes,2,rep,name=aclSpec" json:"aclSpec,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetAclRequestProto) Reset() { *m = SetAclRequestProto{} } -func (m *SetAclRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetAclRequestProto) ProtoMessage() {} -func (*SetAclRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{10} } - -func (m *SetAclRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetAclRequestProto) GetAclSpec() []*AclEntryProto { - if m != nil { - return m.AclSpec - } - return nil -} - -type SetAclResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetAclResponseProto) Reset() { *m = SetAclResponseProto{} } -func (m *SetAclResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetAclResponseProto) ProtoMessage() {} -func (*SetAclResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{11} } - -type GetAclStatusRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAclStatusRequestProto) Reset() { *m = GetAclStatusRequestProto{} } -func (m *GetAclStatusRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetAclStatusRequestProto) ProtoMessage() {} -func (*GetAclStatusRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{12} } - -func (m *GetAclStatusRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type GetAclStatusResponseProto struct { - Result *AclStatusProto `protobuf:"bytes,1,req,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetAclStatusResponseProto) Reset() { *m = GetAclStatusResponseProto{} } -func (m *GetAclStatusResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetAclStatusResponseProto) ProtoMessage() {} -func (*GetAclStatusResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{13} } - -func (m *GetAclStatusResponseProto) GetResult() *AclStatusProto { - if m != nil { - return m.Result - } - return nil -} - -func init() { - proto.RegisterType((*AclEntryProto)(nil), "hadoop.hdfs.AclEntryProto") - proto.RegisterType((*AclStatusProto)(nil), "hadoop.hdfs.AclStatusProto") - proto.RegisterType((*ModifyAclEntriesRequestProto)(nil), "hadoop.hdfs.ModifyAclEntriesRequestProto") - proto.RegisterType((*ModifyAclEntriesResponseProto)(nil), "hadoop.hdfs.ModifyAclEntriesResponseProto") - proto.RegisterType((*RemoveAclRequestProto)(nil), 
"hadoop.hdfs.RemoveAclRequestProto") - proto.RegisterType((*RemoveAclResponseProto)(nil), "hadoop.hdfs.RemoveAclResponseProto") - proto.RegisterType((*RemoveAclEntriesRequestProto)(nil), "hadoop.hdfs.RemoveAclEntriesRequestProto") - proto.RegisterType((*RemoveAclEntriesResponseProto)(nil), "hadoop.hdfs.RemoveAclEntriesResponseProto") - proto.RegisterType((*RemoveDefaultAclRequestProto)(nil), "hadoop.hdfs.RemoveDefaultAclRequestProto") - proto.RegisterType((*RemoveDefaultAclResponseProto)(nil), "hadoop.hdfs.RemoveDefaultAclResponseProto") - proto.RegisterType((*SetAclRequestProto)(nil), "hadoop.hdfs.SetAclRequestProto") - proto.RegisterType((*SetAclResponseProto)(nil), "hadoop.hdfs.SetAclResponseProto") - proto.RegisterType((*GetAclStatusRequestProto)(nil), "hadoop.hdfs.GetAclStatusRequestProto") - proto.RegisterType((*GetAclStatusResponseProto)(nil), "hadoop.hdfs.GetAclStatusResponseProto") - proto.RegisterEnum("hadoop.hdfs.AclEntryProto_AclEntryScopeProto", AclEntryProto_AclEntryScopeProto_name, AclEntryProto_AclEntryScopeProto_value) - proto.RegisterEnum("hadoop.hdfs.AclEntryProto_AclEntryTypeProto", AclEntryProto_AclEntryTypeProto_name, AclEntryProto_AclEntryTypeProto_value) - proto.RegisterEnum("hadoop.hdfs.AclEntryProto_FsActionProto", AclEntryProto_FsActionProto_name, AclEntryProto_FsActionProto_value) -} - -func init() { proto.RegisterFile("acl.proto", fileDescriptor9) } - -var fileDescriptor9 = []byte{ - // 605 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xdb, 0x6e, 0xd3, 0x40, - 0x10, 0xad, 0x63, 0x27, 0xad, 0xc7, 0x6d, 0xb5, 0x5d, 0x68, 0x65, 0x4a, 0x81, 0xc8, 0x12, 0x52, - 0x90, 0xda, 0x08, 0x05, 0x78, 0x04, 0xe1, 0xa6, 0xdb, 0x72, 0xe9, 0x25, 0x5a, 0x27, 0x82, 0x07, - 0xa4, 0xca, 0xda, 0x6e, 0x5a, 0x0b, 0xd7, 0x6b, 0xbc, 0x0e, 0x28, 0x2f, 0x7c, 0x0b, 0xdf, 0xc3, - 0x77, 0xf0, 0x21, 0xc8, 0xbb, 0x4e, 0x70, 0x88, 0x68, 0x10, 0x12, 0x2f, 0xd1, 0x5c, 0xce, 0x39, - 0x73, 0x46, 0x13, 0x2f, 0xd8, 0x21, 0x8b, 0xdb, 0x69, 0x26, 0x72, 0x81, 0x9d, 0xab, 0xf0, 0x42, - 0x88, 0xb4, 0x7d, 0x75, 0x31, 0x94, 0xdb, 0x50, 0xfc, 0xea, 0x86, 0xf7, 0xc3, 0x84, 0x35, 0x9f, - 0xc5, 0x24, 0xc9, 0xb3, 0x71, 0x4f, 0x41, 0x5f, 0x82, 0x95, 0x8f, 0x53, 0xee, 0x1a, 0xcd, 0x5a, - 0x6b, 0xbd, 0xb3, 0xdb, 0xae, 0x30, 0xdb, 0x33, 0xc8, 0x69, 0xd6, 0x1f, 0xa7, 0x5c, 0x55, 0xa8, - 0x62, 0xe2, 0x2e, 0xd4, 0x25, 0x13, 0x29, 0x77, 0x6b, 0x4a, 0x62, 0xef, 0x2f, 0x24, 0x82, 0x02, - 0xaf, 0x35, 0x34, 0x17, 0xbf, 0x01, 0x27, 0xe5, 0xd9, 0x75, 0x24, 0x65, 0x24, 0x12, 0xe9, 0x9a, - 0x4a, 0xaa, 0x75, 0x83, 0xd4, 0xa1, 0xf4, 0x59, 0x1e, 0x89, 0x44, 0xab, 0x54, 0xc9, 0x18, 0x83, - 0x95, 0x84, 0xd7, 0xdc, 0xb5, 0x9a, 0x46, 0xcb, 0xa6, 0x2a, 0xf6, 0xf6, 0x00, 0xcf, 0x0f, 0xc7, - 0x00, 0x0d, 0xbf, 0xdb, 0x25, 0x41, 0x80, 0x96, 0xb0, 0x03, 0xcb, 0x07, 0xe4, 0xd0, 0x1f, 0x1c, - 0xf7, 0x91, 0xe1, 0x3d, 0x87, 0x8d, 0xb9, 0x75, 0xf1, 0x0a, 0x58, 0x83, 0x80, 0x50, 0xb4, 0x84, - 0x6d, 0xa8, 0x1f, 0xd1, 0xb3, 0x41, 0x0f, 0x19, 0x45, 0xf1, 0xc4, 0x0f, 0xde, 0xa2, 0x5a, 0x51, - 0x3c, 0xeb, 0xbf, 0x22, 0x14, 0x99, 0xde, 0x57, 0x58, 0x9b, 0xf1, 0x57, 0xa0, 0x4e, 0xcf, 0x4e, - 0x89, 0x1e, 0x43, 0xde, 0x93, 0xee, 0xa0, 0x4f, 0x90, 0x51, 0x50, 0xde, 0xd1, 0xd7, 0x7d, 0x82, - 0x6a, 0x78, 0x03, 0xd6, 0x54, 0x78, 0x3e, 0xe9, 0x9a, 0x05, 0x89, 0x12, 0xff, 0x00, 0x59, 0x18, - 0xc1, 0x6a, 0x11, 0x4d, 0x7b, 0x75, 0xbc, 0x0e, 0xa0, 0x2a, 0x9a, 0xde, 0xc0, 0xab, 0xb0, 0xd2, - 0x23, 0xf4, 0xe4, 0xdc, 0x3f, 0x3e, 0x46, 0xcb, 0xde, 0x77, 0x03, 0xd6, 0x7d, 0x16, 0x07, 0x79, - 0x98, 0x8f, 
0xa4, 0x76, 0x70, 0x1b, 0xea, 0xe2, 0x4b, 0xc2, 0x33, 0x75, 0x68, 0x9b, 0xea, 0xa4, - 0xa8, 0x5e, 0x66, 0x62, 0x94, 0xaa, 0xdb, 0xd9, 0x54, 0x27, 0x78, 0x0b, 0x1a, 0x32, 0x8f, 0xd8, - 0xc7, 0xb1, 0xba, 0xc3, 0x0a, 0x2d, 0x33, 0xfc, 0x14, 0x96, 0x79, 0x92, 0x67, 0x11, 0x97, 0xae, - 0xd5, 0x34, 0x5b, 0x4e, 0x67, 0xfb, 0xcf, 0x07, 0xa2, 0x13, 0x28, 0x7e, 0x01, 0xf0, 0xeb, 0x3a, - 0x6e, 0xbd, 0x69, 0xb4, 0x9c, 0xce, 0xfd, 0x19, 0xe2, 0xa1, 0xec, 0x4d, 0x01, 0x9a, 0x5c, 0x61, - 0x78, 0x43, 0xd8, 0x39, 0x11, 0x17, 0xd1, 0x70, 0x5c, 0xea, 0x47, 0x5c, 0x52, 0xfe, 0x69, 0xc4, - 0x65, 0xae, 0x37, 0x43, 0x60, 0xca, 0x8c, 0x95, 0x7b, 0x15, 0x61, 0xe1, 0x33, 0x64, 0x71, 0x90, - 0x72, 0xe6, 0xd6, 0x16, 0xfb, 0x2c, 0xa1, 0xde, 0x03, 0xb8, 0x37, 0x3f, 0x47, 0xa6, 0x22, 0x91, - 0xfa, 0xfe, 0xde, 0x23, 0xd8, 0xa4, 0xfc, 0x5a, 0x7c, 0xe6, 0x3e, 0x8b, 0x6f, 0x76, 0xe0, 0xb9, - 0xb0, 0x55, 0x81, 0x56, 0x45, 0x86, 0xb0, 0x33, 0xed, 0xfc, 0xe7, 0x6d, 0xe6, 0xe7, 0x54, 0x8d, - 0x3c, 0x9e, 0x18, 0x39, 0xe0, 0xc3, 0x70, 0x14, 0xe7, 0x8b, 0x97, 0x9a, 0x4a, 0x56, 0x19, 0x55, - 0xc9, 0x0f, 0x80, 0x03, 0xbe, 0x58, 0xe8, 0x1f, 0x37, 0xda, 0x84, 0x5b, 0x13, 0xf5, 0xea, 0xd0, - 0x5d, 0x70, 0x8f, 0x54, 0x59, 0xff, 0xdb, 0x17, 0xec, 0xd0, 0x83, 0x3b, 0xb3, 0xe8, 0x8a, 0x14, - 0x7e, 0x02, 0x8d, 0x8c, 0xcb, 0x51, 0x9c, 0x2b, 0x86, 0xd3, 0xb9, 0xfb, 0xbb, 0xad, 0xca, 0x07, - 0x45, 0x4b, 0xe8, 0xfe, 0x33, 0x78, 0x28, 0xb2, 0xcb, 0x76, 0x98, 0x86, 0xec, 0x8a, 0xcf, 0x10, - 0xd4, 0x93, 0xcb, 0x44, 0xf9, 0x28, 0xef, 0xdb, 0x3e, 0x8b, 0x15, 0x55, 0x7e, 0x33, 0x8c, 0x9f, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x5a, 0xf8, 0x89, 0x4e, 0xae, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.proto deleted file mode 100644 index bb7fdb0168f..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/acl.proto +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "AclProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "hdfs.proto"; - -message AclEntryProto { - enum AclEntryScopeProto { - ACCESS = 0x0; - DEFAULT = 0x1; - } - - enum AclEntryTypeProto { - USER = 0x0; - GROUP = 0x1; - MASK = 0x2; - OTHER = 0x3; - } - - enum FsActionProto { - NONE = 0x0; - EXECUTE = 0x1; - WRITE = 0x2; - WRITE_EXECUTE = 0x3; - READ = 0x4; - READ_EXECUTE = 0x5; - READ_WRITE = 0x6; - PERM_ALL = 0x7; - } - - required AclEntryTypeProto type = 1; - required AclEntryScopeProto scope = 2; - required FsActionProto permissions = 3; - optional string name = 4; -} - -message AclStatusProto { - required string owner = 1; - required string group = 2; - required bool sticky = 3; - repeated AclEntryProto entries = 4; - optional FsPermissionProto permission = 5; -} - -message ModifyAclEntriesRequestProto { - required string src = 1; - repeated AclEntryProto aclSpec = 2; -} - -message ModifyAclEntriesResponseProto { -} - -message RemoveAclRequestProto { - required string src = 1; -} - -message RemoveAclResponseProto { -} - -message RemoveAclEntriesRequestProto { - required string src = 1; - repeated AclEntryProto aclSpec = 2; -} - -message RemoveAclEntriesResponseProto { -} - -message RemoveDefaultAclRequestProto { - required string src = 1; -} - -message RemoveDefaultAclResponseProto { -} - -message SetAclRequestProto { - required string src = 1; - repeated AclEntryProto aclSpec = 2; -} - -message SetAclResponseProto { -} - -message GetAclStatusRequestProto { - required string src = 1; -} - -message GetAclStatusResponseProto { - required AclStatusProto result = 1; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.pb.go deleted file mode 100644 index 00784625fe1..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.pb.go +++ /dev/null @@ -1,1419 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: datatransfer.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import hadoop_common "github.com/colinmarc/hdfs/protocol/hadoop_common" - -// Reference imports to suppress errors if they are not otherwise used. 
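
Before the data-transfer messages below, a note on the ACL definitions deleted just above: FsActionProto is a 3-bit rwx mask (READ = 0x4, WRITE = 0x2, EXECUTE = 0x1, and READ_EXECUTE = 0x5 is literally READ|EXECUTE), so an AclEntryProto is essentially one line of a POSIX ACL. A minimal sketch of building a SetAcl request with the generated types this patch removes (proto.String and the Enum() helpers come from github.com/golang/protobuf/proto; the import path is the pre-removal vendored one, and the group name is made up):

package aclexample

import (
	hadoop_hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	proto "github.com/golang/protobuf/proto"
)

// Grant group "analysts" (a hypothetical name) read+execute on /data.
// In proto2-generated Go, every scalar field is a pointer, hence the
// proto.String / Enum() wrappers.
func newSetAcl() *hadoop_hdfs.SetAclRequestProto {
	entry := &hadoop_hdfs.AclEntryProto{
		Type:        hadoop_hdfs.AclEntryProto_GROUP.Enum(),
		Scope:       hadoop_hdfs.AclEntryProto_ACCESS.Enum(),
		Permissions: hadoop_hdfs.AclEntryProto_READ_EXECUTE.Enum(),
		Name:        proto.String("analysts"),
	}
	return &hadoop_hdfs.SetAclRequestProto{
		Src:     proto.String("/data"),
		AclSpec: []*hadoop_hdfs.AclEntryProto{entry},
	}
}
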
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Status is a 4-bit enum -type Status int32 - -const ( - Status_SUCCESS Status = 0 - Status_ERROR Status = 1 - Status_ERROR_CHECKSUM Status = 2 - Status_ERROR_INVALID Status = 3 - Status_ERROR_EXISTS Status = 4 - Status_ERROR_ACCESS_TOKEN Status = 5 - Status_CHECKSUM_OK Status = 6 - Status_ERROR_UNSUPPORTED Status = 7 - Status_OOB_RESTART Status = 8 - Status_OOB_RESERVED1 Status = 9 - Status_OOB_RESERVED2 Status = 10 - Status_OOB_RESERVED3 Status = 11 - Status_IN_PROGRESS Status = 12 -) - -var Status_name = map[int32]string{ - 0: "SUCCESS", - 1: "ERROR", - 2: "ERROR_CHECKSUM", - 3: "ERROR_INVALID", - 4: "ERROR_EXISTS", - 5: "ERROR_ACCESS_TOKEN", - 6: "CHECKSUM_OK", - 7: "ERROR_UNSUPPORTED", - 8: "OOB_RESTART", - 9: "OOB_RESERVED1", - 10: "OOB_RESERVED2", - 11: "OOB_RESERVED3", - 12: "IN_PROGRESS", -} -var Status_value = map[string]int32{ - "SUCCESS": 0, - "ERROR": 1, - "ERROR_CHECKSUM": 2, - "ERROR_INVALID": 3, - "ERROR_EXISTS": 4, - "ERROR_ACCESS_TOKEN": 5, - "CHECKSUM_OK": 6, - "ERROR_UNSUPPORTED": 7, - "OOB_RESTART": 8, - "OOB_RESERVED1": 9, - "OOB_RESERVED2": 10, - "OOB_RESERVED3": 11, - "IN_PROGRESS": 12, -} - -func (x Status) Enum() *Status { - p := new(Status) - *p = x - return p -} -func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) -} -func (x *Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Status_value, data, "Status") - if err != nil { - return err - } - *x = Status(value) - return nil -} -func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } - -type ShortCircuitFdResponse int32 - -const ( - ShortCircuitFdResponse_DO_NOT_USE_RECEIPT_VERIFICATION ShortCircuitFdResponse = 0 - ShortCircuitFdResponse_USE_RECEIPT_VERIFICATION ShortCircuitFdResponse = 1 -) - -var ShortCircuitFdResponse_name = map[int32]string{ - 0: "DO_NOT_USE_RECEIPT_VERIFICATION", - 1: "USE_RECEIPT_VERIFICATION", -} -var ShortCircuitFdResponse_value = map[string]int32{ - "DO_NOT_USE_RECEIPT_VERIFICATION": 0, - "USE_RECEIPT_VERIFICATION": 1, -} - -func (x ShortCircuitFdResponse) Enum() *ShortCircuitFdResponse { - p := new(ShortCircuitFdResponse) - *p = x - return p -} -func (x ShortCircuitFdResponse) String() string { - return proto.EnumName(ShortCircuitFdResponse_name, int32(x)) -} -func (x *ShortCircuitFdResponse) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ShortCircuitFdResponse_value, data, "ShortCircuitFdResponse") - if err != nil { - return err - } - *x = ShortCircuitFdResponse(value) - return nil -} -func (ShortCircuitFdResponse) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } - -type DataTransferEncryptorMessageProto_DataTransferEncryptorStatus int32 - -const ( - DataTransferEncryptorMessageProto_SUCCESS DataTransferEncryptorMessageProto_DataTransferEncryptorStatus = 0 - DataTransferEncryptorMessageProto_ERROR_UNKNOWN_KEY DataTransferEncryptorMessageProto_DataTransferEncryptorStatus = 1 - DataTransferEncryptorMessageProto_ERROR DataTransferEncryptorMessageProto_DataTransferEncryptorStatus = 2 -) - -var DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_name = map[int32]string{ - 0: "SUCCESS", - 1: "ERROR_UNKNOWN_KEY", - 2: "ERROR", -} -var DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_value = map[string]int32{ - "SUCCESS": 0, - "ERROR_UNKNOWN_KEY": 1, - "ERROR": 2, -} - -func (x DataTransferEncryptorMessageProto_DataTransferEncryptorStatus) Enum() 
*DataTransferEncryptorMessageProto_DataTransferEncryptorStatus { - p := new(DataTransferEncryptorMessageProto_DataTransferEncryptorStatus) - *p = x - return p -} -func (x DataTransferEncryptorMessageProto_DataTransferEncryptorStatus) String() string { - return proto.EnumName(DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_name, int32(x)) -} -func (x *DataTransferEncryptorMessageProto_DataTransferEncryptorStatus) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_value, data, "DataTransferEncryptorMessageProto_DataTransferEncryptorStatus") - if err != nil { - return err - } - *x = DataTransferEncryptorMessageProto_DataTransferEncryptorStatus(value) - return nil -} -func (DataTransferEncryptorMessageProto_DataTransferEncryptorStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor5, []int{0, 0} -} - -type OpWriteBlockProto_BlockConstructionStage int32 - -const ( - OpWriteBlockProto_PIPELINE_SETUP_APPEND OpWriteBlockProto_BlockConstructionStage = 0 - // pipeline set up for failed PIPELINE_SETUP_APPEND recovery - OpWriteBlockProto_PIPELINE_SETUP_APPEND_RECOVERY OpWriteBlockProto_BlockConstructionStage = 1 - // data streaming - OpWriteBlockProto_DATA_STREAMING OpWriteBlockProto_BlockConstructionStage = 2 - // pipeline setup for failed data streaming recovery - OpWriteBlockProto_PIPELINE_SETUP_STREAMING_RECOVERY OpWriteBlockProto_BlockConstructionStage = 3 - // close the block and pipeline - OpWriteBlockProto_PIPELINE_CLOSE OpWriteBlockProto_BlockConstructionStage = 4 - // Recover a failed PIPELINE_CLOSE - OpWriteBlockProto_PIPELINE_CLOSE_RECOVERY OpWriteBlockProto_BlockConstructionStage = 5 - // pipeline set up for block creation - OpWriteBlockProto_PIPELINE_SETUP_CREATE OpWriteBlockProto_BlockConstructionStage = 6 - // transfer RBW for adding datanodes - OpWriteBlockProto_TRANSFER_RBW OpWriteBlockProto_BlockConstructionStage = 7 - // transfer Finalized for adding datanodes - OpWriteBlockProto_TRANSFER_FINALIZED OpWriteBlockProto_BlockConstructionStage = 8 -) - -var OpWriteBlockProto_BlockConstructionStage_name = map[int32]string{ - 0: "PIPELINE_SETUP_APPEND", - 1: "PIPELINE_SETUP_APPEND_RECOVERY", - 2: "DATA_STREAMING", - 3: "PIPELINE_SETUP_STREAMING_RECOVERY", - 4: "PIPELINE_CLOSE", - 5: "PIPELINE_CLOSE_RECOVERY", - 6: "PIPELINE_SETUP_CREATE", - 7: "TRANSFER_RBW", - 8: "TRANSFER_FINALIZED", -} -var OpWriteBlockProto_BlockConstructionStage_value = map[string]int32{ - "PIPELINE_SETUP_APPEND": 0, - "PIPELINE_SETUP_APPEND_RECOVERY": 1, - "DATA_STREAMING": 2, - "PIPELINE_SETUP_STREAMING_RECOVERY": 3, - "PIPELINE_CLOSE": 4, - "PIPELINE_CLOSE_RECOVERY": 5, - "PIPELINE_SETUP_CREATE": 6, - "TRANSFER_RBW": 7, - "TRANSFER_FINALIZED": 8, -} - -func (x OpWriteBlockProto_BlockConstructionStage) Enum() *OpWriteBlockProto_BlockConstructionStage { - p := new(OpWriteBlockProto_BlockConstructionStage) - *p = x - return p -} -func (x OpWriteBlockProto_BlockConstructionStage) String() string { - return proto.EnumName(OpWriteBlockProto_BlockConstructionStage_name, int32(x)) -} -func (x *OpWriteBlockProto_BlockConstructionStage) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(OpWriteBlockProto_BlockConstructionStage_value, data, "OpWriteBlockProto_BlockConstructionStage") - if err != nil { - return err - } - *x = OpWriteBlockProto_BlockConstructionStage(value) - return nil -} -func (OpWriteBlockProto_BlockConstructionStage) EnumDescriptor() ([]byte, []int) { - return 
fileDescriptor5, []int{7, 0} -} - -type DataTransferEncryptorMessageProto struct { - Status *DataTransferEncryptorMessageProto_DataTransferEncryptorStatus `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.DataTransferEncryptorMessageProto_DataTransferEncryptorStatus" json:"status,omitempty"` - Payload []byte `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` - Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` - CipherOption []*CipherOptionProto `protobuf:"bytes,4,rep,name=cipherOption" json:"cipherOption,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DataTransferEncryptorMessageProto) Reset() { *m = DataTransferEncryptorMessageProto{} } -func (m *DataTransferEncryptorMessageProto) String() string { return proto.CompactTextString(m) } -func (*DataTransferEncryptorMessageProto) ProtoMessage() {} -func (*DataTransferEncryptorMessageProto) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{0} -} - -func (m *DataTransferEncryptorMessageProto) GetStatus() DataTransferEncryptorMessageProto_DataTransferEncryptorStatus { - if m != nil && m.Status != nil { - return *m.Status - } - return DataTransferEncryptorMessageProto_SUCCESS -} - -func (m *DataTransferEncryptorMessageProto) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *DataTransferEncryptorMessageProto) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *DataTransferEncryptorMessageProto) GetCipherOption() []*CipherOptionProto { - if m != nil { - return m.CipherOption - } - return nil -} - -type BaseHeaderProto struct { - Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - Token *hadoop_common.TokenProto `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"` - TraceInfo *DataTransferTraceInfoProto `protobuf:"bytes,3,opt,name=traceInfo" json:"traceInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BaseHeaderProto) Reset() { *m = BaseHeaderProto{} } -func (m *BaseHeaderProto) String() string { return proto.CompactTextString(m) } -func (*BaseHeaderProto) ProtoMessage() {} -func (*BaseHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } - -func (m *BaseHeaderProto) GetBlock() *ExtendedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -func (m *BaseHeaderProto) GetToken() *hadoop_common.TokenProto { - if m != nil { - return m.Token - } - return nil -} - -func (m *BaseHeaderProto) GetTraceInfo() *DataTransferTraceInfoProto { - if m != nil { - return m.TraceInfo - } - return nil -} - -type DataTransferTraceInfoProto struct { - TraceId *uint64 `protobuf:"varint,1,req,name=traceId" json:"traceId,omitempty"` - ParentId *uint64 `protobuf:"varint,2,req,name=parentId" json:"parentId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DataTransferTraceInfoProto) Reset() { *m = DataTransferTraceInfoProto{} } -func (m *DataTransferTraceInfoProto) String() string { return proto.CompactTextString(m) } -func (*DataTransferTraceInfoProto) ProtoMessage() {} -func (*DataTransferTraceInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{2} } - -func (m *DataTransferTraceInfoProto) GetTraceId() uint64 { - if m != nil && m.TraceId != nil { - return *m.TraceId - } - return 0 -} - -func (m *DataTransferTraceInfoProto) GetParentId() uint64 { - if m != nil && m.ParentId != nil { - return *m.ParentId - } - return 0 -} - -type ClientOperationHeaderProto struct { - BaseHeader 
*BaseHeaderProto `protobuf:"bytes,1,req,name=baseHeader" json:"baseHeader,omitempty"` - ClientName *string `protobuf:"bytes,2,req,name=clientName" json:"clientName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ClientOperationHeaderProto) Reset() { *m = ClientOperationHeaderProto{} } -func (m *ClientOperationHeaderProto) String() string { return proto.CompactTextString(m) } -func (*ClientOperationHeaderProto) ProtoMessage() {} -func (*ClientOperationHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{3} } - -func (m *ClientOperationHeaderProto) GetBaseHeader() *BaseHeaderProto { - if m != nil { - return m.BaseHeader - } - return nil -} - -func (m *ClientOperationHeaderProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -type CachingStrategyProto struct { - DropBehind *bool `protobuf:"varint,1,opt,name=dropBehind" json:"dropBehind,omitempty"` - Readahead *int64 `protobuf:"varint,2,opt,name=readahead" json:"readahead,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CachingStrategyProto) Reset() { *m = CachingStrategyProto{} } -func (m *CachingStrategyProto) String() string { return proto.CompactTextString(m) } -func (*CachingStrategyProto) ProtoMessage() {} -func (*CachingStrategyProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{4} } - -func (m *CachingStrategyProto) GetDropBehind() bool { - if m != nil && m.DropBehind != nil { - return *m.DropBehind - } - return false -} - -func (m *CachingStrategyProto) GetReadahead() int64 { - if m != nil && m.Readahead != nil { - return *m.Readahead - } - return 0 -} - -type OpReadBlockProto struct { - Header *ClientOperationHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - Offset *uint64 `protobuf:"varint,2,req,name=offset" json:"offset,omitempty"` - Len *uint64 `protobuf:"varint,3,req,name=len" json:"len,omitempty"` - SendChecksums *bool `protobuf:"varint,4,opt,name=sendChecksums,def=1" json:"sendChecksums,omitempty"` - CachingStrategy *CachingStrategyProto `protobuf:"bytes,5,opt,name=cachingStrategy" json:"cachingStrategy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpReadBlockProto) Reset() { *m = OpReadBlockProto{} } -func (m *OpReadBlockProto) String() string { return proto.CompactTextString(m) } -func (*OpReadBlockProto) ProtoMessage() {} -func (*OpReadBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{5} } - -const Default_OpReadBlockProto_SendChecksums bool = true - -func (m *OpReadBlockProto) GetHeader() *ClientOperationHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpReadBlockProto) GetOffset() uint64 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return 0 -} - -func (m *OpReadBlockProto) GetLen() uint64 { - if m != nil && m.Len != nil { - return *m.Len - } - return 0 -} - -func (m *OpReadBlockProto) GetSendChecksums() bool { - if m != nil && m.SendChecksums != nil { - return *m.SendChecksums - } - return Default_OpReadBlockProto_SendChecksums -} - -func (m *OpReadBlockProto) GetCachingStrategy() *CachingStrategyProto { - if m != nil { - return m.CachingStrategy - } - return nil -} - -type ChecksumProto struct { - Type *ChecksumTypeProto `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.ChecksumTypeProto" json:"type,omitempty"` - BytesPerChecksum *uint32 `protobuf:"varint,2,req,name=bytesPerChecksum" json:"bytesPerChecksum,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*ChecksumProto) Reset() { *m = ChecksumProto{} } -func (m *ChecksumProto) String() string { return proto.CompactTextString(m) } -func (*ChecksumProto) ProtoMessage() {} -func (*ChecksumProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{6} } - -func (m *ChecksumProto) GetType() ChecksumTypeProto { - if m != nil && m.Type != nil { - return *m.Type - } - return ChecksumTypeProto_CHECKSUM_NULL -} - -func (m *ChecksumProto) GetBytesPerChecksum() uint32 { - if m != nil && m.BytesPerChecksum != nil { - return *m.BytesPerChecksum - } - return 0 -} - -type OpWriteBlockProto struct { - Header *ClientOperationHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - Targets []*DatanodeInfoProto `protobuf:"bytes,2,rep,name=targets" json:"targets,omitempty"` - Source *DatanodeInfoProto `protobuf:"bytes,3,opt,name=source" json:"source,omitempty"` - Stage *OpWriteBlockProto_BlockConstructionStage `protobuf:"varint,4,req,name=stage,enum=hadoop.hdfs.OpWriteBlockProto_BlockConstructionStage" json:"stage,omitempty"` - PipelineSize *uint32 `protobuf:"varint,5,req,name=pipelineSize" json:"pipelineSize,omitempty"` - MinBytesRcvd *uint64 `protobuf:"varint,6,req,name=minBytesRcvd" json:"minBytesRcvd,omitempty"` - MaxBytesRcvd *uint64 `protobuf:"varint,7,req,name=maxBytesRcvd" json:"maxBytesRcvd,omitempty"` - LatestGenerationStamp *uint64 `protobuf:"varint,8,req,name=latestGenerationStamp" json:"latestGenerationStamp,omitempty"` - // * - // The requested checksum mechanism for this block write. - RequestedChecksum *ChecksumProto `protobuf:"bytes,9,req,name=requestedChecksum" json:"requestedChecksum,omitempty"` - CachingStrategy *CachingStrategyProto `protobuf:"bytes,10,opt,name=cachingStrategy" json:"cachingStrategy,omitempty"` - StorageType *StorageTypeProto `protobuf:"varint,11,opt,name=storageType,enum=hadoop.hdfs.StorageTypeProto,def=1" json:"storageType,omitempty"` - TargetStorageTypes []StorageTypeProto `protobuf:"varint,12,rep,name=targetStorageTypes,enum=hadoop.hdfs.StorageTypeProto" json:"targetStorageTypes,omitempty"` - // * - // Hint to the DataNode that the block can be allocated on transient - // storage i.e. memory and written to disk lazily. The DataNode is free - // to ignore this hint. - AllowLazyPersist *bool `protobuf:"varint,13,opt,name=allowLazyPersist,def=0" json:"allowLazyPersist,omitempty"` - // whether to pin the block, so Balancer won't move it. 
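
To see how OpWriteBlockProto's many fields cooperate: a client opening a brand-new pipeline sends stage PIPELINE_SETUP_CREATE, the downstream targets, and the checksum it wants applied to the block; minBytesRcvd, maxBytesRcvd, and the generation stamp only matter for the recovery stages. A hedged sketch (same imports as the ACL example earlier; the CRC32C constant lives in the companion hdfs.pb.go, and all concrete values here are illustrative, not taken from this patch):

// Illustrative only: first write to a new block over a pipeline,
// checksummed with CRC32C over 512-byte chunks.
func newWriteBlock(hdr *hadoop_hdfs.ClientOperationHeaderProto,
	targets []*hadoop_hdfs.DatanodeInfoProto) *hadoop_hdfs.OpWriteBlockProto {
	return &hadoop_hdfs.OpWriteBlockProto{
		Header:                hdr,
		Targets:               targets, // downstream DataNodes
		Stage:                 hadoop_hdfs.OpWriteBlockProto_PIPELINE_SETUP_CREATE.Enum(),
		PipelineSize:          proto.Uint32(uint32(len(targets) + 1)),
		MinBytesRcvd:          proto.Uint64(0), // fresh block: nothing received yet
		MaxBytesRcvd:          proto.Uint64(0),
		LatestGenerationStamp: proto.Uint64(0),
		RequestedChecksum: &hadoop_hdfs.ChecksumProto{
			Type:             hadoop_hdfs.ChecksumTypeProto_CHECKSUM_CRC32C.Enum(),
			BytesPerChecksum: proto.Uint32(512),
		},
	}
}
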
- Pinning *bool `protobuf:"varint,14,opt,name=pinning,def=0" json:"pinning,omitempty"` - TargetPinnings []bool `protobuf:"varint,15,rep,name=targetPinnings" json:"targetPinnings,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpWriteBlockProto) Reset() { *m = OpWriteBlockProto{} } -func (m *OpWriteBlockProto) String() string { return proto.CompactTextString(m) } -func (*OpWriteBlockProto) ProtoMessage() {} -func (*OpWriteBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{7} } - -const Default_OpWriteBlockProto_StorageType StorageTypeProto = StorageTypeProto_DISK -const Default_OpWriteBlockProto_AllowLazyPersist bool = false -const Default_OpWriteBlockProto_Pinning bool = false - -func (m *OpWriteBlockProto) GetHeader() *ClientOperationHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpWriteBlockProto) GetTargets() []*DatanodeInfoProto { - if m != nil { - return m.Targets - } - return nil -} - -func (m *OpWriteBlockProto) GetSource() *DatanodeInfoProto { - if m != nil { - return m.Source - } - return nil -} - -func (m *OpWriteBlockProto) GetStage() OpWriteBlockProto_BlockConstructionStage { - if m != nil && m.Stage != nil { - return *m.Stage - } - return OpWriteBlockProto_PIPELINE_SETUP_APPEND -} - -func (m *OpWriteBlockProto) GetPipelineSize() uint32 { - if m != nil && m.PipelineSize != nil { - return *m.PipelineSize - } - return 0 -} - -func (m *OpWriteBlockProto) GetMinBytesRcvd() uint64 { - if m != nil && m.MinBytesRcvd != nil { - return *m.MinBytesRcvd - } - return 0 -} - -func (m *OpWriteBlockProto) GetMaxBytesRcvd() uint64 { - if m != nil && m.MaxBytesRcvd != nil { - return *m.MaxBytesRcvd - } - return 0 -} - -func (m *OpWriteBlockProto) GetLatestGenerationStamp() uint64 { - if m != nil && m.LatestGenerationStamp != nil { - return *m.LatestGenerationStamp - } - return 0 -} - -func (m *OpWriteBlockProto) GetRequestedChecksum() *ChecksumProto { - if m != nil { - return m.RequestedChecksum - } - return nil -} - -func (m *OpWriteBlockProto) GetCachingStrategy() *CachingStrategyProto { - if m != nil { - return m.CachingStrategy - } - return nil -} - -func (m *OpWriteBlockProto) GetStorageType() StorageTypeProto { - if m != nil && m.StorageType != nil { - return *m.StorageType - } - return Default_OpWriteBlockProto_StorageType -} - -func (m *OpWriteBlockProto) GetTargetStorageTypes() []StorageTypeProto { - if m != nil { - return m.TargetStorageTypes - } - return nil -} - -func (m *OpWriteBlockProto) GetAllowLazyPersist() bool { - if m != nil && m.AllowLazyPersist != nil { - return *m.AllowLazyPersist - } - return Default_OpWriteBlockProto_AllowLazyPersist -} - -func (m *OpWriteBlockProto) GetPinning() bool { - if m != nil && m.Pinning != nil { - return *m.Pinning - } - return Default_OpWriteBlockProto_Pinning -} - -func (m *OpWriteBlockProto) GetTargetPinnings() []bool { - if m != nil { - return m.TargetPinnings - } - return nil -} - -type OpTransferBlockProto struct { - Header *ClientOperationHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - Targets []*DatanodeInfoProto `protobuf:"bytes,2,rep,name=targets" json:"targets,omitempty"` - TargetStorageTypes []StorageTypeProto `protobuf:"varint,3,rep,name=targetStorageTypes,enum=hadoop.hdfs.StorageTypeProto" json:"targetStorageTypes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpTransferBlockProto) Reset() { *m = OpTransferBlockProto{} } -func (m *OpTransferBlockProto) String() string { return proto.CompactTextString(m) } -func 
(*OpTransferBlockProto) ProtoMessage() {} -func (*OpTransferBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{8} } - -func (m *OpTransferBlockProto) GetHeader() *ClientOperationHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpTransferBlockProto) GetTargets() []*DatanodeInfoProto { - if m != nil { - return m.Targets - } - return nil -} - -func (m *OpTransferBlockProto) GetTargetStorageTypes() []StorageTypeProto { - if m != nil { - return m.TargetStorageTypes - } - return nil -} - -type OpReplaceBlockProto struct { - Header *BaseHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - DelHint *string `protobuf:"bytes,2,req,name=delHint" json:"delHint,omitempty"` - Source *DatanodeInfoProto `protobuf:"bytes,3,req,name=source" json:"source,omitempty"` - StorageType *StorageTypeProto `protobuf:"varint,4,opt,name=storageType,enum=hadoop.hdfs.StorageTypeProto,def=1" json:"storageType,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpReplaceBlockProto) Reset() { *m = OpReplaceBlockProto{} } -func (m *OpReplaceBlockProto) String() string { return proto.CompactTextString(m) } -func (*OpReplaceBlockProto) ProtoMessage() {} -func (*OpReplaceBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{9} } - -const Default_OpReplaceBlockProto_StorageType StorageTypeProto = StorageTypeProto_DISK - -func (m *OpReplaceBlockProto) GetHeader() *BaseHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpReplaceBlockProto) GetDelHint() string { - if m != nil && m.DelHint != nil { - return *m.DelHint - } - return "" -} - -func (m *OpReplaceBlockProto) GetSource() *DatanodeInfoProto { - if m != nil { - return m.Source - } - return nil -} - -func (m *OpReplaceBlockProto) GetStorageType() StorageTypeProto { - if m != nil && m.StorageType != nil { - return *m.StorageType - } - return Default_OpReplaceBlockProto_StorageType -} - -type OpCopyBlockProto struct { - Header *BaseHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpCopyBlockProto) Reset() { *m = OpCopyBlockProto{} } -func (m *OpCopyBlockProto) String() string { return proto.CompactTextString(m) } -func (*OpCopyBlockProto) ProtoMessage() {} -func (*OpCopyBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{10} } - -func (m *OpCopyBlockProto) GetHeader() *BaseHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -type OpBlockChecksumProto struct { - Header *BaseHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpBlockChecksumProto) Reset() { *m = OpBlockChecksumProto{} } -func (m *OpBlockChecksumProto) String() string { return proto.CompactTextString(m) } -func (*OpBlockChecksumProto) ProtoMessage() {} -func (*OpBlockChecksumProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{11} } - -func (m *OpBlockChecksumProto) GetHeader() *BaseHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -type OpBlockGroupChecksumProto struct { - Header *BaseHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - Datanodes *DatanodeInfosProto `protobuf:"bytes,2,req,name=datanodes" json:"datanodes,omitempty"` - // each internal block has a block token - BlockTokens []*hadoop_common.TokenProto `protobuf:"bytes,3,rep,name=blockTokens" json:"blockTokens,omitempty"` - EcPolicy *ErasureCodingPolicyProto 
`protobuf:"bytes,4,req,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpBlockGroupChecksumProto) Reset() { *m = OpBlockGroupChecksumProto{} } -func (m *OpBlockGroupChecksumProto) String() string { return proto.CompactTextString(m) } -func (*OpBlockGroupChecksumProto) ProtoMessage() {} -func (*OpBlockGroupChecksumProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{12} } - -func (m *OpBlockGroupChecksumProto) GetHeader() *BaseHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpBlockGroupChecksumProto) GetDatanodes() *DatanodeInfosProto { - if m != nil { - return m.Datanodes - } - return nil -} - -func (m *OpBlockGroupChecksumProto) GetBlockTokens() []*hadoop_common.TokenProto { - if m != nil { - return m.BlockTokens - } - return nil -} - -func (m *OpBlockGroupChecksumProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -// * -// An ID uniquely identifying a shared memory segment. -type ShortCircuitShmIdProto struct { - Hi *int64 `protobuf:"varint,1,req,name=hi" json:"hi,omitempty"` - Lo *int64 `protobuf:"varint,2,req,name=lo" json:"lo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShortCircuitShmIdProto) Reset() { *m = ShortCircuitShmIdProto{} } -func (m *ShortCircuitShmIdProto) String() string { return proto.CompactTextString(m) } -func (*ShortCircuitShmIdProto) ProtoMessage() {} -func (*ShortCircuitShmIdProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{13} } - -func (m *ShortCircuitShmIdProto) GetHi() int64 { - if m != nil && m.Hi != nil { - return *m.Hi - } - return 0 -} - -func (m *ShortCircuitShmIdProto) GetLo() int64 { - if m != nil && m.Lo != nil { - return *m.Lo - } - return 0 -} - -// * -// An ID uniquely identifying a slot within a shared memory segment. -type ShortCircuitShmSlotProto struct { - ShmId *ShortCircuitShmIdProto `protobuf:"bytes,1,req,name=shmId" json:"shmId,omitempty"` - SlotIdx *int32 `protobuf:"varint,2,req,name=slotIdx" json:"slotIdx,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShortCircuitShmSlotProto) Reset() { *m = ShortCircuitShmSlotProto{} } -func (m *ShortCircuitShmSlotProto) String() string { return proto.CompactTextString(m) } -func (*ShortCircuitShmSlotProto) ProtoMessage() {} -func (*ShortCircuitShmSlotProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{14} } - -func (m *ShortCircuitShmSlotProto) GetShmId() *ShortCircuitShmIdProto { - if m != nil { - return m.ShmId - } - return nil -} - -func (m *ShortCircuitShmSlotProto) GetSlotIdx() int32 { - if m != nil && m.SlotIdx != nil { - return *m.SlotIdx - } - return 0 -} - -type OpRequestShortCircuitAccessProto struct { - Header *BaseHeaderProto `protobuf:"bytes,1,req,name=header" json:"header,omitempty"` - // * In order to get short-circuit access to block data, clients must set this - // to the highest version of the block data that they can understand. - // Currently 1 is the only version, but more versions may exist in the future - // if the on-disk format changes. - MaxVersion *uint32 `protobuf:"varint,2,req,name=maxVersion" json:"maxVersion,omitempty"` - // * - // The shared memory slot to use, if we are using one. - SlotId *ShortCircuitShmSlotProto `protobuf:"bytes,3,opt,name=slotId" json:"slotId,omitempty"` - // * - // True if the client supports verifying that the file descriptor has been - // sent successfully. 
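
The short-circuit exchange these messages support: a client colocated with the DataNode asks, over a Unix domain socket, for the block's open file descriptors instead of a TCP data stream. Per the field comments above, maxVersion pins the on-disk format the client understands, and 1 is the only version defined so far. A sketch using the fields shown above (imports as in the earlier examples):

// Illustrative only: request local (short-circuit) read access to a block.
func newShortCircuitFds(hdr *hadoop_hdfs.BaseHeaderProto,
	slot *hadoop_hdfs.ShortCircuitShmSlotProto) *hadoop_hdfs.OpRequestShortCircuitAccessProto {
	return &hadoop_hdfs.OpRequestShortCircuitAccessProto{
		Header:                      hdr,
		MaxVersion:                  proto.Uint32(1), // the only defined version, per the docs above
		SlotId:                      slot,            // may be nil when no shared-memory slot is in use
		SupportsReceiptVerification: proto.Bool(true),
	}
}
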
- SupportsReceiptVerification *bool `protobuf:"varint,4,opt,name=supportsReceiptVerification,def=0" json:"supportsReceiptVerification,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpRequestShortCircuitAccessProto) Reset() { *m = OpRequestShortCircuitAccessProto{} } -func (m *OpRequestShortCircuitAccessProto) String() string { return proto.CompactTextString(m) } -func (*OpRequestShortCircuitAccessProto) ProtoMessage() {} -func (*OpRequestShortCircuitAccessProto) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{15} -} - -const Default_OpRequestShortCircuitAccessProto_SupportsReceiptVerification bool = false - -func (m *OpRequestShortCircuitAccessProto) GetHeader() *BaseHeaderProto { - if m != nil { - return m.Header - } - return nil -} - -func (m *OpRequestShortCircuitAccessProto) GetMaxVersion() uint32 { - if m != nil && m.MaxVersion != nil { - return *m.MaxVersion - } - return 0 -} - -func (m *OpRequestShortCircuitAccessProto) GetSlotId() *ShortCircuitShmSlotProto { - if m != nil { - return m.SlotId - } - return nil -} - -func (m *OpRequestShortCircuitAccessProto) GetSupportsReceiptVerification() bool { - if m != nil && m.SupportsReceiptVerification != nil { - return *m.SupportsReceiptVerification - } - return Default_OpRequestShortCircuitAccessProto_SupportsReceiptVerification -} - -type ReleaseShortCircuitAccessRequestProto struct { - SlotId *ShortCircuitShmSlotProto `protobuf:"bytes,1,req,name=slotId" json:"slotId,omitempty"` - TraceInfo *DataTransferTraceInfoProto `protobuf:"bytes,2,opt,name=traceInfo" json:"traceInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReleaseShortCircuitAccessRequestProto) Reset() { *m = ReleaseShortCircuitAccessRequestProto{} } -func (m *ReleaseShortCircuitAccessRequestProto) String() string { return proto.CompactTextString(m) } -func (*ReleaseShortCircuitAccessRequestProto) ProtoMessage() {} -func (*ReleaseShortCircuitAccessRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{16} -} - -func (m *ReleaseShortCircuitAccessRequestProto) GetSlotId() *ShortCircuitShmSlotProto { - if m != nil { - return m.SlotId - } - return nil -} - -func (m *ReleaseShortCircuitAccessRequestProto) GetTraceInfo() *DataTransferTraceInfoProto { - if m != nil { - return m.TraceInfo - } - return nil -} - -type ReleaseShortCircuitAccessResponseProto struct { - Status *Status `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.Status" json:"status,omitempty"` - Error *string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReleaseShortCircuitAccessResponseProto) Reset() { - *m = ReleaseShortCircuitAccessResponseProto{} -} -func (m *ReleaseShortCircuitAccessResponseProto) String() string { return proto.CompactTextString(m) } -func (*ReleaseShortCircuitAccessResponseProto) ProtoMessage() {} -func (*ReleaseShortCircuitAccessResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor5, []int{17} -} - -func (m *ReleaseShortCircuitAccessResponseProto) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_SUCCESS -} - -func (m *ReleaseShortCircuitAccessResponseProto) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -type ShortCircuitShmRequestProto struct { - // The name of the client requesting the shared memory segment. This is - // purely for logging / debugging purposes. 
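
The shared-memory messages work as a plain request/response pair: the client introduces itself by name (logging only, per the comment above), the DataNode maps a segment and hands back its two-word ShortCircuitShmIdProto, and individual slots are later referenced as (shmId, slotIdx) pairs via ShortCircuitShmSlotProto. The request side, sketched with a made-up client name:

// Illustrative only: ask the DataNode for a shared-memory segment.
// The client name is hypothetical; it exists purely for logging.
shmReq := &hadoop_hdfs.ShortCircuitShmRequestProto{
	ClientName: proto.String("DFSClient_NONMAPREDUCE_12345_1"),
}
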
- ClientName *string `protobuf:"bytes,1,req,name=clientName" json:"clientName,omitempty"` - TraceInfo *DataTransferTraceInfoProto `protobuf:"bytes,2,opt,name=traceInfo" json:"traceInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShortCircuitShmRequestProto) Reset() { *m = ShortCircuitShmRequestProto{} } -func (m *ShortCircuitShmRequestProto) String() string { return proto.CompactTextString(m) } -func (*ShortCircuitShmRequestProto) ProtoMessage() {} -func (*ShortCircuitShmRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{18} } - -func (m *ShortCircuitShmRequestProto) GetClientName() string { - if m != nil && m.ClientName != nil { - return *m.ClientName - } - return "" -} - -func (m *ShortCircuitShmRequestProto) GetTraceInfo() *DataTransferTraceInfoProto { - if m != nil { - return m.TraceInfo - } - return nil -} - -type ShortCircuitShmResponseProto struct { - Status *Status `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.Status" json:"status,omitempty"` - Error *string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` - Id *ShortCircuitShmIdProto `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ShortCircuitShmResponseProto) Reset() { *m = ShortCircuitShmResponseProto{} } -func (m *ShortCircuitShmResponseProto) String() string { return proto.CompactTextString(m) } -func (*ShortCircuitShmResponseProto) ProtoMessage() {} -func (*ShortCircuitShmResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{19} } - -func (m *ShortCircuitShmResponseProto) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_SUCCESS -} - -func (m *ShortCircuitShmResponseProto) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *ShortCircuitShmResponseProto) GetId() *ShortCircuitShmIdProto { - if m != nil { - return m.Id - } - return nil -} - -type PacketHeaderProto struct { - // All fields must be fixed-length! 
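
The "fixed-length" demand above is about framing: packet headers travel inside a binary envelope whose sizes must be known before the proto is parsed, and using fixed64/fixed32 instead of varints keeps every serialized header the same size. The envelope sketched below (a 4-byte payload length counting itself plus checksums plus data, then a 2-byte header length, then header, checksums, data) is an assumption about the conventional HDFS wire format, not something stated in this patch; it also assumes "bytes" and "encoding/binary" on top of the earlier imports:

// Sketch of packet framing under the assumptions stated above.
func framePacket(hdr *hadoop_hdfs.PacketHeaderProto, checksums, data []byte) ([]byte, error) {
	hdrBytes, err := proto.Marshal(hdr)
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(4+len(checksums)+len(data))) // payload length
	binary.Write(&buf, binary.BigEndian, uint16(len(hdrBytes)))              // header length
	buf.Write(hdrBytes)
	buf.Write(checksums)
	buf.Write(data)
	return buf.Bytes(), nil
}
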
- OffsetInBlock *int64 `protobuf:"fixed64,1,req,name=offsetInBlock" json:"offsetInBlock,omitempty"` - Seqno *int64 `protobuf:"fixed64,2,req,name=seqno" json:"seqno,omitempty"` - LastPacketInBlock *bool `protobuf:"varint,3,req,name=lastPacketInBlock" json:"lastPacketInBlock,omitempty"` - DataLen *int32 `protobuf:"fixed32,4,req,name=dataLen" json:"dataLen,omitempty"` - SyncBlock *bool `protobuf:"varint,5,opt,name=syncBlock,def=0" json:"syncBlock,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PacketHeaderProto) Reset() { *m = PacketHeaderProto{} } -func (m *PacketHeaderProto) String() string { return proto.CompactTextString(m) } -func (*PacketHeaderProto) ProtoMessage() {} -func (*PacketHeaderProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{20} } - -const Default_PacketHeaderProto_SyncBlock bool = false - -func (m *PacketHeaderProto) GetOffsetInBlock() int64 { - if m != nil && m.OffsetInBlock != nil { - return *m.OffsetInBlock - } - return 0 -} - -func (m *PacketHeaderProto) GetSeqno() int64 { - if m != nil && m.Seqno != nil { - return *m.Seqno - } - return 0 -} - -func (m *PacketHeaderProto) GetLastPacketInBlock() bool { - if m != nil && m.LastPacketInBlock != nil { - return *m.LastPacketInBlock - } - return false -} - -func (m *PacketHeaderProto) GetDataLen() int32 { - if m != nil && m.DataLen != nil { - return *m.DataLen - } - return 0 -} - -func (m *PacketHeaderProto) GetSyncBlock() bool { - if m != nil && m.SyncBlock != nil { - return *m.SyncBlock - } - return Default_PacketHeaderProto_SyncBlock -} - -type PipelineAckProto struct { - Seqno *int64 `protobuf:"zigzag64,1,req,name=seqno" json:"seqno,omitempty"` - Reply []Status `protobuf:"varint,2,rep,name=reply,enum=hadoop.hdfs.Status" json:"reply,omitempty"` - DownstreamAckTimeNanos *uint64 `protobuf:"varint,3,opt,name=downstreamAckTimeNanos,def=0" json:"downstreamAckTimeNanos,omitempty"` - Flag []uint32 `protobuf:"varint,4,rep,packed,name=flag" json:"flag,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PipelineAckProto) Reset() { *m = PipelineAckProto{} } -func (m *PipelineAckProto) String() string { return proto.CompactTextString(m) } -func (*PipelineAckProto) ProtoMessage() {} -func (*PipelineAckProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{21} } - -const Default_PipelineAckProto_DownstreamAckTimeNanos uint64 = 0 - -func (m *PipelineAckProto) GetSeqno() int64 { - if m != nil && m.Seqno != nil { - return *m.Seqno - } - return 0 -} - -func (m *PipelineAckProto) GetReply() []Status { - if m != nil { - return m.Reply - } - return nil -} - -func (m *PipelineAckProto) GetDownstreamAckTimeNanos() uint64 { - if m != nil && m.DownstreamAckTimeNanos != nil { - return *m.DownstreamAckTimeNanos - } - return Default_PipelineAckProto_DownstreamAckTimeNanos -} - -func (m *PipelineAckProto) GetFlag() []uint32 { - if m != nil { - return m.Flag - } - return nil -} - -// * -// Sent as part of the BlockOpResponseProto -// for READ_BLOCK and COPY_BLOCK operations. -type ReadOpChecksumInfoProto struct { - Checksum *ChecksumProto `protobuf:"bytes,1,req,name=checksum" json:"checksum,omitempty"` - // * - // The offset into the block at which the first packet - // will start. This is necessary since reads will align - // backwards to a checksum chunk boundary. 
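
The alignment described above is plain modular arithmetic: the server starts streaming at the last checksum-chunk boundary at or before the requested offset, reports that start as chunkOffset, and the client skips the difference. With 512-byte chunks, a request at offset 1300 is served from chunkOffset 1024 and the first 276 bytes are discarded:

// Where the server's stream will actually begin for a requested offset.
func chunkAlignedOffset(offset, bytesPerChecksum uint64) uint64 {
	return offset - offset%bytesPerChecksum
}

// chunkAlignedOffset(1300, 512) == 1024; the reader skips 1300-1024 = 276 bytes.
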
- ChunkOffset *uint64 `protobuf:"varint,2,req,name=chunkOffset" json:"chunkOffset,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReadOpChecksumInfoProto) Reset() { *m = ReadOpChecksumInfoProto{} } -func (m *ReadOpChecksumInfoProto) String() string { return proto.CompactTextString(m) } -func (*ReadOpChecksumInfoProto) ProtoMessage() {} -func (*ReadOpChecksumInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{22} } - -func (m *ReadOpChecksumInfoProto) GetChecksum() *ChecksumProto { - if m != nil { - return m.Checksum - } - return nil -} - -func (m *ReadOpChecksumInfoProto) GetChunkOffset() uint64 { - if m != nil && m.ChunkOffset != nil { - return *m.ChunkOffset - } - return 0 -} - -type BlockOpResponseProto struct { - Status *Status `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.Status" json:"status,omitempty"` - FirstBadLink *string `protobuf:"bytes,2,opt,name=firstBadLink" json:"firstBadLink,omitempty"` - ChecksumResponse *OpBlockChecksumResponseProto `protobuf:"bytes,3,opt,name=checksumResponse" json:"checksumResponse,omitempty"` - ReadOpChecksumInfo *ReadOpChecksumInfoProto `protobuf:"bytes,4,opt,name=readOpChecksumInfo" json:"readOpChecksumInfo,omitempty"` - // * explanatory text which may be useful to log on the client side - Message *string `protobuf:"bytes,5,opt,name=message" json:"message,omitempty"` - // * If the server chooses to agree to the request of a client for - // short-circuit access, it will send a response message with the relevant - // file descriptors attached. - // - // In the body of the message, this version number will be set to the - // specific version number of the block data that the client is about to - // read. - ShortCircuitAccessVersion *uint32 `protobuf:"varint,6,opt,name=shortCircuitAccessVersion" json:"shortCircuitAccessVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BlockOpResponseProto) Reset() { *m = BlockOpResponseProto{} } -func (m *BlockOpResponseProto) String() string { return proto.CompactTextString(m) } -func (*BlockOpResponseProto) ProtoMessage() {} -func (*BlockOpResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{23} } - -func (m *BlockOpResponseProto) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_SUCCESS -} - -func (m *BlockOpResponseProto) GetFirstBadLink() string { - if m != nil && m.FirstBadLink != nil { - return *m.FirstBadLink - } - return "" -} - -func (m *BlockOpResponseProto) GetChecksumResponse() *OpBlockChecksumResponseProto { - if m != nil { - return m.ChecksumResponse - } - return nil -} - -func (m *BlockOpResponseProto) GetReadOpChecksumInfo() *ReadOpChecksumInfoProto { - if m != nil { - return m.ReadOpChecksumInfo - } - return nil -} - -func (m *BlockOpResponseProto) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *BlockOpResponseProto) GetShortCircuitAccessVersion() uint32 { - if m != nil && m.ShortCircuitAccessVersion != nil { - return *m.ShortCircuitAccessVersion - } - return 0 -} - -// * -// Message sent from the client to the DN after reading the entire -// read request. 
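
Combined with BlockOpResponseProto above, a read is a three-step exchange: the client sends OpReadBlockProto, the DataNode replies with BlockOpResponseProto (status plus readOpChecksumInfo describing how the stream is checksummed), and once all data is consumed the client answers with ClientReadStatusProto, as the comment above says. A sketch of the client-side response check ("fmt" assumed imported, plus the earlier imports):

// Illustrative only: reject a read unless the DataNode reported SUCCESS.
func checkReadResponse(resp *hadoop_hdfs.BlockOpResponseProto) error {
	if resp.GetStatus() != hadoop_hdfs.Status_SUCCESS {
		return fmt.Errorf("read rejected: %v (%s)", resp.GetStatus(), resp.GetMessage())
	}
	return nil
}
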
-type ClientReadStatusProto struct { - Status *Status `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.Status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ClientReadStatusProto) Reset() { *m = ClientReadStatusProto{} } -func (m *ClientReadStatusProto) String() string { return proto.CompactTextString(m) } -func (*ClientReadStatusProto) ProtoMessage() {} -func (*ClientReadStatusProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{24} } - -func (m *ClientReadStatusProto) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_SUCCESS -} - -type DNTransferAckProto struct { - Status *Status `protobuf:"varint,1,req,name=status,enum=hadoop.hdfs.Status" json:"status,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DNTransferAckProto) Reset() { *m = DNTransferAckProto{} } -func (m *DNTransferAckProto) String() string { return proto.CompactTextString(m) } -func (*DNTransferAckProto) ProtoMessage() {} -func (*DNTransferAckProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{25} } - -func (m *DNTransferAckProto) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_SUCCESS -} - -type OpBlockChecksumResponseProto struct { - BytesPerCrc *uint32 `protobuf:"varint,1,req,name=bytesPerCrc" json:"bytesPerCrc,omitempty"` - CrcPerBlock *uint64 `protobuf:"varint,2,req,name=crcPerBlock" json:"crcPerBlock,omitempty"` - Md5 []byte `protobuf:"bytes,3,req,name=md5" json:"md5,omitempty"` - CrcType *ChecksumTypeProto `protobuf:"varint,4,opt,name=crcType,enum=hadoop.hdfs.ChecksumTypeProto" json:"crcType,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpBlockChecksumResponseProto) Reset() { *m = OpBlockChecksumResponseProto{} } -func (m *OpBlockChecksumResponseProto) String() string { return proto.CompactTextString(m) } -func (*OpBlockChecksumResponseProto) ProtoMessage() {} -func (*OpBlockChecksumResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{26} } - -func (m *OpBlockChecksumResponseProto) GetBytesPerCrc() uint32 { - if m != nil && m.BytesPerCrc != nil { - return *m.BytesPerCrc - } - return 0 -} - -func (m *OpBlockChecksumResponseProto) GetCrcPerBlock() uint64 { - if m != nil && m.CrcPerBlock != nil { - return *m.CrcPerBlock - } - return 0 -} - -func (m *OpBlockChecksumResponseProto) GetMd5() []byte { - if m != nil { - return m.Md5 - } - return nil -} - -func (m *OpBlockChecksumResponseProto) GetCrcType() ChecksumTypeProto { - if m != nil && m.CrcType != nil { - return *m.CrcType - } - return ChecksumTypeProto_CHECKSUM_NULL -} - -type OpCustomProto struct { - CustomId *string `protobuf:"bytes,1,req,name=customId" json:"customId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OpCustomProto) Reset() { *m = OpCustomProto{} } -func (m *OpCustomProto) String() string { return proto.CompactTextString(m) } -func (*OpCustomProto) ProtoMessage() {} -func (*OpCustomProto) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{27} } - -func (m *OpCustomProto) GetCustomId() string { - if m != nil && m.CustomId != nil { - return *m.CustomId - } - return "" -} - -func init() { - proto.RegisterType((*DataTransferEncryptorMessageProto)(nil), "hadoop.hdfs.DataTransferEncryptorMessageProto") - proto.RegisterType((*BaseHeaderProto)(nil), "hadoop.hdfs.BaseHeaderProto") - proto.RegisterType((*DataTransferTraceInfoProto)(nil), "hadoop.hdfs.DataTransferTraceInfoProto") - 
proto.RegisterType((*ClientOperationHeaderProto)(nil), "hadoop.hdfs.ClientOperationHeaderProto") - proto.RegisterType((*CachingStrategyProto)(nil), "hadoop.hdfs.CachingStrategyProto") - proto.RegisterType((*OpReadBlockProto)(nil), "hadoop.hdfs.OpReadBlockProto") - proto.RegisterType((*ChecksumProto)(nil), "hadoop.hdfs.ChecksumProto") - proto.RegisterType((*OpWriteBlockProto)(nil), "hadoop.hdfs.OpWriteBlockProto") - proto.RegisterType((*OpTransferBlockProto)(nil), "hadoop.hdfs.OpTransferBlockProto") - proto.RegisterType((*OpReplaceBlockProto)(nil), "hadoop.hdfs.OpReplaceBlockProto") - proto.RegisterType((*OpCopyBlockProto)(nil), "hadoop.hdfs.OpCopyBlockProto") - proto.RegisterType((*OpBlockChecksumProto)(nil), "hadoop.hdfs.OpBlockChecksumProto") - proto.RegisterType((*OpBlockGroupChecksumProto)(nil), "hadoop.hdfs.OpBlockGroupChecksumProto") - proto.RegisterType((*ShortCircuitShmIdProto)(nil), "hadoop.hdfs.ShortCircuitShmIdProto") - proto.RegisterType((*ShortCircuitShmSlotProto)(nil), "hadoop.hdfs.ShortCircuitShmSlotProto") - proto.RegisterType((*OpRequestShortCircuitAccessProto)(nil), "hadoop.hdfs.OpRequestShortCircuitAccessProto") - proto.RegisterType((*ReleaseShortCircuitAccessRequestProto)(nil), "hadoop.hdfs.ReleaseShortCircuitAccessRequestProto") - proto.RegisterType((*ReleaseShortCircuitAccessResponseProto)(nil), "hadoop.hdfs.ReleaseShortCircuitAccessResponseProto") - proto.RegisterType((*ShortCircuitShmRequestProto)(nil), "hadoop.hdfs.ShortCircuitShmRequestProto") - proto.RegisterType((*ShortCircuitShmResponseProto)(nil), "hadoop.hdfs.ShortCircuitShmResponseProto") - proto.RegisterType((*PacketHeaderProto)(nil), "hadoop.hdfs.PacketHeaderProto") - proto.RegisterType((*PipelineAckProto)(nil), "hadoop.hdfs.PipelineAckProto") - proto.RegisterType((*ReadOpChecksumInfoProto)(nil), "hadoop.hdfs.ReadOpChecksumInfoProto") - proto.RegisterType((*BlockOpResponseProto)(nil), "hadoop.hdfs.BlockOpResponseProto") - proto.RegisterType((*ClientReadStatusProto)(nil), "hadoop.hdfs.ClientReadStatusProto") - proto.RegisterType((*DNTransferAckProto)(nil), "hadoop.hdfs.DNTransferAckProto") - proto.RegisterType((*OpBlockChecksumResponseProto)(nil), "hadoop.hdfs.OpBlockChecksumResponseProto") - proto.RegisterType((*OpCustomProto)(nil), "hadoop.hdfs.OpCustomProto") - proto.RegisterEnum("hadoop.hdfs.Status", Status_name, Status_value) - proto.RegisterEnum("hadoop.hdfs.ShortCircuitFdResponse", ShortCircuitFdResponse_name, ShortCircuitFdResponse_value) - proto.RegisterEnum("hadoop.hdfs.DataTransferEncryptorMessageProto_DataTransferEncryptorStatus", DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_name, DataTransferEncryptorMessageProto_DataTransferEncryptorStatus_value) - proto.RegisterEnum("hadoop.hdfs.OpWriteBlockProto_BlockConstructionStage", OpWriteBlockProto_BlockConstructionStage_name, OpWriteBlockProto_BlockConstructionStage_value) -} - -func init() { proto.RegisterFile("datatransfer.proto", fileDescriptor5) } - -var fileDescriptor5 = []byte{ - // 2039 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4b, 0x73, 0xdc, 0xc6, - 0xf1, 0x17, 0xf6, 0x41, 0xee, 0xf6, 0x92, 0x14, 0x38, 0x96, 0x68, 0x88, 0xd2, 0x5f, 0xa2, 0x20, - 0xcb, 0x7f, 0x5a, 0x4e, 0x31, 0x31, 0x6d, 0xb9, 0x6c, 0xc5, 0x4e, 0x6a, 0x1f, 0x90, 0xb4, 0x21, - 0xb5, 0xd8, 0x1a, 0x2c, 0xa9, 0x3c, 0x0e, 0x5b, 0x23, 0x60, 0xc8, 0x45, 0x11, 0x0b, 0xc0, 0x98, - 0xd9, 0x58, 0xab, 0x53, 0x0e, 0x39, 0xe4, 0x98, 0x53, 0xce, 0x39, 0xe5, 0x96, 0x7c, 0x82, 0x1c, - 0xf2, 0x05, 0xf2, 
0x1d, 0x72, 0xc8, 0x25, 0x55, 0x39, 0xa6, 0x72, 0x4e, 0xcd, 0x0c, 0xb0, 0x04, - 0x96, 0x4b, 0x32, 0xa2, 0x75, 0xc8, 0x0d, 0xd3, 0xd3, 0xdd, 0xe8, 0xfe, 0x4d, 0x4f, 0x3f, 0x06, - 0x90, 0x47, 0x38, 0xe1, 0x09, 0x09, 0xd9, 0x11, 0x4d, 0x76, 0xe2, 0x24, 0xe2, 0x11, 0x6a, 0x8c, - 0x88, 0x17, 0x45, 0xf1, 0xce, 0xc8, 0x3b, 0x62, 0x9b, 0x6b, 0x0e, 0x75, 0x27, 0x89, 0xcf, 0xa7, - 0x6a, 0x73, 0x13, 0x04, 0x55, 0x7d, 0x9b, 0x7f, 0x2d, 0xc1, 0xfd, 0x0e, 0xe1, 0x64, 0x90, 0xca, - 0x5b, 0xa1, 0x9b, 0x4c, 0x63, 0x1e, 0x25, 0x2f, 0x28, 0x63, 0xe4, 0x98, 0xf6, 0xa5, 0xba, 0x57, - 0xb0, 0xc4, 0x38, 0xe1, 0x13, 0x66, 0x68, 0x5b, 0xa5, 0xed, 0xb5, 0xdd, 0x9f, 0xec, 0xe4, 0xf4, - 0xef, 0x5c, 0x2a, 0xbf, 0x98, 0xc3, 0x91, 0x1a, 0x71, 0xaa, 0x19, 0x19, 0xb0, 0x1c, 0x93, 0x69, - 0x10, 0x11, 0xcf, 0x28, 0x6d, 0x69, 0xdb, 0x2b, 0x38, 0x5b, 0x8a, 0x9d, 0xb1, 0xd2, 0x66, 0x94, - 0xb7, 0xb4, 0xed, 0x3a, 0xce, 0x96, 0xa8, 0x05, 0x2b, 0xae, 0x1f, 0x8f, 0x68, 0x62, 0xc7, 0xdc, - 0x8f, 0x42, 0xa3, 0xb2, 0x55, 0xde, 0x6e, 0xec, 0xde, 0x2d, 0x58, 0xd7, 0xce, 0x31, 0x48, 0x6b, - 0x70, 0x41, 0xc6, 0xdc, 0x87, 0xdb, 0x17, 0x98, 0x87, 0x1a, 0xb0, 0xec, 0x1c, 0xb4, 0xdb, 0x96, - 0xe3, 0xe8, 0xd7, 0xd0, 0x4d, 0x58, 0xb7, 0x30, 0xb6, 0xf1, 0xf0, 0xa0, 0xb7, 0xd7, 0xb3, 0x5f, - 0xf6, 0x86, 0x7b, 0xd6, 0xcf, 0x74, 0x0d, 0xd5, 0xa1, 0x2a, 0xc9, 0x7a, 0xc9, 0xfc, 0x8b, 0x06, - 0xd7, 0x5b, 0x84, 0xd1, 0xe7, 0x94, 0x78, 0x34, 0x51, 0xe8, 0x3d, 0x86, 0xea, 0xab, 0x20, 0x72, - 0x4f, 0x24, 0x78, 0x8d, 0xdd, 0x7b, 0x05, 0xf3, 0xac, 0xd7, 0x9c, 0x86, 0x1e, 0xf5, 0x5a, 0x82, - 0x43, 0xd9, 0xa7, 0xb8, 0xd1, 0xf7, 0xa1, 0xca, 0xa3, 0x13, 0x1a, 0x4a, 0x38, 0x1a, 0xbb, 0xb7, - 0x32, 0x31, 0x37, 0x1a, 0x8f, 0xa3, 0x70, 0x67, 0x20, 0xf6, 0x52, 0x01, 0xc9, 0x87, 0x2c, 0xa8, - 0xf3, 0x84, 0xb8, 0xb4, 0x1b, 0x1e, 0x45, 0x12, 0xa9, 0xc6, 0xee, 0xff, 0x9f, 0x7b, 0x50, 0x83, - 0x8c, 0x53, 0xa9, 0x38, 0x95, 0x34, 0x31, 0x6c, 0x9e, 0xcf, 0x28, 0x0e, 0x43, 0xb1, 0x7a, 0xd2, - 0x9d, 0x0a, 0xce, 0x96, 0x68, 0x13, 0x6a, 0x31, 0x49, 0x68, 0xc8, 0xbb, 0xe2, 0x04, 0xc5, 0xd6, - 0x6c, 0x6d, 0xbe, 0x81, 0xcd, 0x76, 0xe0, 0xd3, 0x90, 0xdb, 0x31, 0x4d, 0x88, 0xc0, 0x3d, 0x0f, - 0xd0, 0x57, 0x00, 0xaf, 0x66, 0x98, 0xa5, 0x28, 0xdd, 0x29, 0x58, 0x3e, 0x07, 0x29, 0xce, 0xf1, - 0xa3, 0xbb, 0x00, 0xae, 0xd4, 0xdd, 0x23, 0x63, 0x2a, 0xff, 0x5c, 0xc7, 0x39, 0x8a, 0x39, 0x80, - 0x1b, 0x6d, 0xe2, 0x8e, 0xfc, 0xf0, 0xd8, 0xe1, 0x09, 0xe1, 0xf4, 0x78, 0xaa, 0xfe, 0x7a, 0x17, - 0xc0, 0x4b, 0xa2, 0xb8, 0x45, 0x47, 0x7e, 0x28, 0x9c, 0xd1, 0xb6, 0x6b, 0x38, 0x47, 0x41, 0x77, - 0xa0, 0x9e, 0x50, 0xe2, 0x91, 0x11, 0x4d, 0x43, 0xb2, 0x8c, 0x4f, 0x09, 0xe6, 0xbf, 0x35, 0xd0, - 0xed, 0x18, 0x53, 0x92, 0x3b, 0x39, 0xf4, 0x63, 0x58, 0x1a, 0xe5, 0x9d, 0x28, 0xc2, 0x7f, 0x3e, - 0x02, 0x38, 0x15, 0x43, 0x1b, 0xb0, 0x14, 0x1d, 0x1d, 0x31, 0xca, 0x53, 0x04, 0xd3, 0x15, 0xd2, - 0xa1, 0x1c, 0xd0, 0xd0, 0x28, 0x4b, 0xa2, 0xf8, 0x44, 0x8f, 0x60, 0x95, 0xd1, 0xd0, 0x6b, 0x8f, - 0xa8, 0x7b, 0xc2, 0x26, 0x63, 0x66, 0x54, 0x84, 0x03, 0x4f, 0x2a, 0x3c, 0x99, 0x50, 0x5c, 0xdc, - 0x42, 0x7b, 0x70, 0xdd, 0x2d, 0x22, 0x60, 0x54, 0x65, 0x78, 0xdc, 0x2f, 0xda, 0xb7, 0x00, 0x25, - 0x3c, 0x2f, 0x69, 0x46, 0xb0, 0x9a, 0x69, 0x56, 0x4e, 0xef, 0x42, 0x85, 0x4f, 0x63, 0x9a, 0xa6, - 0x86, 0xb9, 0xcb, 0x97, 0x72, 0x0e, 0xa6, 0xb1, 0x4a, 0x05, 0x58, 0xf2, 0xa2, 0x47, 0xa0, 0xbf, - 0x9a, 0x72, 0xca, 0xfa, 0x34, 0xc9, 0x58, 0xa4, 0xc7, 0xab, 0xf8, 0x0c, 0xdd, 0xfc, 0x47, 0x0d, - 0xd6, 0xed, 0xf8, 0x65, 0xe2, 0x73, 0xfa, 0x2e, 0xa1, 0xfe, 0x02, 0x96, 0x39, 0x49, 0x8e, 0x29, - 0x67, 0x46, 0x69, 0x41, 0xda, 0x10, 0x57, 
0x20, 0x8c, 0xbc, 0xdc, 0x15, 0xc9, 0xd8, 0xd1, 0xe7, - 0xb0, 0xc4, 0xa2, 0x49, 0xe2, 0xd2, 0xf4, 0x92, 0x5d, 0x26, 0x98, 0x72, 0xa3, 0x3d, 0xa8, 0x32, - 0x2e, 0xb2, 0x58, 0x45, 0x22, 0xf5, 0xb8, 0x20, 0x76, 0xc6, 0xc3, 0x1d, 0xf9, 0xd9, 0x8e, 0x42, - 0xc6, 0x93, 0x89, 0x2b, 0xbc, 0x70, 0x84, 0x30, 0x56, 0x3a, 0x90, 0x09, 0x2b, 0xb1, 0x1f, 0xd3, - 0xc0, 0x0f, 0xa9, 0xe3, 0xbf, 0xa1, 0x46, 0x55, 0xa2, 0x57, 0xa0, 0x09, 0x9e, 0xb1, 0x1f, 0xb6, - 0x04, 0xa0, 0xd8, 0xfd, 0xa5, 0x67, 0x2c, 0xc9, 0xf0, 0x29, 0xd0, 0x24, 0x0f, 0x79, 0x7d, 0xca, - 0xb3, 0x9c, 0xf2, 0xe4, 0x68, 0xe8, 0x33, 0xb8, 0x19, 0x10, 0x4e, 0x19, 0x7f, 0x46, 0xc3, 0x14, - 0x51, 0x87, 0x93, 0x71, 0x6c, 0xd4, 0x24, 0xf3, 0xe2, 0x4d, 0xf4, 0x1c, 0xd6, 0x13, 0xfa, 0xcd, - 0x84, 0x32, 0x4e, 0x67, 0xb1, 0x68, 0xd4, 0xe5, 0x61, 0x6d, 0x2e, 0x0c, 0x12, 0x85, 0xd6, 0x59, - 0xa1, 0x45, 0xf1, 0x0b, 0x57, 0x8d, 0x5f, 0x64, 0x41, 0x83, 0xf1, 0x28, 0x21, 0xc7, 0x54, 0x04, - 0xa5, 0xd1, 0xd8, 0xd2, 0xb6, 0xd7, 0x76, 0xff, 0xaf, 0xa0, 0xc8, 0x39, 0xdd, 0x97, 0x4a, 0x9e, - 0x54, 0x3a, 0x5d, 0x67, 0x0f, 0xe7, 0xe5, 0xd0, 0x0b, 0x40, 0x2a, 0x1e, 0x72, 0xcc, 0xcc, 0x58, - 0xd9, 0x2a, 0x5f, 0xaa, 0x0d, 0x2f, 0x10, 0x44, 0x9f, 0x80, 0x4e, 0x82, 0x20, 0xfa, 0x76, 0x9f, - 0xbc, 0x99, 0xf6, 0x69, 0xc2, 0x7c, 0xc6, 0x8d, 0x55, 0x79, 0xa3, 0xab, 0x47, 0x24, 0x60, 0x14, - 0x9f, 0xd9, 0x46, 0xf7, 0x60, 0x39, 0xf6, 0xc3, 0xd0, 0x0f, 0x8f, 0x8d, 0xb5, 0x3c, 0x67, 0x46, - 0x45, 0x1f, 0xc2, 0x9a, 0xfa, 0x53, 0x5f, 0x11, 0x98, 0x71, 0x7d, 0xab, 0xbc, 0x5d, 0xc3, 0x73, - 0x54, 0xf3, 0x37, 0x25, 0xd8, 0x58, 0x1c, 0x6c, 0xe8, 0x16, 0xdc, 0xec, 0x77, 0xfb, 0xd6, 0x7e, - 0xb7, 0x67, 0x0d, 0x1d, 0x6b, 0x70, 0xd0, 0x1f, 0x36, 0xfb, 0x7d, 0xab, 0xd7, 0xd1, 0xaf, 0x21, - 0x13, 0xee, 0x2e, 0xdc, 0x1a, 0x62, 0xab, 0x6d, 0x1f, 0x5a, 0x58, 0x14, 0x46, 0x04, 0x6b, 0x9d, - 0xe6, 0xa0, 0x39, 0x74, 0x06, 0xd8, 0x6a, 0xbe, 0xe8, 0xf6, 0x9e, 0xe9, 0x25, 0xf4, 0x10, 0xee, - 0xcf, 0xc9, 0xcd, 0x76, 0x4f, 0x45, 0xcb, 0x42, 0x74, 0xc6, 0xd6, 0xde, 0xb7, 0x1d, 0x4b, 0xaf, - 0xa0, 0xdb, 0xf0, 0x7e, 0x91, 0x76, 0x2a, 0x50, 0x5d, 0x60, 0x6a, 0x1b, 0x5b, 0xcd, 0x81, 0xa5, - 0x2f, 0x21, 0x1d, 0x56, 0x06, 0xb8, 0xd9, 0x73, 0x9e, 0x5a, 0x78, 0x88, 0x5b, 0x2f, 0xf5, 0x65, - 0xb4, 0x01, 0x68, 0x46, 0x79, 0xda, 0xed, 0x35, 0xf7, 0xbb, 0x3f, 0xb7, 0x3a, 0x7a, 0xcd, 0xfc, - 0x9b, 0x06, 0x37, 0xec, 0x38, 0x2b, 0x7d, 0xff, 0x1b, 0xe9, 0x66, 0x71, 0xa4, 0x95, 0xaf, 0x18, - 0x69, 0xe6, 0xdf, 0x35, 0x78, 0x4f, 0x14, 0xae, 0x38, 0x20, 0x6e, 0x3e, 0xa1, 0x7e, 0x36, 0xe7, - 0xe1, 0xc5, 0x05, 0x38, 0x73, 0xcb, 0x80, 0x65, 0x8f, 0x06, 0xcf, 0xfd, 0x90, 0xa7, 0x95, 0x37, - 0x5b, 0x16, 0xb2, 0x64, 0xe9, 0x2d, 0xb2, 0xe4, 0xdc, 0xfd, 0xac, 0x5c, 0xed, 0x7e, 0x9a, 0xcf, - 0x45, 0x79, 0x6e, 0x47, 0xf1, 0xf4, 0xbb, 0xba, 0x68, 0xee, 0x8b, 0x90, 0x50, 0xf7, 0xa3, 0x50, - 0xf7, 0xae, 0xa6, 0xed, 0xb7, 0x25, 0xb8, 0x95, 0xaa, 0x7b, 0x96, 0x44, 0x93, 0xf8, 0x1d, 0xe8, - 0x44, 0x5f, 0x43, 0xdd, 0x4b, 0xf1, 0x64, 0xf2, 0x18, 0xe6, 0x9b, 0xcc, 0x3c, 0xda, 0x2c, 0x6d, - 0xf8, 0x66, 0x12, 0xe8, 0x87, 0xd0, 0x90, 0x1d, 0xa7, 0xec, 0x28, 0x55, 0x64, 0x5d, 0xd8, 0x6e, - 0xe6, 0xb9, 0x51, 0x13, 0x6a, 0xd4, 0xed, 0x47, 0x81, 0xef, 0x4e, 0x65, 0x5d, 0x6b, 0xec, 0x3e, - 0x2c, 0xf6, 0xb7, 0x09, 0x61, 0x93, 0x84, 0xb6, 0x23, 0xcf, 0x0f, 0x8f, 0x15, 0x9f, 0xd2, 0x32, - 0x13, 0x33, 0xbf, 0x80, 0x0d, 0x67, 0x14, 0x25, 0xbc, 0xed, 0x27, 0xee, 0xc4, 0xe7, 0xce, 0x68, - 0xdc, 0xf5, 0x14, 0x1c, 0x6b, 0x50, 0x1a, 0xf9, 0x12, 0x8a, 0x32, 0x2e, 0x8d, 0x7c, 0xb1, 0x0e, - 0x22, 0xe9, 0x61, 0x19, 0x97, 0x82, 0xc8, 0x8c, 0xc0, 0x98, 0x93, 
0x74, 0x82, 0x88, 0x2b, 0xd9, - 0x2f, 0xa1, 0xca, 0x84, 0xa6, 0x14, 0xc9, 0x07, 0xc5, 0x08, 0x5a, 0xf8, 0x3f, 0xac, 0x24, 0x44, - 0x50, 0xb3, 0x20, 0xe2, 0x5d, 0xef, 0xb5, 0xfc, 0x57, 0x15, 0x67, 0x4b, 0xf3, 0x57, 0x25, 0xd8, - 0x12, 0x97, 0x47, 0x56, 0xa8, 0xbc, 0x92, 0xa6, 0xeb, 0x52, 0xc6, 0xbe, 0xcb, 0x21, 0xde, 0x05, - 0x18, 0x93, 0xd7, 0x87, 0x22, 0xb9, 0x47, 0x61, 0xda, 0x0c, 0xe5, 0x28, 0xe8, 0x6b, 0x58, 0x52, - 0x56, 0xa4, 0x5d, 0xc7, 0xc3, 0x8b, 0x1c, 0x9a, 0xc1, 0x80, 0x53, 0x21, 0xf4, 0x0c, 0x6e, 0xb3, - 0x49, 0x1c, 0x47, 0x09, 0x67, 0x98, 0xba, 0xd4, 0x8f, 0xf9, 0x21, 0x4d, 0xfc, 0x23, 0xdf, 0x25, - 0xe9, 0xe4, 0x94, 0xab, 0x20, 0x17, 0x71, 0x9a, 0x7f, 0xd4, 0xe0, 0x21, 0xa6, 0x01, 0x25, 0x8c, - 0x9e, 0x05, 0x20, 0x45, 0x46, 0xe1, 0x70, 0x6a, 0xb1, 0xb6, 0x20, 0x30, 0x2e, 0xb5, 0xb8, 0x30, - 0xce, 0x94, 0xae, 0x3c, 0xce, 0x9c, 0xc0, 0x87, 0x17, 0x98, 0xcb, 0xe2, 0x28, 0x64, 0xe9, 0x94, - 0xfb, 0xf1, 0xdc, 0x94, 0xfb, 0xde, 0x5c, 0xd2, 0x29, 0x8c, 0xab, 0x37, 0xa0, 0x4a, 0x93, 0x24, - 0x4a, 0xa4, 0x65, 0x75, 0xac, 0x16, 0xe6, 0xaf, 0x35, 0xb8, 0x3d, 0xe7, 0x58, 0x01, 0x92, 0xe2, - 0xac, 0xa2, 0xcd, 0xcf, 0x2a, 0xef, 0xca, 0xe7, 0xdf, 0x69, 0x70, 0xe7, 0x8c, 0x19, 0xef, 0xd6, - 0x55, 0xf4, 0x29, 0x94, 0xfc, 0x2c, 0x16, 0xff, 0xab, 0xcb, 0x55, 0xf2, 0x3d, 0xf3, 0xcf, 0x1a, - 0xac, 0xf7, 0x89, 0x7b, 0x42, 0x79, 0x7e, 0xfe, 0xfb, 0x00, 0x56, 0xd5, 0x9c, 0xd3, 0x0d, 0x5b, - 0xb3, 0x41, 0x59, 0xc7, 0x45, 0xa2, 0x30, 0x83, 0xd1, 0x6f, 0x42, 0x75, 0xff, 0x75, 0xac, 0x16, - 0xe8, 0x7b, 0xb0, 0x1e, 0x10, 0xc6, 0x95, 0xd2, 0x4c, 0x5e, 0x54, 0x9c, 0x1a, 0x3e, 0xbb, 0x21, - 0xcb, 0x15, 0xe1, 0x64, 0x9f, 0x86, 0x32, 0x59, 0x5d, 0xc7, 0xd9, 0x12, 0x3d, 0x80, 0x3a, 0x9b, - 0x86, 0xae, 0x92, 0xaf, 0xe6, 0x6f, 0xc3, 0x29, 0xdd, 0xfc, 0x83, 0x06, 0x7a, 0x3f, 0xed, 0xb0, - 0x9b, 0x59, 0x55, 0x99, 0xd9, 0x25, 0xac, 0x46, 0x99, 0x5d, 0x1f, 0x41, 0x35, 0xa1, 0x71, 0x30, - 0x95, 0xd5, 0xfe, 0x1c, 0x80, 0x15, 0x07, 0xfa, 0x12, 0x36, 0xbc, 0xe8, 0x5b, 0xd1, 0x7a, 0x51, - 0x32, 0x6e, 0xba, 0x27, 0x03, 0x7f, 0x4c, 0x7b, 0x24, 0x8c, 0x98, 0x44, 0xb7, 0xf2, 0x44, 0xfb, - 0x01, 0x3e, 0x87, 0x01, 0x6d, 0x40, 0xe5, 0x28, 0x20, 0xc7, 0xf2, 0xe1, 0x63, 0xb5, 0x55, 0xd2, - 0x35, 0x2c, 0xd7, 0x26, 0x83, 0xf7, 0xc5, 0x68, 0x6a, 0xcf, 0xca, 0xcb, 0xe9, 0x00, 0xff, 0x39, - 0xd4, 0xdc, 0xac, 0x1b, 0xd7, 0x2e, 0xed, 0xc6, 0x67, 0xbc, 0x68, 0x0b, 0x1a, 0xee, 0x68, 0x12, - 0x9e, 0xd8, 0xf9, 0xf9, 0x34, 0x4f, 0x32, 0xff, 0x59, 0x82, 0x1b, 0x12, 0x27, 0x91, 0x21, 0xaf, - 0x1c, 0x6d, 0x26, 0xac, 0x1c, 0xf9, 0x09, 0xe3, 0x2d, 0xe2, 0xed, 0xfb, 0xe1, 0x49, 0x1a, 0x74, - 0x05, 0x1a, 0x3a, 0x00, 0x3d, 0xb3, 0x2b, 0xfb, 0x53, 0x1a, 0x89, 0x1f, 0xcd, 0x0d, 0x55, 0x85, - 0xba, 0x5d, 0xb0, 0x0a, 0x9f, 0x51, 0x81, 0x06, 0x80, 0x92, 0x33, 0xa8, 0xc9, 0xd4, 0xd8, 0xd8, - 0xfd, 0xa0, 0xa0, 0xf8, 0x1c, 0x70, 0xf1, 0x02, 0xf9, 0xfc, 0xf3, 0x55, 0xb5, 0xf8, 0x7c, 0xf5, - 0x15, 0xdc, 0x62, 0x67, 0x72, 0x52, 0x56, 0x01, 0x96, 0xb6, 0xb4, 0xed, 0x55, 0x7c, 0x3e, 0x83, - 0xd9, 0x81, 0x9b, 0xaa, 0xef, 0x14, 0xc6, 0x28, 0x10, 0xdf, 0x1e, 0x6e, 0xb3, 0x09, 0xa8, 0xd3, - 0xcb, 0x32, 0xca, 0x2c, 0xa6, 0xdf, 0x4a, 0xc5, 0x9f, 0x34, 0xb8, 0x73, 0x11, 0xd2, 0x22, 0x74, - 0x66, 0x53, 0x7d, 0xe2, 0x4a, 0x95, 0xab, 0x38, 0x4f, 0x92, 0xc1, 0x95, 0xb8, 0xfd, 0xb4, 0xe5, - 0x9e, 0x05, 0xd7, 0x29, 0x09, 0xe9, 0x50, 0x1e, 0x7b, 0x8f, 0xe5, 0xcd, 0x5e, 0xc1, 0xe2, 0x53, - 0x74, 0xd4, 0x6e, 0xe2, 0xe6, 0x9a, 0xc4, 0xcb, 0x9e, 0x1e, 0x32, 0x76, 0xf3, 0x63, 0x58, 0xb5, - 0xe3, 0xf6, 0x84, 0xf1, 0x28, 0x6d, 0xbb, 0x36, 0xa1, 0xe6, 0xca, 0x65, 0x5a, 0xab, 0xea, 
0x78, - 0xb6, 0x7e, 0xf4, 0x2f, 0x0d, 0x96, 0x16, 0xbd, 0x05, 0xce, 0x1e, 0xfd, 0xe4, 0x98, 0xa3, 0x9e, - 0x05, 0xdb, 0xcf, 0xad, 0xf6, 0x9e, 0x73, 0xf0, 0x42, 0x2f, 0xa1, 0x75, 0x58, 0x55, 0xb4, 0x6e, - 0xef, 0xb0, 0xb9, 0xdf, 0xed, 0xe8, 0x65, 0x31, 0x86, 0x28, 0x92, 0xf5, 0xd3, 0xae, 0x33, 0x70, - 0xf4, 0x8a, 0x18, 0x43, 0x14, 0xa5, 0x29, 0xb5, 0x0e, 0x07, 0xf6, 0x9e, 0xd5, 0xd3, 0xab, 0xe8, - 0x3a, 0x34, 0x32, 0x55, 0x43, 0x7b, 0x4f, 0x5f, 0xca, 0x3f, 0x3c, 0x3a, 0x07, 0xfd, 0xbe, 0x8d, - 0x07, 0x56, 0x47, 0x5f, 0x16, 0x7c, 0xb6, 0xdd, 0x1a, 0x62, 0xcb, 0x19, 0x34, 0xf1, 0x40, 0xaf, - 0x89, 0xbf, 0xa6, 0x04, 0x0b, 0x1f, 0x5a, 0x9d, 0x4f, 0xf4, 0xfa, 0x3c, 0x69, 0x57, 0x87, 0x79, - 0xd2, 0xa7, 0x7a, 0x43, 0x68, 0xea, 0xf6, 0x86, 0x7d, 0x6c, 0x3f, 0xc3, 0xc2, 0xbd, 0x95, 0x47, - 0xbf, 0x28, 0x36, 0x65, 0x4f, 0xbd, 0xd9, 0x2d, 0x79, 0x00, 0xf7, 0x3a, 0xf6, 0xb0, 0x67, 0x0f, - 0x86, 0x07, 0x6a, 0x02, 0xb3, 0xba, 0xfd, 0xc1, 0xf0, 0xd0, 0xc2, 0xdd, 0xa7, 0xdd, 0x76, 0x73, - 0xd0, 0xb5, 0x7b, 0xfa, 0x35, 0x74, 0x07, 0x8c, 0x73, 0x77, 0xb5, 0xd6, 0x8f, 0xe0, 0x61, 0x94, - 0x1c, 0xef, 0x90, 0x98, 0xb8, 0x23, 0x5a, 0x38, 0x35, 0xf9, 0x2a, 0xed, 0x46, 0x81, 0xfa, 0x68, - 0xa1, 0x7c, 0xbd, 0x93, 0x67, 0xc5, 0x7e, 0xaf, 0x69, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xfa, - 0x1e, 0x11, 0x57, 0xf3, 0x16, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.proto deleted file mode 100644 index 522ee06b68b..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/datatransfer.proto +++ /dev/null @@ -1,315 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -// This file contains protocol buffers that are used to transfer data -// to and from the datanode, as well as between datanodes. 
- -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "DataTransferProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "Security.proto"; -import "hdfs.proto"; - -message DataTransferEncryptorMessageProto { - enum DataTransferEncryptorStatus { - SUCCESS = 0; - ERROR_UNKNOWN_KEY = 1; - ERROR = 2; - } - required DataTransferEncryptorStatus status = 1; - optional bytes payload = 2; - optional string message = 3; - repeated CipherOptionProto cipherOption = 4; -} - -message BaseHeaderProto { - required ExtendedBlockProto block = 1; - optional hadoop.common.TokenProto token = 2; - optional DataTransferTraceInfoProto traceInfo = 3; -} - -message DataTransferTraceInfoProto { - required uint64 traceId = 1; - required uint64 parentId = 2; -} - -message ClientOperationHeaderProto { - required BaseHeaderProto baseHeader = 1; - required string clientName = 2; -} - -message CachingStrategyProto { - optional bool dropBehind = 1; - optional int64 readahead = 2; -} - -message OpReadBlockProto { - required ClientOperationHeaderProto header = 1; - required uint64 offset = 2; - required uint64 len = 3; - optional bool sendChecksums = 4 [default = true]; - optional CachingStrategyProto cachingStrategy = 5; -} - -message ChecksumProto { - required ChecksumTypeProto type = 1; - required uint32 bytesPerChecksum = 2; -} - -message OpWriteBlockProto { - required ClientOperationHeaderProto header = 1; - repeated DatanodeInfoProto targets = 2; - optional DatanodeInfoProto source = 3; - enum BlockConstructionStage { - PIPELINE_SETUP_APPEND = 0; - // pipeline set up for failed PIPELINE_SETUP_APPEND recovery - PIPELINE_SETUP_APPEND_RECOVERY = 1; - // data streaming - DATA_STREAMING = 2; - // pipeline setup for failed data streaming recovery - PIPELINE_SETUP_STREAMING_RECOVERY = 3; - // close the block and pipeline - PIPELINE_CLOSE = 4; - // Recover a failed PIPELINE_CLOSE - PIPELINE_CLOSE_RECOVERY = 5; - // pipeline set up for block creation - PIPELINE_SETUP_CREATE = 6; - // transfer RBW for adding datanodes - TRANSFER_RBW = 7; - // transfer Finalized for adding datanodes - TRANSFER_FINALIZED = 8; - } - required BlockConstructionStage stage = 4; - required uint32 pipelineSize = 5; - required uint64 minBytesRcvd = 6; - required uint64 maxBytesRcvd = 7; - required uint64 latestGenerationStamp = 8; - - /** - * The requested checksum mechanism for this block write. - */ - required ChecksumProto requestedChecksum = 9; - optional CachingStrategyProto cachingStrategy = 10; - optional StorageTypeProto storageType = 11 [default = DISK]; - repeated StorageTypeProto targetStorageTypes = 12; - - /** - * Hint to the DataNode that the block can be allocated on transient - * storage i.e. memory and written to disk lazily. The DataNode is free - * to ignore this hint. - */ - optional bool allowLazyPersist = 13 [default = false]; - //whether to pin the block, so Balancer won't move it. 
- optional bool pinning = 14 [default = false]; - repeated bool targetPinnings = 15; -} - -message OpTransferBlockProto { - required ClientOperationHeaderProto header = 1; - repeated DatanodeInfoProto targets = 2; - repeated StorageTypeProto targetStorageTypes = 3; -} - -message OpReplaceBlockProto { - required BaseHeaderProto header = 1; - required string delHint = 2; - required DatanodeInfoProto source = 3; - optional StorageTypeProto storageType = 4 [default = DISK]; -} - -message OpCopyBlockProto { - required BaseHeaderProto header = 1; -} - -message OpBlockChecksumProto { - required BaseHeaderProto header = 1; -} - -message OpBlockGroupChecksumProto { - required BaseHeaderProto header = 1; - required DatanodeInfosProto datanodes = 2; - // each internal block has a block token - repeated hadoop.common.TokenProto blockTokens = 3; - required ErasureCodingPolicyProto ecPolicy = 4; -} - -/** - * An ID uniquely identifying a shared memory segment. - */ -message ShortCircuitShmIdProto { - required int64 hi = 1; - required int64 lo = 2; -} - -/** - * An ID uniquely identifying a slot within a shared memory segment. - */ -message ShortCircuitShmSlotProto { - required ShortCircuitShmIdProto shmId = 1; - required int32 slotIdx = 2; -} - -message OpRequestShortCircuitAccessProto { - required BaseHeaderProto header = 1; - - /** In order to get short-circuit access to block data, clients must set this - * to the highest version of the block data that they can understand. - * Currently 1 is the only version, but more versions may exist in the future - * if the on-disk format changes. - */ - required uint32 maxVersion = 2; - - /** - * The shared memory slot to use, if we are using one. - */ - optional ShortCircuitShmSlotProto slotId = 3; - - /** - * True if the client supports verifying that the file descriptor has been - * sent successfully. - */ - optional bool supportsReceiptVerification = 4 [default = false]; -} - -message ReleaseShortCircuitAccessRequestProto { - required ShortCircuitShmSlotProto slotId = 1; - optional DataTransferTraceInfoProto traceInfo = 2; -} - -message ReleaseShortCircuitAccessResponseProto { - required Status status = 1; - optional string error = 2; -} - -message ShortCircuitShmRequestProto { - // The name of the client requesting the shared memory segment. This is - // purely for logging / debugging purposes. - required string clientName = 1; - optional DataTransferTraceInfoProto traceInfo = 2; -} - -message ShortCircuitShmResponseProto { - required Status status = 1; - optional string error = 2; - optional ShortCircuitShmIdProto id = 3; -} - -message PacketHeaderProto { - // All fields must be fixed-length! 
- required sfixed64 offsetInBlock = 1; - required sfixed64 seqno = 2; - required bool lastPacketInBlock = 3; - required sfixed32 dataLen = 4; - optional bool syncBlock = 5 [default = false]; -} - -// Status is a 4-bit enum -enum Status { - SUCCESS = 0; - ERROR = 1; - ERROR_CHECKSUM = 2; - ERROR_INVALID = 3; - ERROR_EXISTS = 4; - ERROR_ACCESS_TOKEN = 5; - CHECKSUM_OK = 6; - ERROR_UNSUPPORTED = 7; - OOB_RESTART = 8; // Quick restart - OOB_RESERVED1 = 9; // Reserved - OOB_RESERVED2 = 10; // Reserved - OOB_RESERVED3 = 11; // Reserved - IN_PROGRESS = 12; -} - -enum ShortCircuitFdResponse { - DO_NOT_USE_RECEIPT_VERIFICATION = 0; - USE_RECEIPT_VERIFICATION = 1; -} - -message PipelineAckProto { - required sint64 seqno = 1; - repeated Status reply = 2; - optional uint64 downstreamAckTimeNanos = 3 [default = 0]; - repeated uint32 flag = 4 [packed=true]; -} - -/** - * Sent as part of the BlockOpResponseProto - * for READ_BLOCK and COPY_BLOCK operations. - */ -message ReadOpChecksumInfoProto { - required ChecksumProto checksum = 1; - - /** - * The offset into the block at which the first packet - * will start. This is necessary since reads will align - * backwards to a checksum chunk boundary. - */ - required uint64 chunkOffset = 2; -} - -message BlockOpResponseProto { - required Status status = 1; - - optional string firstBadLink = 2; - optional OpBlockChecksumResponseProto checksumResponse = 3; - optional ReadOpChecksumInfoProto readOpChecksumInfo = 4; - - /** explanatory text which may be useful to log on the client side */ - optional string message = 5; - - /** If the server chooses to agree to the request of a client for - * short-circuit access, it will send a response message with the relevant - * file descriptors attached. - * - * In the body of the message, this version number will be set to the - * specific version number of the block data that the client is about to - * read. - */ - optional uint32 shortCircuitAccessVersion = 6; -} - -/** - * Message sent from the client to the DN after reading the entire - * read request. - */ -message ClientReadStatusProto { - required Status status = 1; -} - -message DNTransferAckProto { - required Status status = 1; -} - -message OpBlockChecksumResponseProto { - required uint32 bytesPerCrc = 1; - required uint64 crcPerBlock = 2; - required bytes md5 = 3; - optional ChecksumTypeProto crcType = 4; -} - -message OpCustomProto { - required string customId = 1; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.pb.go deleted file mode 100644 index 0e265eb7dcb..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.pb.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: encryption.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type CreateEncryptionZoneRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - KeyName *string `protobuf:"bytes,2,opt,name=keyName" json:"keyName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateEncryptionZoneRequestProto) Reset() { *m = CreateEncryptionZoneRequestProto{} } -func (m *CreateEncryptionZoneRequestProto) String() string { return proto.CompactTextString(m) } -func (*CreateEncryptionZoneRequestProto) ProtoMessage() {} -func (*CreateEncryptionZoneRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{0} -} - -func (m *CreateEncryptionZoneRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *CreateEncryptionZoneRequestProto) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -type CreateEncryptionZoneResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateEncryptionZoneResponseProto) Reset() { *m = CreateEncryptionZoneResponseProto{} } -func (m *CreateEncryptionZoneResponseProto) String() string { return proto.CompactTextString(m) } -func (*CreateEncryptionZoneResponseProto) ProtoMessage() {} -func (*CreateEncryptionZoneResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{1} -} - -type ListEncryptionZonesRequestProto struct { - Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListEncryptionZonesRequestProto) Reset() { *m = ListEncryptionZonesRequestProto{} } -func (m *ListEncryptionZonesRequestProto) String() string { return proto.CompactTextString(m) } -func (*ListEncryptionZonesRequestProto) ProtoMessage() {} -func (*ListEncryptionZonesRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } - -func (m *ListEncryptionZonesRequestProto) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -type EncryptionZoneProto struct { - Id *int64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"` - Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"` - Suite *CipherSuiteProto `protobuf:"varint,3,req,name=suite,enum=hadoop.hdfs.CipherSuiteProto" json:"suite,omitempty"` - CryptoProtocolVersion *CryptoProtocolVersionProto `protobuf:"varint,4,req,name=cryptoProtocolVersion,enum=hadoop.hdfs.CryptoProtocolVersionProto" json:"cryptoProtocolVersion,omitempty"` - KeyName *string `protobuf:"bytes,5,req,name=keyName" json:"keyName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EncryptionZoneProto) Reset() { *m = EncryptionZoneProto{} } -func (m *EncryptionZoneProto) String() string { return proto.CompactTextString(m) } -func (*EncryptionZoneProto) ProtoMessage() {} -func (*EncryptionZoneProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } - -func (m *EncryptionZoneProto) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *EncryptionZoneProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *EncryptionZoneProto) GetSuite() CipherSuiteProto { - if m != nil && m.Suite != nil { - return *m.Suite - } - return CipherSuiteProto_UNKNOWN -} - -func (m *EncryptionZoneProto) GetCryptoProtocolVersion() CryptoProtocolVersionProto { - if m != nil && m.CryptoProtocolVersion != nil { - return *m.CryptoProtocolVersion - } - return 
CryptoProtocolVersionProto_UNKNOWN_PROTOCOL_VERSION -} - -func (m *EncryptionZoneProto) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -type ListEncryptionZonesResponseProto struct { - Zones []*EncryptionZoneProto `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"` - HasMore *bool `protobuf:"varint,2,req,name=hasMore" json:"hasMore,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListEncryptionZonesResponseProto) Reset() { *m = ListEncryptionZonesResponseProto{} } -func (m *ListEncryptionZonesResponseProto) String() string { return proto.CompactTextString(m) } -func (*ListEncryptionZonesResponseProto) ProtoMessage() {} -func (*ListEncryptionZonesResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor2, []int{4} -} - -func (m *ListEncryptionZonesResponseProto) GetZones() []*EncryptionZoneProto { - if m != nil { - return m.Zones - } - return nil -} - -func (m *ListEncryptionZonesResponseProto) GetHasMore() bool { - if m != nil && m.HasMore != nil { - return *m.HasMore - } - return false -} - -type GetEZForPathRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetEZForPathRequestProto) Reset() { *m = GetEZForPathRequestProto{} } -func (m *GetEZForPathRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetEZForPathRequestProto) ProtoMessage() {} -func (*GetEZForPathRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } - -func (m *GetEZForPathRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type GetEZForPathResponseProto struct { - Zone *EncryptionZoneProto `protobuf:"bytes,1,opt,name=zone" json:"zone,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetEZForPathResponseProto) Reset() { *m = GetEZForPathResponseProto{} } -func (m *GetEZForPathResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetEZForPathResponseProto) ProtoMessage() {} -func (*GetEZForPathResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } - -func (m *GetEZForPathResponseProto) GetZone() *EncryptionZoneProto { - if m != nil { - return m.Zone - } - return nil -} - -func init() { - proto.RegisterType((*CreateEncryptionZoneRequestProto)(nil), "hadoop.hdfs.CreateEncryptionZoneRequestProto") - proto.RegisterType((*CreateEncryptionZoneResponseProto)(nil), "hadoop.hdfs.CreateEncryptionZoneResponseProto") - proto.RegisterType((*ListEncryptionZonesRequestProto)(nil), "hadoop.hdfs.ListEncryptionZonesRequestProto") - proto.RegisterType((*EncryptionZoneProto)(nil), "hadoop.hdfs.EncryptionZoneProto") - proto.RegisterType((*ListEncryptionZonesResponseProto)(nil), "hadoop.hdfs.ListEncryptionZonesResponseProto") - proto.RegisterType((*GetEZForPathRequestProto)(nil), "hadoop.hdfs.GetEZForPathRequestProto") - proto.RegisterType((*GetEZForPathResponseProto)(nil), "hadoop.hdfs.GetEZForPathResponseProto") -} - -func init() { proto.RegisterFile("encryption.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 361 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xeb, 0x40, - 0x14, 0x85, 0x99, 0xa4, 0xe5, 0xbd, 0xde, 0x42, 0x29, 0x79, 0x14, 0xe6, 0x3d, 0x78, 0x38, 0x46, - 0xc4, 0x2c, 0x24, 0x60, 0x15, 0xf7, 0xb6, 0x54, 0x37, 0x5a, 0x6a, 0x04, 0x17, 0x05, 0x17, 0x43, - 0x72, 0x35, 0x41, 0xcd, 0x8c, 0x33, 0xd3, 0x45, 
0xfd, 0x35, 0xfe, 0x3b, 0xff, 0x86, 0x64, 0x62, - 0x4b, 0x46, 0x82, 0xb8, 0x09, 0x77, 0x66, 0xce, 0x3d, 0xe7, 0xf0, 0x05, 0x86, 0x58, 0xa6, 0x6a, - 0x2d, 0x4d, 0x21, 0xca, 0x58, 0x2a, 0x61, 0x44, 0xd0, 0xcf, 0x79, 0x26, 0x84, 0x8c, 0xf3, 0xec, - 0x5e, 0xff, 0x83, 0xea, 0x5b, 0x3f, 0x84, 0x73, 0x60, 0x53, 0x85, 0xdc, 0xe0, 0x6c, 0xbb, 0xb2, - 0x14, 0x25, 0x26, 0xf8, 0xb2, 0x42, 0x6d, 0x16, 0x76, 0x79, 0x08, 0xbe, 0x56, 0x29, 0x25, 0xcc, - 0x8b, 0x7a, 0x49, 0x35, 0x06, 0x14, 0x7e, 0x3d, 0xe2, 0x7a, 0xce, 0x9f, 0x91, 0x7a, 0x8c, 0x44, - 0xbd, 0x64, 0x73, 0x0c, 0xf7, 0x60, 0xb7, 0xdd, 0x4f, 0x4b, 0x51, 0x6a, 0xb4, 0x86, 0xe1, 0x11, - 0xec, 0x5c, 0x16, 0xda, 0xb8, 0x12, 0xed, 0x64, 0x0e, 0xc0, 0x2b, 0x32, 0x1b, 0xe9, 0x27, 0x5e, - 0x91, 0x85, 0xef, 0x04, 0xfe, 0xb8, 0xfa, 0x56, 0x5d, 0x10, 0x40, 0x47, 0x72, 0x93, 0x53, 0xcf, - 0x96, 0xb5, 0x73, 0x70, 0x0c, 0x5d, 0xbd, 0x2a, 0x0c, 0x52, 0x9f, 0x79, 0xd1, 0x60, 0xfc, 0x3f, - 0x6e, 0xc0, 0x88, 0xa7, 0x85, 0xcc, 0x51, 0xdd, 0x54, 0xef, 0xd6, 0x31, 0xa9, 0xb5, 0xc1, 0x1d, - 0x8c, 0x6c, 0x9a, 0xb0, 0xb7, 0xa9, 0x78, 0xba, 0x45, 0xa5, 0x0b, 0x51, 0xd2, 0x8e, 0x35, 0x39, - 0x70, 0x4d, 0xda, 0x94, 0xb5, 0x5d, 0xbb, 0x4b, 0x93, 0x60, 0xd7, 0x56, 0xdd, 0x12, 0x34, 0xc0, - 0x5a, 0xe1, 0x34, 0x00, 0x06, 0xa7, 0xd0, 0x7d, 0xad, 0x6e, 0x29, 0x61, 0x7e, 0xd4, 0x1f, 0x33, - 0xa7, 0x4c, 0x0b, 0xa6, 0xa4, 0x96, 0x57, 0xa9, 0x39, 0xd7, 0x57, 0x42, 0xa1, 0x05, 0xf4, 0x3b, - 0xd9, 0x1c, 0xc3, 0x43, 0xa0, 0x17, 0x68, 0x66, 0xcb, 0x73, 0xa1, 0x16, 0xdc, 0xe4, 0xdf, 0xff, - 0xff, 0xf0, 0x1a, 0xfe, 0xba, 0xea, 0x66, 0xb9, 0x13, 0xe8, 0x54, 0x69, 0x94, 0x30, 0xf2, 0xa3, - 0x6e, 0x56, 0x3d, 0x39, 0x83, 0x7d, 0xa1, 0x1e, 0x62, 0x2e, 0x79, 0x9a, 0xa3, 0xb3, 0x23, 0x3f, - 0xd1, 0xd5, 0xc3, 0x64, 0xf4, 0x85, 0x8c, 0x35, 0xd1, 0x6f, 0x84, 0x7c, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x8a, 0xb2, 0xc6, 0x30, 0xf7, 0x02, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.proto deleted file mode 100644 index 68b2f3af29c..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/encryption.proto +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -// This file contains protocol buffers that are used throughout HDFS -- i.e. -// by the client, server, and data transfer protocols. 
- - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "EncryptionZonesProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "hdfs.proto"; - -message CreateEncryptionZoneRequestProto { - required string src = 1; - optional string keyName = 2; -} - -message CreateEncryptionZoneResponseProto { -} - -message ListEncryptionZonesRequestProto { - required int64 id = 1; -} - -message EncryptionZoneProto { - required int64 id = 1; - required string path = 2; - required CipherSuiteProto suite = 3; - required CryptoProtocolVersionProto cryptoProtocolVersion = 4; - required string keyName = 5; -} - -message ListEncryptionZonesResponseProto { - repeated EncryptionZoneProto zones = 1; - required bool hasMore = 2; -} - -message GetEZForPathRequestProto { - required string src = 1; -} - -message GetEZForPathResponseProto { - optional EncryptionZoneProto zone = 1; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.pb.go deleted file mode 100644 index 3d91d8bf9bf..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.pb.go +++ /dev/null @@ -1,228 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: erasurecoding.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type SetErasureCodingPolicyRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - EcPolicy *ErasureCodingPolicyProto `protobuf:"bytes,2,opt,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetErasureCodingPolicyRequestProto) Reset() { *m = SetErasureCodingPolicyRequestProto{} } -func (m *SetErasureCodingPolicyRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetErasureCodingPolicyRequestProto) ProtoMessage() {} -func (*SetErasureCodingPolicyRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{0} -} - -func (m *SetErasureCodingPolicyRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetErasureCodingPolicyRequestProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -type SetErasureCodingPolicyResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetErasureCodingPolicyResponseProto) Reset() { *m = SetErasureCodingPolicyResponseProto{} } -func (m *SetErasureCodingPolicyResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetErasureCodingPolicyResponseProto) ProtoMessage() {} -func (*SetErasureCodingPolicyResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{1} -} - -type GetErasureCodingPoliciesRequestProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetErasureCodingPoliciesRequestProto) Reset() { *m = GetErasureCodingPoliciesRequestProto{} } -func (m *GetErasureCodingPoliciesRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetErasureCodingPoliciesRequestProto) ProtoMessage() {} -func (*GetErasureCodingPoliciesRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{2} -} - -type GetErasureCodingPoliciesResponseProto struct { - EcPolicies 
[]*ErasureCodingPolicyProto `protobuf:"bytes,1,rep,name=ecPolicies" json:"ecPolicies,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetErasureCodingPoliciesResponseProto) Reset() { *m = GetErasureCodingPoliciesResponseProto{} } -func (m *GetErasureCodingPoliciesResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetErasureCodingPoliciesResponseProto) ProtoMessage() {} -func (*GetErasureCodingPoliciesResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{3} -} - -func (m *GetErasureCodingPoliciesResponseProto) GetEcPolicies() []*ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicies - } - return nil -} - -type GetErasureCodingPolicyRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetErasureCodingPolicyRequestProto) Reset() { *m = GetErasureCodingPolicyRequestProto{} } -func (m *GetErasureCodingPolicyRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetErasureCodingPolicyRequestProto) ProtoMessage() {} -func (*GetErasureCodingPolicyRequestProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{4} -} - -func (m *GetErasureCodingPolicyRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type GetErasureCodingPolicyResponseProto struct { - EcPolicy *ErasureCodingPolicyProto `protobuf:"bytes,1,opt,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetErasureCodingPolicyResponseProto) Reset() { *m = GetErasureCodingPolicyResponseProto{} } -func (m *GetErasureCodingPolicyResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetErasureCodingPolicyResponseProto) ProtoMessage() {} -func (*GetErasureCodingPolicyResponseProto) Descriptor() ([]byte, []int) { - return fileDescriptor3, []int{5} -} - -func (m *GetErasureCodingPolicyResponseProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -// * -// Block erasure coding reconstruction info -type BlockECReconstructionInfoProto struct { - Block *ExtendedBlockProto `protobuf:"bytes,1,req,name=block" json:"block,omitempty"` - SourceDnInfos *DatanodeInfosProto `protobuf:"bytes,2,req,name=sourceDnInfos" json:"sourceDnInfos,omitempty"` - TargetDnInfos *DatanodeInfosProto `protobuf:"bytes,3,req,name=targetDnInfos" json:"targetDnInfos,omitempty"` - TargetStorageUuids *StorageUuidsProto `protobuf:"bytes,4,req,name=targetStorageUuids" json:"targetStorageUuids,omitempty"` - TargetStorageTypes *StorageTypesProto `protobuf:"bytes,5,req,name=targetStorageTypes" json:"targetStorageTypes,omitempty"` - LiveBlockIndices []byte `protobuf:"bytes,6,req,name=liveBlockIndices" json:"liveBlockIndices,omitempty"` - EcPolicy *ErasureCodingPolicyProto `protobuf:"bytes,7,req,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BlockECReconstructionInfoProto) Reset() { *m = BlockECReconstructionInfoProto{} } -func (m *BlockECReconstructionInfoProto) String() string { return proto.CompactTextString(m) } -func (*BlockECReconstructionInfoProto) ProtoMessage() {} -func (*BlockECReconstructionInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} } - -func (m *BlockECReconstructionInfoProto) GetBlock() *ExtendedBlockProto { - if m != nil { - return m.Block - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetSourceDnInfos() *DatanodeInfosProto { 
- if m != nil { - return m.SourceDnInfos - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetTargetDnInfos() *DatanodeInfosProto { - if m != nil { - return m.TargetDnInfos - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetTargetStorageUuids() *StorageUuidsProto { - if m != nil { - return m.TargetStorageUuids - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetTargetStorageTypes() *StorageTypesProto { - if m != nil { - return m.TargetStorageTypes - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetLiveBlockIndices() []byte { - if m != nil { - return m.LiveBlockIndices - } - return nil -} - -func (m *BlockECReconstructionInfoProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -func init() { - proto.RegisterType((*SetErasureCodingPolicyRequestProto)(nil), "hadoop.hdfs.SetErasureCodingPolicyRequestProto") - proto.RegisterType((*SetErasureCodingPolicyResponseProto)(nil), "hadoop.hdfs.SetErasureCodingPolicyResponseProto") - proto.RegisterType((*GetErasureCodingPoliciesRequestProto)(nil), "hadoop.hdfs.GetErasureCodingPoliciesRequestProto") - proto.RegisterType((*GetErasureCodingPoliciesResponseProto)(nil), "hadoop.hdfs.GetErasureCodingPoliciesResponseProto") - proto.RegisterType((*GetErasureCodingPolicyRequestProto)(nil), "hadoop.hdfs.GetErasureCodingPolicyRequestProto") - proto.RegisterType((*GetErasureCodingPolicyResponseProto)(nil), "hadoop.hdfs.GetErasureCodingPolicyResponseProto") - proto.RegisterType((*BlockECReconstructionInfoProto)(nil), "hadoop.hdfs.BlockECReconstructionInfoProto") -} - -func init() { proto.RegisterFile("erasurecoding.proto", fileDescriptor3) } - -var fileDescriptor3 = []byte{ - // 404 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd4, 0x30, - 0x10, 0x87, 0x95, 0x5d, 0xca, 0x9f, 0x59, 0x90, 0xaa, 0xf4, 0x12, 0x71, 0x28, 0x91, 0x4b, 0xd0, - 0x8a, 0x43, 0x0e, 0x95, 0xe0, 0x8a, 0xd8, 0x36, 0x5a, 0xf5, 0x82, 0xaa, 0x14, 0x1e, 0xc0, 0xd8, - 0xd3, 0xc4, 0x22, 0xf2, 0x04, 0xdb, 0x41, 0xec, 0xdb, 0xf0, 0x90, 0x3c, 0x00, 0x8a, 0x0d, 0x28, - 0x56, 0xb3, 0xa2, 0xec, 0x65, 0x65, 0x8d, 0x7f, 0xdf, 0x37, 0xb3, 0x63, 0x05, 0x4e, 0xd0, 0x70, - 0x3b, 0x18, 0x14, 0x24, 0x95, 0x6e, 0xca, 0xde, 0x90, 0xa3, 0x74, 0xd5, 0x72, 0x49, 0xd4, 0x97, - 0xad, 0xbc, 0xb5, 0xcf, 0x61, 0xfc, 0x0d, 0x17, 0x6c, 0x07, 0xec, 0x06, 0x5d, 0x15, 0x90, 0x0b, - 0x8f, 0x5c, 0x53, 0xa7, 0xc4, 0xae, 0xc6, 0xaf, 0x03, 0x5a, 0x77, 0xed, 0xf1, 0x63, 0x58, 0x5a, - 0x23, 0xb2, 0x24, 0x5f, 0xac, 0x9f, 0xd4, 0xe3, 0x31, 0x7d, 0x0f, 0x8f, 0x51, 0x84, 0x64, 0xb6, - 0xc8, 0x93, 0xf5, 0xea, 0xbc, 0x28, 0x27, 0x3d, 0xca, 0x19, 0xa3, 0x57, 0xd5, 0x7f, 0x31, 0x56, - 0xc0, 0xd9, 0xbe, 0xd6, 0xb6, 0x27, 0x6d, 0xd1, 0x03, 0xec, 0x15, 0xbc, 0xdc, 0xce, 0xc5, 0x14, - 0xda, 0xe9, 0x8c, 0x4c, 0x43, 0xb1, 0x3f, 0x37, 0x11, 0xa6, 0x15, 0xc0, 0xef, 0x19, 0x14, 0xda, - 0x2c, 0xc9, 0x97, 0xf7, 0x1f, 0x7e, 0x02, 0xb2, 0xb7, 0xc0, 0xb6, 0x07, 0x6c, 0x8e, 0xb5, 0x70, - 0xb6, 0xfd, 0xf7, 0xdf, 0x8e, 0x16, 0x9c, 0x1c, 0xb6, 0xe0, 0x9f, 0x4b, 0x38, 0xdd, 0x74, 0x24, - 0xbe, 0x54, 0x17, 0x35, 0x0a, 0xd2, 0xd6, 0x99, 0x41, 0x38, 0x45, 0xfa, 0x4a, 0xdf, 0x52, 0xe8, - 0xf2, 0x06, 0x8e, 0x3e, 0x8f, 0x09, 0x3f, 0xe0, 0xea, 0xfc, 0x45, 0xdc, 0xe2, 0xbb, 0x43, 0x2d, - 0x51, 0x7a, 0x47, 0x90, 0x87, 0x74, 0x5a, 0xc1, 0x33, 0x4b, 0x83, 0x11, 0x78, 0xe9, 0x5d, 0x36, - 0x5b, 0xcc, 0xe0, 0x97, 0xdc, 0x71, 0x4d, 0x12, 0x7d, 0x22, 0xe0, 0x31, 0x35, 0x6a, 0x1c, 0x37, - 
0x0d, 0xba, 0x3f, 0x9a, 0xe5, 0x3d, 0x35, 0x11, 0x95, 0x7e, 0x80, 0x34, 0x14, 0x6e, 0x1c, 0x19, - 0xde, 0xe0, 0xa7, 0x41, 0x49, 0x9b, 0x3d, 0xf0, 0xae, 0xd3, 0xc8, 0x35, 0x0d, 0x04, 0xd5, 0x0c, - 0x79, 0xc7, 0xf7, 0x71, 0xd7, 0xa3, 0xcd, 0x8e, 0xf6, 0xfb, 0x7c, 0x60, 0xce, 0xe7, 0x2f, 0xd2, - 0xd7, 0x70, 0xdc, 0xa9, 0x6f, 0xe8, 0xd7, 0x78, 0xa5, 0xa5, 0x12, 0x68, 0xb3, 0x87, 0xf9, 0x62, - 0xfd, 0xb4, 0xbe, 0x53, 0x8f, 0x9e, 0xfd, 0x91, 0xef, 0xf8, 0xbf, 0xcf, 0xbe, 0x79, 0x07, 0x05, - 0x99, 0xa6, 0xe4, 0x3d, 0x17, 0x2d, 0x46, 0xb0, 0xff, 0xe4, 0x05, 0x75, 0xe1, 0xb0, 0x39, 0x89, - 0x65, 0x63, 0xcd, 0xfe, 0x48, 0x92, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x01, 0xdd, 0x67, 0x30, - 0x42, 0x04, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.proto deleted file mode 100644 index 4bb44fb4077..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/erasurecoding.proto +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "ErasureCodingProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "hdfs.proto"; - -message SetErasureCodingPolicyRequestProto { - required string src = 1; - optional ErasureCodingPolicyProto ecPolicy = 2; -} - -message SetErasureCodingPolicyResponseProto { -} - -message GetErasureCodingPoliciesRequestProto { // void request -} - -message GetErasureCodingPoliciesResponseProto { - repeated ErasureCodingPolicyProto ecPolicies = 1; -} - -message GetErasureCodingPolicyRequestProto { - required string src = 1; // path to get the policy info -} - -message GetErasureCodingPolicyResponseProto { - optional ErasureCodingPolicyProto ecPolicy = 1; -} - -/** - * Block erasure coding reconstruction info - */ -message BlockECReconstructionInfoProto { - required ExtendedBlockProto block = 1; - required DatanodeInfosProto sourceDnInfos = 2; - required DatanodeInfosProto targetDnInfos = 3; - required StorageUuidsProto targetStorageUuids = 4; - required StorageTypesProto targetStorageTypes = 5; - required bytes liveBlockIndices = 6; - required ErasureCodingPolicyProto ecPolicy = 7; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.pb.go deleted file mode 100644 index 109720fb328..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.pb.go +++ /dev/null @@ -1,2289 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: hdfs.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import hadoop_common "github.com/colinmarc/hdfs/protocol/hadoop_common" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// * -// Types of recognized storage media. -type StorageTypeProto int32 - -const ( - StorageTypeProto_DISK StorageTypeProto = 1 - StorageTypeProto_SSD StorageTypeProto = 2 - StorageTypeProto_ARCHIVE StorageTypeProto = 3 - StorageTypeProto_RAM_DISK StorageTypeProto = 4 -) - -var StorageTypeProto_name = map[int32]string{ - 1: "DISK", - 2: "SSD", - 3: "ARCHIVE", - 4: "RAM_DISK", -} -var StorageTypeProto_value = map[string]int32{ - "DISK": 1, - "SSD": 2, - "ARCHIVE": 3, - "RAM_DISK": 4, -} - -func (x StorageTypeProto) Enum() *StorageTypeProto { - p := new(StorageTypeProto) - *p = x - return p -} -func (x StorageTypeProto) String() string { - return proto.EnumName(StorageTypeProto_name, int32(x)) -} -func (x *StorageTypeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(StorageTypeProto_value, data, "StorageTypeProto") - if err != nil { - return err - } - *x = StorageTypeProto(value) - return nil -} -func (StorageTypeProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } - -// * -// Cipher suite. -type CipherSuiteProto int32 - -const ( - CipherSuiteProto_UNKNOWN CipherSuiteProto = 1 - CipherSuiteProto_AES_CTR_NOPADDING CipherSuiteProto = 2 -) - -var CipherSuiteProto_name = map[int32]string{ - 1: "UNKNOWN", - 2: "AES_CTR_NOPADDING", -} -var CipherSuiteProto_value = map[string]int32{ - "UNKNOWN": 1, - "AES_CTR_NOPADDING": 2, -} - -func (x CipherSuiteProto) Enum() *CipherSuiteProto { - p := new(CipherSuiteProto) - *p = x - return p -} -func (x CipherSuiteProto) String() string { - return proto.EnumName(CipherSuiteProto_name, int32(x)) -} -func (x *CipherSuiteProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CipherSuiteProto_value, data, "CipherSuiteProto") - if err != nil { - return err - } - *x = CipherSuiteProto(value) - return nil -} -func (CipherSuiteProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } - -// * -// Crypto protocol version used to access encrypted files. 
-type CryptoProtocolVersionProto int32 - -const ( - CryptoProtocolVersionProto_UNKNOWN_PROTOCOL_VERSION CryptoProtocolVersionProto = 1 - CryptoProtocolVersionProto_ENCRYPTION_ZONES CryptoProtocolVersionProto = 2 -) - -var CryptoProtocolVersionProto_name = map[int32]string{ - 1: "UNKNOWN_PROTOCOL_VERSION", - 2: "ENCRYPTION_ZONES", -} -var CryptoProtocolVersionProto_value = map[string]int32{ - "UNKNOWN_PROTOCOL_VERSION": 1, - "ENCRYPTION_ZONES": 2, -} - -func (x CryptoProtocolVersionProto) Enum() *CryptoProtocolVersionProto { - p := new(CryptoProtocolVersionProto) - *p = x - return p -} -func (x CryptoProtocolVersionProto) String() string { - return proto.EnumName(CryptoProtocolVersionProto_name, int32(x)) -} -func (x *CryptoProtocolVersionProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CryptoProtocolVersionProto_value, data, "CryptoProtocolVersionProto") - if err != nil { - return err - } - *x = CryptoProtocolVersionProto(value) - return nil -} -func (CryptoProtocolVersionProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{2} } - -// * -// Checksum algorithms/types used in HDFS -// Make sure this enum's integer values match enum values' id properties defined -// in org.apache.hadoop.util.DataChecksum.Type -type ChecksumTypeProto int32 - -const ( - ChecksumTypeProto_CHECKSUM_NULL ChecksumTypeProto = 0 - ChecksumTypeProto_CHECKSUM_CRC32 ChecksumTypeProto = 1 - ChecksumTypeProto_CHECKSUM_CRC32C ChecksumTypeProto = 2 -) - -var ChecksumTypeProto_name = map[int32]string{ - 0: "CHECKSUM_NULL", - 1: "CHECKSUM_CRC32", - 2: "CHECKSUM_CRC32C", -} -var ChecksumTypeProto_value = map[string]int32{ - "CHECKSUM_NULL": 0, - "CHECKSUM_CRC32": 1, - "CHECKSUM_CRC32C": 2, -} - -func (x ChecksumTypeProto) Enum() *ChecksumTypeProto { - p := new(ChecksumTypeProto) - *p = x - return p -} -func (x ChecksumTypeProto) String() string { - return proto.EnumName(ChecksumTypeProto_name, int32(x)) -} -func (x *ChecksumTypeProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ChecksumTypeProto_value, data, "ChecksumTypeProto") - if err != nil { - return err - } - *x = ChecksumTypeProto(value) - return nil -} -func (ChecksumTypeProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{3} } - -type DatanodeInfoProto_AdminState int32 - -const ( - DatanodeInfoProto_NORMAL DatanodeInfoProto_AdminState = 0 - DatanodeInfoProto_DECOMMISSION_INPROGRESS DatanodeInfoProto_AdminState = 1 - DatanodeInfoProto_DECOMMISSIONED DatanodeInfoProto_AdminState = 2 -) - -var DatanodeInfoProto_AdminState_name = map[int32]string{ - 0: "NORMAL", - 1: "DECOMMISSION_INPROGRESS", - 2: "DECOMMISSIONED", -} -var DatanodeInfoProto_AdminState_value = map[string]int32{ - "NORMAL": 0, - "DECOMMISSION_INPROGRESS": 1, - "DECOMMISSIONED": 2, -} - -func (x DatanodeInfoProto_AdminState) Enum() *DatanodeInfoProto_AdminState { - p := new(DatanodeInfoProto_AdminState) - *p = x - return p -} -func (x DatanodeInfoProto_AdminState) String() string { - return proto.EnumName(DatanodeInfoProto_AdminState_name, int32(x)) -} -func (x *DatanodeInfoProto_AdminState) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DatanodeInfoProto_AdminState_value, data, "DatanodeInfoProto_AdminState") - if err != nil { - return err - } - *x = DatanodeInfoProto_AdminState(value) - return nil -} -func (DatanodeInfoProto_AdminState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor8, []int{4, 0} -} - -type DatanodeStorageProto_StorageState int32 - -const ( - 
DatanodeStorageProto_NORMAL DatanodeStorageProto_StorageState = 0 - DatanodeStorageProto_READ_ONLY_SHARED DatanodeStorageProto_StorageState = 1 -) - -var DatanodeStorageProto_StorageState_name = map[int32]string{ - 0: "NORMAL", - 1: "READ_ONLY_SHARED", -} -var DatanodeStorageProto_StorageState_value = map[string]int32{ - "NORMAL": 0, - "READ_ONLY_SHARED": 1, -} - -func (x DatanodeStorageProto_StorageState) Enum() *DatanodeStorageProto_StorageState { - p := new(DatanodeStorageProto_StorageState) - *p = x - return p -} -func (x DatanodeStorageProto_StorageState) String() string { - return proto.EnumName(DatanodeStorageProto_StorageState_name, int32(x)) -} -func (x *DatanodeStorageProto_StorageState) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DatanodeStorageProto_StorageState_value, data, "DatanodeStorageProto_StorageState") - if err != nil { - return err - } - *x = DatanodeStorageProto_StorageState(value) - return nil -} -func (DatanodeStorageProto_StorageState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor8, []int{5, 0} -} - -type HdfsFileStatusProto_FileType int32 - -const ( - HdfsFileStatusProto_IS_DIR HdfsFileStatusProto_FileType = 1 - HdfsFileStatusProto_IS_FILE HdfsFileStatusProto_FileType = 2 - HdfsFileStatusProto_IS_SYMLINK HdfsFileStatusProto_FileType = 3 -) - -var HdfsFileStatusProto_FileType_name = map[int32]string{ - 1: "IS_DIR", - 2: "IS_FILE", - 3: "IS_SYMLINK", -} -var HdfsFileStatusProto_FileType_value = map[string]int32{ - "IS_DIR": 1, - "IS_FILE": 2, - "IS_SYMLINK": 3, -} - -func (x HdfsFileStatusProto_FileType) Enum() *HdfsFileStatusProto_FileType { - p := new(HdfsFileStatusProto_FileType) - *p = x - return p -} -func (x HdfsFileStatusProto_FileType) String() string { - return proto.EnumName(HdfsFileStatusProto_FileType_name, int32(x)) -} -func (x *HdfsFileStatusProto_FileType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HdfsFileStatusProto_FileType_value, data, "HdfsFileStatusProto_FileType") - if err != nil { - return err - } - *x = HdfsFileStatusProto_FileType(value) - return nil -} -func (HdfsFileStatusProto_FileType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor8, []int{25, 0} -} - -// * -// Extended block idenfies a block -type ExtendedBlockProto struct { - PoolId *string `protobuf:"bytes,1,req,name=poolId" json:"poolId,omitempty"` - BlockId *uint64 `protobuf:"varint,2,req,name=blockId" json:"blockId,omitempty"` - GenerationStamp *uint64 `protobuf:"varint,3,req,name=generationStamp" json:"generationStamp,omitempty"` - NumBytes *uint64 `protobuf:"varint,4,opt,name=numBytes,def=0" json:"numBytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExtendedBlockProto) Reset() { *m = ExtendedBlockProto{} } -func (m *ExtendedBlockProto) String() string { return proto.CompactTextString(m) } -func (*ExtendedBlockProto) ProtoMessage() {} -func (*ExtendedBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } - -const Default_ExtendedBlockProto_NumBytes uint64 = 0 - -func (m *ExtendedBlockProto) GetPoolId() string { - if m != nil && m.PoolId != nil { - return *m.PoolId - } - return "" -} - -func (m *ExtendedBlockProto) GetBlockId() uint64 { - if m != nil && m.BlockId != nil { - return *m.BlockId - } - return 0 -} - -func (m *ExtendedBlockProto) GetGenerationStamp() uint64 { - if m != nil && m.GenerationStamp != nil { - return *m.GenerationStamp - } - return 0 -} - -func (m *ExtendedBlockProto) GetNumBytes() uint64 { - if m != nil && m.NumBytes != nil { - 
return *m.NumBytes - } - return Default_ExtendedBlockProto_NumBytes -} - -// * -// Identifies a Datanode -type DatanodeIDProto struct { - IpAddr *string `protobuf:"bytes,1,req,name=ipAddr" json:"ipAddr,omitempty"` - HostName *string `protobuf:"bytes,2,req,name=hostName" json:"hostName,omitempty"` - DatanodeUuid *string `protobuf:"bytes,3,req,name=datanodeUuid" json:"datanodeUuid,omitempty"` - // upgraded clusters this is the same - // as the original StorageID of the - // Datanode. - XferPort *uint32 `protobuf:"varint,4,req,name=xferPort" json:"xferPort,omitempty"` - InfoPort *uint32 `protobuf:"varint,5,req,name=infoPort" json:"infoPort,omitempty"` - IpcPort *uint32 `protobuf:"varint,6,req,name=ipcPort" json:"ipcPort,omitempty"` - InfoSecurePort *uint32 `protobuf:"varint,7,opt,name=infoSecurePort,def=0" json:"infoSecurePort,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DatanodeIDProto) Reset() { *m = DatanodeIDProto{} } -func (m *DatanodeIDProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeIDProto) ProtoMessage() {} -func (*DatanodeIDProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } - -const Default_DatanodeIDProto_InfoSecurePort uint32 = 0 - -func (m *DatanodeIDProto) GetIpAddr() string { - if m != nil && m.IpAddr != nil { - return *m.IpAddr - } - return "" -} - -func (m *DatanodeIDProto) GetHostName() string { - if m != nil && m.HostName != nil { - return *m.HostName - } - return "" -} - -func (m *DatanodeIDProto) GetDatanodeUuid() string { - if m != nil && m.DatanodeUuid != nil { - return *m.DatanodeUuid - } - return "" -} - -func (m *DatanodeIDProto) GetXferPort() uint32 { - if m != nil && m.XferPort != nil { - return *m.XferPort - } - return 0 -} - -func (m *DatanodeIDProto) GetInfoPort() uint32 { - if m != nil && m.InfoPort != nil { - return *m.InfoPort - } - return 0 -} - -func (m *DatanodeIDProto) GetIpcPort() uint32 { - if m != nil && m.IpcPort != nil { - return *m.IpcPort - } - return 0 -} - -func (m *DatanodeIDProto) GetInfoSecurePort() uint32 { - if m != nil && m.InfoSecurePort != nil { - return *m.InfoSecurePort - } - return Default_DatanodeIDProto_InfoSecurePort -} - -// * -// Datanode local information -type DatanodeLocalInfoProto struct { - SoftwareVersion *string `protobuf:"bytes,1,req,name=softwareVersion" json:"softwareVersion,omitempty"` - ConfigVersion *string `protobuf:"bytes,2,req,name=configVersion" json:"configVersion,omitempty"` - Uptime *uint64 `protobuf:"varint,3,req,name=uptime" json:"uptime,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DatanodeLocalInfoProto) Reset() { *m = DatanodeLocalInfoProto{} } -func (m *DatanodeLocalInfoProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeLocalInfoProto) ProtoMessage() {} -func (*DatanodeLocalInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{2} } - -func (m *DatanodeLocalInfoProto) GetSoftwareVersion() string { - if m != nil && m.SoftwareVersion != nil { - return *m.SoftwareVersion - } - return "" -} - -func (m *DatanodeLocalInfoProto) GetConfigVersion() string { - if m != nil && m.ConfigVersion != nil { - return *m.ConfigVersion - } - return "" -} - -func (m *DatanodeLocalInfoProto) GetUptime() uint64 { - if m != nil && m.Uptime != nil { - return *m.Uptime - } - return 0 -} - -// * -// DatanodeInfo array -type DatanodeInfosProto struct { - Datanodes []*DatanodeInfoProto `protobuf:"bytes,1,rep,name=datanodes" json:"datanodes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m 
*DatanodeInfosProto) Reset() { *m = DatanodeInfosProto{} } -func (m *DatanodeInfosProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeInfosProto) ProtoMessage() {} -func (*DatanodeInfosProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{3} } - -func (m *DatanodeInfosProto) GetDatanodes() []*DatanodeInfoProto { - if m != nil { - return m.Datanodes - } - return nil -} - -// * -// The status of a Datanode -type DatanodeInfoProto struct { - Id *DatanodeIDProto `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` - Capacity *uint64 `protobuf:"varint,2,opt,name=capacity,def=0" json:"capacity,omitempty"` - DfsUsed *uint64 `protobuf:"varint,3,opt,name=dfsUsed,def=0" json:"dfsUsed,omitempty"` - Remaining *uint64 `protobuf:"varint,4,opt,name=remaining,def=0" json:"remaining,omitempty"` - BlockPoolUsed *uint64 `protobuf:"varint,5,opt,name=blockPoolUsed,def=0" json:"blockPoolUsed,omitempty"` - LastUpdate *uint64 `protobuf:"varint,6,opt,name=lastUpdate,def=0" json:"lastUpdate,omitempty"` - XceiverCount *uint32 `protobuf:"varint,7,opt,name=xceiverCount,def=0" json:"xceiverCount,omitempty"` - Location *string `protobuf:"bytes,8,opt,name=location" json:"location,omitempty"` - AdminState *DatanodeInfoProto_AdminState `protobuf:"varint,10,opt,name=adminState,enum=hadoop.hdfs.DatanodeInfoProto_AdminState,def=0" json:"adminState,omitempty"` - CacheCapacity *uint64 `protobuf:"varint,11,opt,name=cacheCapacity,def=0" json:"cacheCapacity,omitempty"` - CacheUsed *uint64 `protobuf:"varint,12,opt,name=cacheUsed,def=0" json:"cacheUsed,omitempty"` - LastUpdateMonotonic *uint64 `protobuf:"varint,13,opt,name=lastUpdateMonotonic,def=0" json:"lastUpdateMonotonic,omitempty"` - UpgradeDomain *string `protobuf:"bytes,14,opt,name=upgradeDomain" json:"upgradeDomain,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DatanodeInfoProto) Reset() { *m = DatanodeInfoProto{} } -func (m *DatanodeInfoProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeInfoProto) ProtoMessage() {} -func (*DatanodeInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{4} } - -const Default_DatanodeInfoProto_Capacity uint64 = 0 -const Default_DatanodeInfoProto_DfsUsed uint64 = 0 -const Default_DatanodeInfoProto_Remaining uint64 = 0 -const Default_DatanodeInfoProto_BlockPoolUsed uint64 = 0 -const Default_DatanodeInfoProto_LastUpdate uint64 = 0 -const Default_DatanodeInfoProto_XceiverCount uint32 = 0 -const Default_DatanodeInfoProto_AdminState DatanodeInfoProto_AdminState = DatanodeInfoProto_NORMAL -const Default_DatanodeInfoProto_CacheCapacity uint64 = 0 -const Default_DatanodeInfoProto_CacheUsed uint64 = 0 -const Default_DatanodeInfoProto_LastUpdateMonotonic uint64 = 0 - -func (m *DatanodeInfoProto) GetId() *DatanodeIDProto { - if m != nil { - return m.Id - } - return nil -} - -func (m *DatanodeInfoProto) GetCapacity() uint64 { - if m != nil && m.Capacity != nil { - return *m.Capacity - } - return Default_DatanodeInfoProto_Capacity -} - -func (m *DatanodeInfoProto) GetDfsUsed() uint64 { - if m != nil && m.DfsUsed != nil { - return *m.DfsUsed - } - return Default_DatanodeInfoProto_DfsUsed -} - -func (m *DatanodeInfoProto) GetRemaining() uint64 { - if m != nil && m.Remaining != nil { - return *m.Remaining - } - return Default_DatanodeInfoProto_Remaining -} - -func (m *DatanodeInfoProto) GetBlockPoolUsed() uint64 { - if m != nil && m.BlockPoolUsed != nil { - return *m.BlockPoolUsed - } - return Default_DatanodeInfoProto_BlockPoolUsed -} - -func (m 
*DatanodeInfoProto) GetLastUpdate() uint64 { - if m != nil && m.LastUpdate != nil { - return *m.LastUpdate - } - return Default_DatanodeInfoProto_LastUpdate -} - -func (m *DatanodeInfoProto) GetXceiverCount() uint32 { - if m != nil && m.XceiverCount != nil { - return *m.XceiverCount - } - return Default_DatanodeInfoProto_XceiverCount -} - -func (m *DatanodeInfoProto) GetLocation() string { - if m != nil && m.Location != nil { - return *m.Location - } - return "" -} - -func (m *DatanodeInfoProto) GetAdminState() DatanodeInfoProto_AdminState { - if m != nil && m.AdminState != nil { - return *m.AdminState - } - return Default_DatanodeInfoProto_AdminState -} - -func (m *DatanodeInfoProto) GetCacheCapacity() uint64 { - if m != nil && m.CacheCapacity != nil { - return *m.CacheCapacity - } - return Default_DatanodeInfoProto_CacheCapacity -} - -func (m *DatanodeInfoProto) GetCacheUsed() uint64 { - if m != nil && m.CacheUsed != nil { - return *m.CacheUsed - } - return Default_DatanodeInfoProto_CacheUsed -} - -func (m *DatanodeInfoProto) GetLastUpdateMonotonic() uint64 { - if m != nil && m.LastUpdateMonotonic != nil { - return *m.LastUpdateMonotonic - } - return Default_DatanodeInfoProto_LastUpdateMonotonic -} - -func (m *DatanodeInfoProto) GetUpgradeDomain() string { - if m != nil && m.UpgradeDomain != nil { - return *m.UpgradeDomain - } - return "" -} - -// * -// Represents a storage available on the datanode -type DatanodeStorageProto struct { - StorageUuid *string `protobuf:"bytes,1,req,name=storageUuid" json:"storageUuid,omitempty"` - State *DatanodeStorageProto_StorageState `protobuf:"varint,2,opt,name=state,enum=hadoop.hdfs.DatanodeStorageProto_StorageState,def=0" json:"state,omitempty"` - StorageType *StorageTypeProto `protobuf:"varint,3,opt,name=storageType,enum=hadoop.hdfs.StorageTypeProto,def=1" json:"storageType,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DatanodeStorageProto) Reset() { *m = DatanodeStorageProto{} } -func (m *DatanodeStorageProto) String() string { return proto.CompactTextString(m) } -func (*DatanodeStorageProto) ProtoMessage() {} -func (*DatanodeStorageProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{5} } - -const Default_DatanodeStorageProto_State DatanodeStorageProto_StorageState = DatanodeStorageProto_NORMAL -const Default_DatanodeStorageProto_StorageType StorageTypeProto = StorageTypeProto_DISK - -func (m *DatanodeStorageProto) GetStorageUuid() string { - if m != nil && m.StorageUuid != nil { - return *m.StorageUuid - } - return "" -} - -func (m *DatanodeStorageProto) GetState() DatanodeStorageProto_StorageState { - if m != nil && m.State != nil { - return *m.State - } - return Default_DatanodeStorageProto_State -} - -func (m *DatanodeStorageProto) GetStorageType() StorageTypeProto { - if m != nil && m.StorageType != nil { - return *m.StorageType - } - return Default_DatanodeStorageProto_StorageType -} - -type StorageReportProto struct { - StorageUuid *string `protobuf:"bytes,1,req,name=storageUuid" json:"storageUuid,omitempty"` - Failed *bool `protobuf:"varint,2,opt,name=failed,def=0" json:"failed,omitempty"` - Capacity *uint64 `protobuf:"varint,3,opt,name=capacity,def=0" json:"capacity,omitempty"` - DfsUsed *uint64 `protobuf:"varint,4,opt,name=dfsUsed,def=0" json:"dfsUsed,omitempty"` - Remaining *uint64 `protobuf:"varint,5,opt,name=remaining,def=0" json:"remaining,omitempty"` - BlockPoolUsed *uint64 `protobuf:"varint,6,opt,name=blockPoolUsed,def=0" json:"blockPoolUsed,omitempty"` - Storage *DatanodeStorageProto 
`protobuf:"bytes,7,opt,name=storage" json:"storage,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StorageReportProto) Reset() { *m = StorageReportProto{} } -func (m *StorageReportProto) String() string { return proto.CompactTextString(m) } -func (*StorageReportProto) ProtoMessage() {} -func (*StorageReportProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{6} } - -const Default_StorageReportProto_Failed bool = false -const Default_StorageReportProto_Capacity uint64 = 0 -const Default_StorageReportProto_DfsUsed uint64 = 0 -const Default_StorageReportProto_Remaining uint64 = 0 -const Default_StorageReportProto_BlockPoolUsed uint64 = 0 - -func (m *StorageReportProto) GetStorageUuid() string { - if m != nil && m.StorageUuid != nil { - return *m.StorageUuid - } - return "" -} - -func (m *StorageReportProto) GetFailed() bool { - if m != nil && m.Failed != nil { - return *m.Failed - } - return Default_StorageReportProto_Failed -} - -func (m *StorageReportProto) GetCapacity() uint64 { - if m != nil && m.Capacity != nil { - return *m.Capacity - } - return Default_StorageReportProto_Capacity -} - -func (m *StorageReportProto) GetDfsUsed() uint64 { - if m != nil && m.DfsUsed != nil { - return *m.DfsUsed - } - return Default_StorageReportProto_DfsUsed -} - -func (m *StorageReportProto) GetRemaining() uint64 { - if m != nil && m.Remaining != nil { - return *m.Remaining - } - return Default_StorageReportProto_Remaining -} - -func (m *StorageReportProto) GetBlockPoolUsed() uint64 { - if m != nil && m.BlockPoolUsed != nil { - return *m.BlockPoolUsed - } - return Default_StorageReportProto_BlockPoolUsed -} - -func (m *StorageReportProto) GetStorage() *DatanodeStorageProto { - if m != nil { - return m.Storage - } - return nil -} - -// * -// Summary of a file or directory -type ContentSummaryProto struct { - Length *uint64 `protobuf:"varint,1,req,name=length" json:"length,omitempty"` - FileCount *uint64 `protobuf:"varint,2,req,name=fileCount" json:"fileCount,omitempty"` - DirectoryCount *uint64 `protobuf:"varint,3,req,name=directoryCount" json:"directoryCount,omitempty"` - Quota *uint64 `protobuf:"varint,4,req,name=quota" json:"quota,omitempty"` - SpaceConsumed *uint64 `protobuf:"varint,5,req,name=spaceConsumed" json:"spaceConsumed,omitempty"` - SpaceQuota *uint64 `protobuf:"varint,6,req,name=spaceQuota" json:"spaceQuota,omitempty"` - TypeQuotaInfos *StorageTypeQuotaInfosProto `protobuf:"bytes,7,opt,name=typeQuotaInfos" json:"typeQuotaInfos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ContentSummaryProto) Reset() { *m = ContentSummaryProto{} } -func (m *ContentSummaryProto) String() string { return proto.CompactTextString(m) } -func (*ContentSummaryProto) ProtoMessage() {} -func (*ContentSummaryProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{7} } - -func (m *ContentSummaryProto) GetLength() uint64 { - if m != nil && m.Length != nil { - return *m.Length - } - return 0 -} - -func (m *ContentSummaryProto) GetFileCount() uint64 { - if m != nil && m.FileCount != nil { - return *m.FileCount - } - return 0 -} - -func (m *ContentSummaryProto) GetDirectoryCount() uint64 { - if m != nil && m.DirectoryCount != nil { - return *m.DirectoryCount - } - return 0 -} - -func (m *ContentSummaryProto) GetQuota() uint64 { - if m != nil && m.Quota != nil { - return *m.Quota - } - return 0 -} - -func (m *ContentSummaryProto) GetSpaceConsumed() uint64 { - if m != nil && m.SpaceConsumed != nil { - return *m.SpaceConsumed - } - return 0 -} - -func (m 
*ContentSummaryProto) GetSpaceQuota() uint64 { - if m != nil && m.SpaceQuota != nil { - return *m.SpaceQuota - } - return 0 -} - -func (m *ContentSummaryProto) GetTypeQuotaInfos() *StorageTypeQuotaInfosProto { - if m != nil { - return m.TypeQuotaInfos - } - return nil -} - -// * -// Summary of quota usage of a directory -type QuotaUsageProto struct { - FileAndDirectoryCount *uint64 `protobuf:"varint,1,req,name=fileAndDirectoryCount" json:"fileAndDirectoryCount,omitempty"` - Quota *uint64 `protobuf:"varint,2,req,name=quota" json:"quota,omitempty"` - SpaceConsumed *uint64 `protobuf:"varint,3,req,name=spaceConsumed" json:"spaceConsumed,omitempty"` - SpaceQuota *uint64 `protobuf:"varint,4,req,name=spaceQuota" json:"spaceQuota,omitempty"` - TypeQuotaInfos *StorageTypeQuotaInfosProto `protobuf:"bytes,5,opt,name=typeQuotaInfos" json:"typeQuotaInfos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *QuotaUsageProto) Reset() { *m = QuotaUsageProto{} } -func (m *QuotaUsageProto) String() string { return proto.CompactTextString(m) } -func (*QuotaUsageProto) ProtoMessage() {} -func (*QuotaUsageProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{8} } - -func (m *QuotaUsageProto) GetFileAndDirectoryCount() uint64 { - if m != nil && m.FileAndDirectoryCount != nil { - return *m.FileAndDirectoryCount - } - return 0 -} - -func (m *QuotaUsageProto) GetQuota() uint64 { - if m != nil && m.Quota != nil { - return *m.Quota - } - return 0 -} - -func (m *QuotaUsageProto) GetSpaceConsumed() uint64 { - if m != nil && m.SpaceConsumed != nil { - return *m.SpaceConsumed - } - return 0 -} - -func (m *QuotaUsageProto) GetSpaceQuota() uint64 { - if m != nil && m.SpaceQuota != nil { - return *m.SpaceQuota - } - return 0 -} - -func (m *QuotaUsageProto) GetTypeQuotaInfos() *StorageTypeQuotaInfosProto { - if m != nil { - return m.TypeQuotaInfos - } - return nil -} - -// * -// Storage type quota and usage information of a file or directory -type StorageTypeQuotaInfosProto struct { - TypeQuotaInfo []*StorageTypeQuotaInfoProto `protobuf:"bytes,1,rep,name=typeQuotaInfo" json:"typeQuotaInfo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StorageTypeQuotaInfosProto) Reset() { *m = StorageTypeQuotaInfosProto{} } -func (m *StorageTypeQuotaInfosProto) String() string { return proto.CompactTextString(m) } -func (*StorageTypeQuotaInfosProto) ProtoMessage() {} -func (*StorageTypeQuotaInfosProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{9} } - -func (m *StorageTypeQuotaInfosProto) GetTypeQuotaInfo() []*StorageTypeQuotaInfoProto { - if m != nil { - return m.TypeQuotaInfo - } - return nil -} - -type StorageTypeQuotaInfoProto struct { - Type *StorageTypeProto `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.StorageTypeProto" json:"type,omitempty"` - Quota *uint64 `protobuf:"varint,2,req,name=quota" json:"quota,omitempty"` - Consumed *uint64 `protobuf:"varint,3,req,name=consumed" json:"consumed,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StorageTypeQuotaInfoProto) Reset() { *m = StorageTypeQuotaInfoProto{} } -func (m *StorageTypeQuotaInfoProto) String() string { return proto.CompactTextString(m) } -func (*StorageTypeQuotaInfoProto) ProtoMessage() {} -func (*StorageTypeQuotaInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{10} } - -func (m *StorageTypeQuotaInfoProto) GetType() StorageTypeProto { - if m != nil && m.Type != nil { - return *m.Type - } - return StorageTypeProto_DISK -} - -func (m *StorageTypeQuotaInfoProto) GetQuota() 
uint64 { - if m != nil && m.Quota != nil { - return *m.Quota - } - return 0 -} - -func (m *StorageTypeQuotaInfoProto) GetConsumed() uint64 { - if m != nil && m.Consumed != nil { - return *m.Consumed - } - return 0 -} - -// * -// Contains a list of paths corresponding to corrupt files and a cookie -// used for iterative calls to NameNode.listCorruptFileBlocks. -// -type CorruptFileBlocksProto struct { - Files []string `protobuf:"bytes,1,rep,name=files" json:"files,omitempty"` - Cookie *string `protobuf:"bytes,2,req,name=cookie" json:"cookie,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CorruptFileBlocksProto) Reset() { *m = CorruptFileBlocksProto{} } -func (m *CorruptFileBlocksProto) String() string { return proto.CompactTextString(m) } -func (*CorruptFileBlocksProto) ProtoMessage() {} -func (*CorruptFileBlocksProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{11} } - -func (m *CorruptFileBlocksProto) GetFiles() []string { - if m != nil { - return m.Files - } - return nil -} - -func (m *CorruptFileBlocksProto) GetCookie() string { - if m != nil && m.Cookie != nil { - return *m.Cookie - } - return "" -} - -// * -// File or Directory permission - same spec as posix -type FsPermissionProto struct { - Perm *uint32 `protobuf:"varint,1,req,name=perm" json:"perm,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FsPermissionProto) Reset() { *m = FsPermissionProto{} } -func (m *FsPermissionProto) String() string { return proto.CompactTextString(m) } -func (*FsPermissionProto) ProtoMessage() {} -func (*FsPermissionProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{12} } - -func (m *FsPermissionProto) GetPerm() uint32 { - if m != nil && m.Perm != nil { - return *m.Perm - } - return 0 -} - -// * -// A list of storage types. -type StorageTypesProto struct { - StorageTypes []StorageTypeProto `protobuf:"varint,1,rep,name=storageTypes,enum=hadoop.hdfs.StorageTypeProto" json:"storageTypes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StorageTypesProto) Reset() { *m = StorageTypesProto{} } -func (m *StorageTypesProto) String() string { return proto.CompactTextString(m) } -func (*StorageTypesProto) ProtoMessage() {} -func (*StorageTypesProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{13} } - -func (m *StorageTypesProto) GetStorageTypes() []StorageTypeProto { - if m != nil { - return m.StorageTypes - } - return nil -} - -// * -// Block replica storage policy. -type BlockStoragePolicyProto struct { - PolicyId *uint32 `protobuf:"varint,1,req,name=policyId" json:"policyId,omitempty"` - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - // a list of storage types for storing the block replicas when creating a - // block. - CreationPolicy *StorageTypesProto `protobuf:"bytes,3,req,name=creationPolicy" json:"creationPolicy,omitempty"` - // A list of storage types for creation fallback storage. 
- CreationFallbackPolicy *StorageTypesProto `protobuf:"bytes,4,opt,name=creationFallbackPolicy" json:"creationFallbackPolicy,omitempty"` - ReplicationFallbackPolicy *StorageTypesProto `protobuf:"bytes,5,opt,name=replicationFallbackPolicy" json:"replicationFallbackPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BlockStoragePolicyProto) Reset() { *m = BlockStoragePolicyProto{} } -func (m *BlockStoragePolicyProto) String() string { return proto.CompactTextString(m) } -func (*BlockStoragePolicyProto) ProtoMessage() {} -func (*BlockStoragePolicyProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{14} } - -func (m *BlockStoragePolicyProto) GetPolicyId() uint32 { - if m != nil && m.PolicyId != nil { - return *m.PolicyId - } - return 0 -} - -func (m *BlockStoragePolicyProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *BlockStoragePolicyProto) GetCreationPolicy() *StorageTypesProto { - if m != nil { - return m.CreationPolicy - } - return nil -} - -func (m *BlockStoragePolicyProto) GetCreationFallbackPolicy() *StorageTypesProto { - if m != nil { - return m.CreationFallbackPolicy - } - return nil -} - -func (m *BlockStoragePolicyProto) GetReplicationFallbackPolicy() *StorageTypesProto { - if m != nil { - return m.ReplicationFallbackPolicy - } - return nil -} - -// * -// A LocatedBlock gives information about a block and its location. -type LocatedBlockProto struct { - B *ExtendedBlockProto `protobuf:"bytes,1,req,name=b" json:"b,omitempty"` - Offset *uint64 `protobuf:"varint,2,req,name=offset" json:"offset,omitempty"` - Locs []*DatanodeInfoProto `protobuf:"bytes,3,rep,name=locs" json:"locs,omitempty"` - Corrupt *bool `protobuf:"varint,4,req,name=corrupt" json:"corrupt,omitempty"` - BlockToken *hadoop_common.TokenProto `protobuf:"bytes,5,req,name=blockToken" json:"blockToken,omitempty"` - IsCached []bool `protobuf:"varint,6,rep,packed,name=isCached" json:"isCached,omitempty"` - StorageTypes []StorageTypeProto `protobuf:"varint,7,rep,name=storageTypes,enum=hadoop.hdfs.StorageTypeProto" json:"storageTypes,omitempty"` - StorageIDs []string `protobuf:"bytes,8,rep,name=storageIDs" json:"storageIDs,omitempty"` - // striped block related fields - BlockIndices []byte `protobuf:"bytes,9,opt,name=blockIndices" json:"blockIndices,omitempty"` - BlockTokens []*hadoop_common.TokenProto `protobuf:"bytes,10,rep,name=blockTokens" json:"blockTokens,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LocatedBlockProto) Reset() { *m = LocatedBlockProto{} } -func (m *LocatedBlockProto) String() string { return proto.CompactTextString(m) } -func (*LocatedBlockProto) ProtoMessage() {} -func (*LocatedBlockProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{15} } - -func (m *LocatedBlockProto) GetB() *ExtendedBlockProto { - if m != nil { - return m.B - } - return nil -} - -func (m *LocatedBlockProto) GetOffset() uint64 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return 0 -} - -func (m *LocatedBlockProto) GetLocs() []*DatanodeInfoProto { - if m != nil { - return m.Locs - } - return nil -} - -func (m *LocatedBlockProto) GetCorrupt() bool { - if m != nil && m.Corrupt != nil { - return *m.Corrupt - } - return false -} - -func (m *LocatedBlockProto) GetBlockToken() *hadoop_common.TokenProto { - if m != nil { - return m.BlockToken - } - return nil -} - -func (m *LocatedBlockProto) GetIsCached() []bool { - if m != nil { - return m.IsCached - } - return nil -} - -func (m *LocatedBlockProto) 
GetStorageTypes() []StorageTypeProto { - if m != nil { - return m.StorageTypes - } - return nil -} - -func (m *LocatedBlockProto) GetStorageIDs() []string { - if m != nil { - return m.StorageIDs - } - return nil -} - -func (m *LocatedBlockProto) GetBlockIndices() []byte { - if m != nil { - return m.BlockIndices - } - return nil -} - -func (m *LocatedBlockProto) GetBlockTokens() []*hadoop_common.TokenProto { - if m != nil { - return m.BlockTokens - } - return nil -} - -type DataEncryptionKeyProto struct { - KeyId *uint32 `protobuf:"varint,1,req,name=keyId" json:"keyId,omitempty"` - BlockPoolId *string `protobuf:"bytes,2,req,name=blockPoolId" json:"blockPoolId,omitempty"` - Nonce []byte `protobuf:"bytes,3,req,name=nonce" json:"nonce,omitempty"` - EncryptionKey []byte `protobuf:"bytes,4,req,name=encryptionKey" json:"encryptionKey,omitempty"` - ExpiryDate *uint64 `protobuf:"varint,5,req,name=expiryDate" json:"expiryDate,omitempty"` - EncryptionAlgorithm *string `protobuf:"bytes,6,opt,name=encryptionAlgorithm" json:"encryptionAlgorithm,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DataEncryptionKeyProto) Reset() { *m = DataEncryptionKeyProto{} } -func (m *DataEncryptionKeyProto) String() string { return proto.CompactTextString(m) } -func (*DataEncryptionKeyProto) ProtoMessage() {} -func (*DataEncryptionKeyProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{16} } - -func (m *DataEncryptionKeyProto) GetKeyId() uint32 { - if m != nil && m.KeyId != nil { - return *m.KeyId - } - return 0 -} - -func (m *DataEncryptionKeyProto) GetBlockPoolId() string { - if m != nil && m.BlockPoolId != nil { - return *m.BlockPoolId - } - return "" -} - -func (m *DataEncryptionKeyProto) GetNonce() []byte { - if m != nil { - return m.Nonce - } - return nil -} - -func (m *DataEncryptionKeyProto) GetEncryptionKey() []byte { - if m != nil { - return m.EncryptionKey - } - return nil -} - -func (m *DataEncryptionKeyProto) GetExpiryDate() uint64 { - if m != nil && m.ExpiryDate != nil { - return *m.ExpiryDate - } - return 0 -} - -func (m *DataEncryptionKeyProto) GetEncryptionAlgorithm() string { - if m != nil && m.EncryptionAlgorithm != nil { - return *m.EncryptionAlgorithm - } - return "" -} - -// * -// Encryption information for a file. 
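[Editorial aside before the encryption message that follows.] Every optional or required scalar in these generated proto2 types is a pointer, which is why the file is dominated by nil-guarded getters and proto.* boxing helpers. A minimal, self-contained usage sketch; the vendored import path and alias below are assumptions for illustration, not something stated in this patch. The FileEncryptionInfoProto message itself continues right after.

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" // assumed vendored path
    )

    func main() {
        // proto2 scalar fields are pointers; the proto.* helpers box literals.
        key := &hdfs.DataEncryptionKeyProto{
            KeyId:         proto.Uint32(7),
            BlockPoolId:   proto.String("BP-1394138227"),
            Nonce:         []byte{0x01, 0x02},
            EncryptionKey: []byte{0x03, 0x04},
            ExpiryDate:    proto.Uint64(1723700000000),
        }
        buf, err := proto.Marshal(key) // all required fields are set, so this succeeds
        if err != nil {
            log.Fatal(err)
        }
        decoded := &hdfs.DataEncryptionKeyProto{}
        if err := proto.Unmarshal(buf, decoded); err != nil {
            log.Fatal(err)
        }
        fmt.Println(decoded.GetKeyId(), decoded.GetBlockPoolId())
    }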
-type FileEncryptionInfoProto struct { - Suite *CipherSuiteProto `protobuf:"varint,1,req,name=suite,enum=hadoop.hdfs.CipherSuiteProto" json:"suite,omitempty"` - CryptoProtocolVersion *CryptoProtocolVersionProto `protobuf:"varint,2,req,name=cryptoProtocolVersion,enum=hadoop.hdfs.CryptoProtocolVersionProto" json:"cryptoProtocolVersion,omitempty"` - Key []byte `protobuf:"bytes,3,req,name=key" json:"key,omitempty"` - Iv []byte `protobuf:"bytes,4,req,name=iv" json:"iv,omitempty"` - KeyName *string `protobuf:"bytes,5,req,name=keyName" json:"keyName,omitempty"` - EzKeyVersionName *string `protobuf:"bytes,6,req,name=ezKeyVersionName" json:"ezKeyVersionName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FileEncryptionInfoProto) Reset() { *m = FileEncryptionInfoProto{} } -func (m *FileEncryptionInfoProto) String() string { return proto.CompactTextString(m) } -func (*FileEncryptionInfoProto) ProtoMessage() {} -func (*FileEncryptionInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{17} } - -func (m *FileEncryptionInfoProto) GetSuite() CipherSuiteProto { - if m != nil && m.Suite != nil { - return *m.Suite - } - return CipherSuiteProto_UNKNOWN -} - -func (m *FileEncryptionInfoProto) GetCryptoProtocolVersion() CryptoProtocolVersionProto { - if m != nil && m.CryptoProtocolVersion != nil { - return *m.CryptoProtocolVersion - } - return CryptoProtocolVersionProto_UNKNOWN_PROTOCOL_VERSION -} - -func (m *FileEncryptionInfoProto) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *FileEncryptionInfoProto) GetIv() []byte { - if m != nil { - return m.Iv - } - return nil -} - -func (m *FileEncryptionInfoProto) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *FileEncryptionInfoProto) GetEzKeyVersionName() string { - if m != nil && m.EzKeyVersionName != nil { - return *m.EzKeyVersionName - } - return "" -} - -// * -// Encryption information for an individual -// file within an encryption zone -type PerFileEncryptionInfoProto struct { - Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` - Iv []byte `protobuf:"bytes,2,req,name=iv" json:"iv,omitempty"` - EzKeyVersionName *string `protobuf:"bytes,3,req,name=ezKeyVersionName" json:"ezKeyVersionName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PerFileEncryptionInfoProto) Reset() { *m = PerFileEncryptionInfoProto{} } -func (m *PerFileEncryptionInfoProto) String() string { return proto.CompactTextString(m) } -func (*PerFileEncryptionInfoProto) ProtoMessage() {} -func (*PerFileEncryptionInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{18} } - -func (m *PerFileEncryptionInfoProto) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *PerFileEncryptionInfoProto) GetIv() []byte { - if m != nil { - return m.Iv - } - return nil -} - -func (m *PerFileEncryptionInfoProto) GetEzKeyVersionName() string { - if m != nil && m.EzKeyVersionName != nil { - return *m.EzKeyVersionName - } - return "" -} - -// * -// Encryption information for an encryption -// zone -type ZoneEncryptionInfoProto struct { - Suite *CipherSuiteProto `protobuf:"varint,1,req,name=suite,enum=hadoop.hdfs.CipherSuiteProto" json:"suite,omitempty"` - CryptoProtocolVersion *CryptoProtocolVersionProto `protobuf:"varint,2,req,name=cryptoProtocolVersion,enum=hadoop.hdfs.CryptoProtocolVersionProto" json:"cryptoProtocolVersion,omitempty"` - KeyName *string `protobuf:"bytes,3,req,name=keyName" json:"keyName,omitempty"` 
- XXX_unrecognized []byte `json:"-"` -} - -func (m *ZoneEncryptionInfoProto) Reset() { *m = ZoneEncryptionInfoProto{} } -func (m *ZoneEncryptionInfoProto) String() string { return proto.CompactTextString(m) } -func (*ZoneEncryptionInfoProto) ProtoMessage() {} -func (*ZoneEncryptionInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{19} } - -func (m *ZoneEncryptionInfoProto) GetSuite() CipherSuiteProto { - if m != nil && m.Suite != nil { - return *m.Suite - } - return CipherSuiteProto_UNKNOWN -} - -func (m *ZoneEncryptionInfoProto) GetCryptoProtocolVersion() CryptoProtocolVersionProto { - if m != nil && m.CryptoProtocolVersion != nil { - return *m.CryptoProtocolVersion - } - return CryptoProtocolVersionProto_UNKNOWN_PROTOCOL_VERSION -} - -func (m *ZoneEncryptionInfoProto) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -// * -// Cipher option -type CipherOptionProto struct { - Suite *CipherSuiteProto `protobuf:"varint,1,req,name=suite,enum=hadoop.hdfs.CipherSuiteProto" json:"suite,omitempty"` - InKey []byte `protobuf:"bytes,2,opt,name=inKey" json:"inKey,omitempty"` - InIv []byte `protobuf:"bytes,3,opt,name=inIv" json:"inIv,omitempty"` - OutKey []byte `protobuf:"bytes,4,opt,name=outKey" json:"outKey,omitempty"` - OutIv []byte `protobuf:"bytes,5,opt,name=outIv" json:"outIv,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CipherOptionProto) Reset() { *m = CipherOptionProto{} } -func (m *CipherOptionProto) String() string { return proto.CompactTextString(m) } -func (*CipherOptionProto) ProtoMessage() {} -func (*CipherOptionProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{20} } - -func (m *CipherOptionProto) GetSuite() CipherSuiteProto { - if m != nil && m.Suite != nil { - return *m.Suite - } - return CipherSuiteProto_UNKNOWN -} - -func (m *CipherOptionProto) GetInKey() []byte { - if m != nil { - return m.InKey - } - return nil -} - -func (m *CipherOptionProto) GetInIv() []byte { - if m != nil { - return m.InIv - } - return nil -} - -func (m *CipherOptionProto) GetOutKey() []byte { - if m != nil { - return m.OutKey - } - return nil -} - -func (m *CipherOptionProto) GetOutIv() []byte { - if m != nil { - return m.OutIv - } - return nil -} - -// * -// A set of file blocks and their locations. 
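One property worth calling out before the block-location messages below: each generated getter checks its receiver for nil, and Go permits pointer-receiver methods to run on a nil pointer, so deeply nested reads need no explicit nil checks. A hedged sketch under the same assumed import alias as the earlier example:

    // totalBlockBytes sums the size reported for each located block. It is
    // safe even when blocks, an individual entry, or its embedded
    // ExtendedBlockProto is nil, because every getter is nil-guarded.
    func totalBlockBytes(blocks *hdfs.LocatedBlocksProto) uint64 {
        var n uint64
        for _, lb := range blocks.GetBlocks() { // GetBlocks on nil returns nil
            n += lb.GetB().GetNumBytes() // unset NumBytes yields its default, 0
        }
        return n
    }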
-type LocatedBlocksProto struct { - FileLength *uint64 `protobuf:"varint,1,req,name=fileLength" json:"fileLength,omitempty"` - Blocks []*LocatedBlockProto `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - UnderConstruction *bool `protobuf:"varint,3,req,name=underConstruction" json:"underConstruction,omitempty"` - LastBlock *LocatedBlockProto `protobuf:"bytes,4,opt,name=lastBlock" json:"lastBlock,omitempty"` - IsLastBlockComplete *bool `protobuf:"varint,5,req,name=isLastBlockComplete" json:"isLastBlockComplete,omitempty"` - FileEncryptionInfo *FileEncryptionInfoProto `protobuf:"bytes,6,opt,name=fileEncryptionInfo" json:"fileEncryptionInfo,omitempty"` - // Optional field for erasure coding - EcPolicy *ErasureCodingPolicyProto `protobuf:"bytes,7,opt,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LocatedBlocksProto) Reset() { *m = LocatedBlocksProto{} } -func (m *LocatedBlocksProto) String() string { return proto.CompactTextString(m) } -func (*LocatedBlocksProto) ProtoMessage() {} -func (*LocatedBlocksProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{21} } - -func (m *LocatedBlocksProto) GetFileLength() uint64 { - if m != nil && m.FileLength != nil { - return *m.FileLength - } - return 0 -} - -func (m *LocatedBlocksProto) GetBlocks() []*LocatedBlockProto { - if m != nil { - return m.Blocks - } - return nil -} - -func (m *LocatedBlocksProto) GetUnderConstruction() bool { - if m != nil && m.UnderConstruction != nil { - return *m.UnderConstruction - } - return false -} - -func (m *LocatedBlocksProto) GetLastBlock() *LocatedBlockProto { - if m != nil { - return m.LastBlock - } - return nil -} - -func (m *LocatedBlocksProto) GetIsLastBlockComplete() bool { - if m != nil && m.IsLastBlockComplete != nil { - return *m.IsLastBlockComplete - } - return false -} - -func (m *LocatedBlocksProto) GetFileEncryptionInfo() *FileEncryptionInfoProto { - if m != nil { - return m.FileEncryptionInfo - } - return nil -} - -func (m *LocatedBlocksProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -// * -// ECSchema options entry -type ECSchemaOptionEntryProto struct { - Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` - Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ECSchemaOptionEntryProto) Reset() { *m = ECSchemaOptionEntryProto{} } -func (m *ECSchemaOptionEntryProto) String() string { return proto.CompactTextString(m) } -func (*ECSchemaOptionEntryProto) ProtoMessage() {} -func (*ECSchemaOptionEntryProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{22} } - -func (m *ECSchemaOptionEntryProto) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *ECSchemaOptionEntryProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -// * -// ECSchema for erasure coding -type ECSchemaProto struct { - CodecName *string `protobuf:"bytes,1,req,name=codecName" json:"codecName,omitempty"` - DataUnits *uint32 `protobuf:"varint,2,req,name=dataUnits" json:"dataUnits,omitempty"` - ParityUnits *uint32 `protobuf:"varint,3,req,name=parityUnits" json:"parityUnits,omitempty"` - Options []*ECSchemaOptionEntryProto `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ECSchemaProto) Reset() { *m = ECSchemaProto{} } -func (m *ECSchemaProto) String() string { 
return proto.CompactTextString(m) } -func (*ECSchemaProto) ProtoMessage() {} -func (*ECSchemaProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{23} } - -func (m *ECSchemaProto) GetCodecName() string { - if m != nil && m.CodecName != nil { - return *m.CodecName - } - return "" -} - -func (m *ECSchemaProto) GetDataUnits() uint32 { - if m != nil && m.DataUnits != nil { - return *m.DataUnits - } - return 0 -} - -func (m *ECSchemaProto) GetParityUnits() uint32 { - if m != nil && m.ParityUnits != nil { - return *m.ParityUnits - } - return 0 -} - -func (m *ECSchemaProto) GetOptions() []*ECSchemaOptionEntryProto { - if m != nil { - return m.Options - } - return nil -} - -type ErasureCodingPolicyProto struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - Schema *ECSchemaProto `protobuf:"bytes,2,req,name=schema" json:"schema,omitempty"` - CellSize *uint32 `protobuf:"varint,3,req,name=cellSize" json:"cellSize,omitempty"` - Id *uint32 `protobuf:"varint,4,req,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ErasureCodingPolicyProto) Reset() { *m = ErasureCodingPolicyProto{} } -func (m *ErasureCodingPolicyProto) String() string { return proto.CompactTextString(m) } -func (*ErasureCodingPolicyProto) ProtoMessage() {} -func (*ErasureCodingPolicyProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{24} } - -func (m *ErasureCodingPolicyProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ErasureCodingPolicyProto) GetSchema() *ECSchemaProto { - if m != nil { - return m.Schema - } - return nil -} - -func (m *ErasureCodingPolicyProto) GetCellSize() uint32 { - if m != nil && m.CellSize != nil { - return *m.CellSize - } - return 0 -} - -func (m *ErasureCodingPolicyProto) GetId() uint32 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -// * -// Status of a file, directory or symlink -// Optionally includes a file's block locations if requested by client on the rpc call. 
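The Default_* constants generated alongside each message matter for the HdfsFileStatusProto defined just below: proto2 distinguishes a field that was never set from one explicitly set to its default value, and the getters hide that distinction while the raw pointer fields expose it. A short sketch (imports and package alias as in the first example):

    st := &hdfs.HdfsFileStatusProto{}
    fmt.Println(st.GetChildrenNum())   // -1, i.e. Default_HdfsFileStatusProto_ChildrenNum
    fmt.Println(st.ChildrenNum == nil) // true: the field was never set
    st.ChildrenNum = proto.Int32(-1)   // explicitly set to the default value
    fmt.Println(st.ChildrenNum == nil) // false, yet GetChildrenNum() still returns -1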
-type HdfsFileStatusProto struct { - FileType *HdfsFileStatusProto_FileType `protobuf:"varint,1,req,name=fileType,enum=hadoop.hdfs.HdfsFileStatusProto_FileType" json:"fileType,omitempty"` - Path []byte `protobuf:"bytes,2,req,name=path" json:"path,omitempty"` - Length *uint64 `protobuf:"varint,3,req,name=length" json:"length,omitempty"` - Permission *FsPermissionProto `protobuf:"bytes,4,req,name=permission" json:"permission,omitempty"` - Owner *string `protobuf:"bytes,5,req,name=owner" json:"owner,omitempty"` - Group *string `protobuf:"bytes,6,req,name=group" json:"group,omitempty"` - ModificationTime *uint64 `protobuf:"varint,7,req,name=modification_time,json=modificationTime" json:"modification_time,omitempty"` - AccessTime *uint64 `protobuf:"varint,8,req,name=access_time,json=accessTime" json:"access_time,omitempty"` - // Optional fields for symlink - Symlink []byte `protobuf:"bytes,9,opt,name=symlink" json:"symlink,omitempty"` - // Optional fields for file - BlockReplication *uint32 `protobuf:"varint,10,opt,name=block_replication,json=blockReplication,def=0" json:"block_replication,omitempty"` - Blocksize *uint64 `protobuf:"varint,11,opt,name=blocksize,def=0" json:"blocksize,omitempty"` - Locations *LocatedBlocksProto `protobuf:"bytes,12,opt,name=locations" json:"locations,omitempty"` - // Optional field for fileId - FileId *uint64 `protobuf:"varint,13,opt,name=fileId,def=0" json:"fileId,omitempty"` - ChildrenNum *int32 `protobuf:"varint,14,opt,name=childrenNum,def=-1" json:"childrenNum,omitempty"` - // Optional field for file encryption - FileEncryptionInfo *FileEncryptionInfoProto `protobuf:"bytes,15,opt,name=fileEncryptionInfo" json:"fileEncryptionInfo,omitempty"` - StoragePolicy *uint32 `protobuf:"varint,16,opt,name=storagePolicy,def=0" json:"storagePolicy,omitempty"` - // Optional field for erasure coding - EcPolicy *ErasureCodingPolicyProto `protobuf:"bytes,17,opt,name=ecPolicy" json:"ecPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HdfsFileStatusProto) Reset() { *m = HdfsFileStatusProto{} } -func (m *HdfsFileStatusProto) String() string { return proto.CompactTextString(m) } -func (*HdfsFileStatusProto) ProtoMessage() {} -func (*HdfsFileStatusProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{25} } - -const Default_HdfsFileStatusProto_BlockReplication uint32 = 0 -const Default_HdfsFileStatusProto_Blocksize uint64 = 0 -const Default_HdfsFileStatusProto_FileId uint64 = 0 -const Default_HdfsFileStatusProto_ChildrenNum int32 = -1 -const Default_HdfsFileStatusProto_StoragePolicy uint32 = 0 - -func (m *HdfsFileStatusProto) GetFileType() HdfsFileStatusProto_FileType { - if m != nil && m.FileType != nil { - return *m.FileType - } - return HdfsFileStatusProto_IS_DIR -} - -func (m *HdfsFileStatusProto) GetPath() []byte { - if m != nil { - return m.Path - } - return nil -} - -func (m *HdfsFileStatusProto) GetLength() uint64 { - if m != nil && m.Length != nil { - return *m.Length - } - return 0 -} - -func (m *HdfsFileStatusProto) GetPermission() *FsPermissionProto { - if m != nil { - return m.Permission - } - return nil -} - -func (m *HdfsFileStatusProto) GetOwner() string { - if m != nil && m.Owner != nil { - return *m.Owner - } - return "" -} - -func (m *HdfsFileStatusProto) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *HdfsFileStatusProto) GetModificationTime() uint64 { - if m != nil && m.ModificationTime != nil { - return *m.ModificationTime - } - return 0 -} - -func (m *HdfsFileStatusProto) 
GetAccessTime() uint64 { - if m != nil && m.AccessTime != nil { - return *m.AccessTime - } - return 0 -} - -func (m *HdfsFileStatusProto) GetSymlink() []byte { - if m != nil { - return m.Symlink - } - return nil -} - -func (m *HdfsFileStatusProto) GetBlockReplication() uint32 { - if m != nil && m.BlockReplication != nil { - return *m.BlockReplication - } - return Default_HdfsFileStatusProto_BlockReplication -} - -func (m *HdfsFileStatusProto) GetBlocksize() uint64 { - if m != nil && m.Blocksize != nil { - return *m.Blocksize - } - return Default_HdfsFileStatusProto_Blocksize -} - -func (m *HdfsFileStatusProto) GetLocations() *LocatedBlocksProto { - if m != nil { - return m.Locations - } - return nil -} - -func (m *HdfsFileStatusProto) GetFileId() uint64 { - if m != nil && m.FileId != nil { - return *m.FileId - } - return Default_HdfsFileStatusProto_FileId -} - -func (m *HdfsFileStatusProto) GetChildrenNum() int32 { - if m != nil && m.ChildrenNum != nil { - return *m.ChildrenNum - } - return Default_HdfsFileStatusProto_ChildrenNum -} - -func (m *HdfsFileStatusProto) GetFileEncryptionInfo() *FileEncryptionInfoProto { - if m != nil { - return m.FileEncryptionInfo - } - return nil -} - -func (m *HdfsFileStatusProto) GetStoragePolicy() uint32 { - if m != nil && m.StoragePolicy != nil { - return *m.StoragePolicy - } - return Default_HdfsFileStatusProto_StoragePolicy -} - -func (m *HdfsFileStatusProto) GetEcPolicy() *ErasureCodingPolicyProto { - if m != nil { - return m.EcPolicy - } - return nil -} - -// * -// HDFS Server Defaults -type FsServerDefaultsProto struct { - BlockSize *uint64 `protobuf:"varint,1,req,name=blockSize" json:"blockSize,omitempty"` - BytesPerChecksum *uint32 `protobuf:"varint,2,req,name=bytesPerChecksum" json:"bytesPerChecksum,omitempty"` - WritePacketSize *uint32 `protobuf:"varint,3,req,name=writePacketSize" json:"writePacketSize,omitempty"` - Replication *uint32 `protobuf:"varint,4,req,name=replication" json:"replication,omitempty"` - FileBufferSize *uint32 `protobuf:"varint,5,req,name=fileBufferSize" json:"fileBufferSize,omitempty"` - EncryptDataTransfer *bool `protobuf:"varint,6,opt,name=encryptDataTransfer,def=0" json:"encryptDataTransfer,omitempty"` - TrashInterval *uint64 `protobuf:"varint,7,opt,name=trashInterval,def=0" json:"trashInterval,omitempty"` - ChecksumType *ChecksumTypeProto `protobuf:"varint,8,opt,name=checksumType,enum=hadoop.hdfs.ChecksumTypeProto,def=1" json:"checksumType,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FsServerDefaultsProto) Reset() { *m = FsServerDefaultsProto{} } -func (m *FsServerDefaultsProto) String() string { return proto.CompactTextString(m) } -func (*FsServerDefaultsProto) ProtoMessage() {} -func (*FsServerDefaultsProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{26} } - -const Default_FsServerDefaultsProto_EncryptDataTransfer bool = false -const Default_FsServerDefaultsProto_TrashInterval uint64 = 0 -const Default_FsServerDefaultsProto_ChecksumType ChecksumTypeProto = ChecksumTypeProto_CHECKSUM_CRC32 - -func (m *FsServerDefaultsProto) GetBlockSize() uint64 { - if m != nil && m.BlockSize != nil { - return *m.BlockSize - } - return 0 -} - -func (m *FsServerDefaultsProto) GetBytesPerChecksum() uint32 { - if m != nil && m.BytesPerChecksum != nil { - return *m.BytesPerChecksum - } - return 0 -} - -func (m *FsServerDefaultsProto) GetWritePacketSize() uint32 { - if m != nil && m.WritePacketSize != nil { - return *m.WritePacketSize - } - return 0 -} - -func (m *FsServerDefaultsProto) GetReplication() 
uint32 { - if m != nil && m.Replication != nil { - return *m.Replication - } - return 0 -} - -func (m *FsServerDefaultsProto) GetFileBufferSize() uint32 { - if m != nil && m.FileBufferSize != nil { - return *m.FileBufferSize - } - return 0 -} - -func (m *FsServerDefaultsProto) GetEncryptDataTransfer() bool { - if m != nil && m.EncryptDataTransfer != nil { - return *m.EncryptDataTransfer - } - return Default_FsServerDefaultsProto_EncryptDataTransfer -} - -func (m *FsServerDefaultsProto) GetTrashInterval() uint64 { - if m != nil && m.TrashInterval != nil { - return *m.TrashInterval - } - return Default_FsServerDefaultsProto_TrashInterval -} - -func (m *FsServerDefaultsProto) GetChecksumType() ChecksumTypeProto { - if m != nil && m.ChecksumType != nil { - return *m.ChecksumType - } - return Default_FsServerDefaultsProto_ChecksumType -} - -// * -// Directory listing -type DirectoryListingProto struct { - PartialListing []*HdfsFileStatusProto `protobuf:"bytes,1,rep,name=partialListing" json:"partialListing,omitempty"` - RemainingEntries *uint32 `protobuf:"varint,2,req,name=remainingEntries" json:"remainingEntries,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DirectoryListingProto) Reset() { *m = DirectoryListingProto{} } -func (m *DirectoryListingProto) String() string { return proto.CompactTextString(m) } -func (*DirectoryListingProto) ProtoMessage() {} -func (*DirectoryListingProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{27} } - -func (m *DirectoryListingProto) GetPartialListing() []*HdfsFileStatusProto { - if m != nil { - return m.PartialListing - } - return nil -} - -func (m *DirectoryListingProto) GetRemainingEntries() uint32 { - if m != nil && m.RemainingEntries != nil { - return *m.RemainingEntries - } - return 0 -} - -// * -// Status of a snapshottable directory: besides the normal information for -// a directory status, also include snapshot quota, number of snapshots, and -// the full path of the parent directory. 
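The RemainingEntries field in DirectoryListingProto above is what drives client-side paging: the NameNode returns directory entries in chunks, and the caller re-issues the request until nothing remains. A hedged sketch of the consuming loop, shown before the snapshottable-directory status message that follows; fetchPage is a hypothetical stand-in for the getListing RPC, which belongs to the ClientNamenodeProtocol service rather than to this file:

    // listAll drains a paged directory listing, resuming each request after
    // the last path returned by the previous page.
    func listAll(fetchPage func(startAfter []byte) (*hdfs.DirectoryListingProto, error)) ([]*hdfs.HdfsFileStatusProto, error) {
        var all []*hdfs.HdfsFileStatusProto
        var startAfter []byte
        for {
            page, err := fetchPage(startAfter)
            if err != nil {
                return nil, err
            }
            entries := page.GetPartialListing()
            all = append(all, entries...)
            if page.GetRemainingEntries() == 0 || len(entries) == 0 {
                return all, nil // nothing left on the server side
            }
            startAfter = entries[len(entries)-1].GetPath() // cursor for the next page
        }
    }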
-type SnapshottableDirectoryStatusProto struct { - DirStatus *HdfsFileStatusProto `protobuf:"bytes,1,req,name=dirStatus" json:"dirStatus,omitempty"` - // Fields specific for snapshottable directory - SnapshotQuota *uint32 `protobuf:"varint,2,req,name=snapshot_quota,json=snapshotQuota" json:"snapshot_quota,omitempty"` - SnapshotNumber *uint32 `protobuf:"varint,3,req,name=snapshot_number,json=snapshotNumber" json:"snapshot_number,omitempty"` - ParentFullpath []byte `protobuf:"bytes,4,req,name=parent_fullpath,json=parentFullpath" json:"parent_fullpath,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshottableDirectoryStatusProto) Reset() { *m = SnapshottableDirectoryStatusProto{} } -func (m *SnapshottableDirectoryStatusProto) String() string { return proto.CompactTextString(m) } -func (*SnapshottableDirectoryStatusProto) ProtoMessage() {} -func (*SnapshottableDirectoryStatusProto) Descriptor() ([]byte, []int) { - return fileDescriptor8, []int{28} -} - -func (m *SnapshottableDirectoryStatusProto) GetDirStatus() *HdfsFileStatusProto { - if m != nil { - return m.DirStatus - } - return nil -} - -func (m *SnapshottableDirectoryStatusProto) GetSnapshotQuota() uint32 { - if m != nil && m.SnapshotQuota != nil { - return *m.SnapshotQuota - } - return 0 -} - -func (m *SnapshottableDirectoryStatusProto) GetSnapshotNumber() uint32 { - if m != nil && m.SnapshotNumber != nil { - return *m.SnapshotNumber - } - return 0 -} - -func (m *SnapshottableDirectoryStatusProto) GetParentFullpath() []byte { - if m != nil { - return m.ParentFullpath - } - return nil -} - -// * -// Snapshottable directory listing -type SnapshottableDirectoryListingProto struct { - SnapshottableDirListing []*SnapshottableDirectoryStatusProto `protobuf:"bytes,1,rep,name=snapshottableDirListing" json:"snapshottableDirListing,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshottableDirectoryListingProto) Reset() { *m = SnapshottableDirectoryListingProto{} } -func (m *SnapshottableDirectoryListingProto) String() string { return proto.CompactTextString(m) } -func (*SnapshottableDirectoryListingProto) ProtoMessage() {} -func (*SnapshottableDirectoryListingProto) Descriptor() ([]byte, []int) { - return fileDescriptor8, []int{29} -} - -func (m *SnapshottableDirectoryListingProto) GetSnapshottableDirListing() []*SnapshottableDirectoryStatusProto { - if m != nil { - return m.SnapshottableDirListing - } - return nil -} - -// * -// Snapshot diff report entry -type SnapshotDiffReportEntryProto struct { - Fullpath []byte `protobuf:"bytes,1,req,name=fullpath" json:"fullpath,omitempty"` - ModificationLabel *string `protobuf:"bytes,2,req,name=modificationLabel" json:"modificationLabel,omitempty"` - TargetPath []byte `protobuf:"bytes,3,opt,name=targetPath" json:"targetPath,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotDiffReportEntryProto) Reset() { *m = SnapshotDiffReportEntryProto{} } -func (m *SnapshotDiffReportEntryProto) String() string { return proto.CompactTextString(m) } -func (*SnapshotDiffReportEntryProto) ProtoMessage() {} -func (*SnapshotDiffReportEntryProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{30} } - -func (m *SnapshotDiffReportEntryProto) GetFullpath() []byte { - if m != nil { - return m.Fullpath - } - return nil -} - -func (m *SnapshotDiffReportEntryProto) GetModificationLabel() string { - if m != nil && m.ModificationLabel != nil { - return *m.ModificationLabel - } - return "" -} - -func (m *SnapshotDiffReportEntryProto) GetTargetPath() []byte { - if 
m != nil { - return m.TargetPath - } - return nil -} - -// * -// Snapshot diff report -type SnapshotDiffReportProto struct { - // full path of the directory where snapshots were taken - SnapshotRoot *string `protobuf:"bytes,1,req,name=snapshotRoot" json:"snapshotRoot,omitempty"` - FromSnapshot *string `protobuf:"bytes,2,req,name=fromSnapshot" json:"fromSnapshot,omitempty"` - ToSnapshot *string `protobuf:"bytes,3,req,name=toSnapshot" json:"toSnapshot,omitempty"` - DiffReportEntries []*SnapshotDiffReportEntryProto `protobuf:"bytes,4,rep,name=diffReportEntries" json:"diffReportEntries,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotDiffReportProto) Reset() { *m = SnapshotDiffReportProto{} } -func (m *SnapshotDiffReportProto) String() string { return proto.CompactTextString(m) } -func (*SnapshotDiffReportProto) ProtoMessage() {} -func (*SnapshotDiffReportProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{31} } - -func (m *SnapshotDiffReportProto) GetSnapshotRoot() string { - if m != nil && m.SnapshotRoot != nil { - return *m.SnapshotRoot - } - return "" -} - -func (m *SnapshotDiffReportProto) GetFromSnapshot() string { - if m != nil && m.FromSnapshot != nil { - return *m.FromSnapshot - } - return "" -} - -func (m *SnapshotDiffReportProto) GetToSnapshot() string { - if m != nil && m.ToSnapshot != nil { - return *m.ToSnapshot - } - return "" -} - -func (m *SnapshotDiffReportProto) GetDiffReportEntries() []*SnapshotDiffReportEntryProto { - if m != nil { - return m.DiffReportEntries - } - return nil -} - -// * -// Block information -// -// Please be wary of adding additional fields here, since INodeFiles -// need to fit in PB's default max message size of 64MB. -// We restrict the max # of blocks per file -// (dfs.namenode.fs-limits.max-blocks-per-file), but it's better -// to avoid changing this. 
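The 64MB warning in the comment above is concrete: an INodeFile carries one BlockProto (defined just below) per block, and although a single entry encodes to only a few tens of bytes of varints, a file with millions of blocks would push the enclosing message toward protobuf's default 64MB limit; hence the dfs.namenode.fs-limits.max-blocks-per-file cap it mentions. A small sketch (imports and package alias as in the first example) that measures one encoded entry:

    b := &hdfs.BlockProto{
        BlockId:  proto.Uint64(1073741825),
        GenStamp: proto.Uint64(1001),
        NumBytes: proto.Uint64(134217728), // a 128 MiB block
    }
    fmt.Println(proto.Size(b)) // encoded size: a few tens of bytes per block entry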
-type BlockProto struct { - BlockId *uint64 `protobuf:"varint,1,req,name=blockId" json:"blockId,omitempty"` - GenStamp *uint64 `protobuf:"varint,2,req,name=genStamp" json:"genStamp,omitempty"` - NumBytes *uint64 `protobuf:"varint,3,opt,name=numBytes,def=0" json:"numBytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *BlockProto) Reset() { *m = BlockProto{} } -func (m *BlockProto) String() string { return proto.CompactTextString(m) } -func (*BlockProto) ProtoMessage() {} -func (*BlockProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{32} } - -const Default_BlockProto_NumBytes uint64 = 0 - -func (m *BlockProto) GetBlockId() uint64 { - if m != nil && m.BlockId != nil { - return *m.BlockId - } - return 0 -} - -func (m *BlockProto) GetGenStamp() uint64 { - if m != nil && m.GenStamp != nil { - return *m.GenStamp - } - return 0 -} - -func (m *BlockProto) GetNumBytes() uint64 { - if m != nil && m.NumBytes != nil { - return *m.NumBytes - } - return Default_BlockProto_NumBytes -} - -// * -// Information related to a snapshot -// TODO: add more information -type SnapshotInfoProto struct { - SnapshotName *string `protobuf:"bytes,1,req,name=snapshotName" json:"snapshotName,omitempty"` - SnapshotRoot *string `protobuf:"bytes,2,req,name=snapshotRoot" json:"snapshotRoot,omitempty"` - Permission *FsPermissionProto `protobuf:"bytes,3,req,name=permission" json:"permission,omitempty"` - Owner *string `protobuf:"bytes,4,req,name=owner" json:"owner,omitempty"` - Group *string `protobuf:"bytes,5,req,name=group" json:"group,omitempty"` - CreateTime *string `protobuf:"bytes,6,req,name=createTime" json:"createTime,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotInfoProto) Reset() { *m = SnapshotInfoProto{} } -func (m *SnapshotInfoProto) String() string { return proto.CompactTextString(m) } -func (*SnapshotInfoProto) ProtoMessage() {} -func (*SnapshotInfoProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{33} } - -func (m *SnapshotInfoProto) GetSnapshotName() string { - if m != nil && m.SnapshotName != nil { - return *m.SnapshotName - } - return "" -} - -func (m *SnapshotInfoProto) GetSnapshotRoot() string { - if m != nil && m.SnapshotRoot != nil { - return *m.SnapshotRoot - } - return "" -} - -func (m *SnapshotInfoProto) GetPermission() *FsPermissionProto { - if m != nil { - return m.Permission - } - return nil -} - -func (m *SnapshotInfoProto) GetOwner() string { - if m != nil && m.Owner != nil { - return *m.Owner - } - return "" -} - -func (m *SnapshotInfoProto) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *SnapshotInfoProto) GetCreateTime() string { - if m != nil && m.CreateTime != nil { - return *m.CreateTime - } - return "" -} - -// * -// Rolling upgrade status -type RollingUpgradeStatusProto struct { - BlockPoolId *string `protobuf:"bytes,1,req,name=blockPoolId" json:"blockPoolId,omitempty"` - Finalized *bool `protobuf:"varint,2,opt,name=finalized,def=0" json:"finalized,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RollingUpgradeStatusProto) Reset() { *m = RollingUpgradeStatusProto{} } -func (m *RollingUpgradeStatusProto) String() string { return proto.CompactTextString(m) } -func (*RollingUpgradeStatusProto) ProtoMessage() {} -func (*RollingUpgradeStatusProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{34} } - -const Default_RollingUpgradeStatusProto_Finalized bool = false - -func (m *RollingUpgradeStatusProto) GetBlockPoolId() string { - if m 
!= nil && m.BlockPoolId != nil { - return *m.BlockPoolId - } - return "" -} - -func (m *RollingUpgradeStatusProto) GetFinalized() bool { - if m != nil && m.Finalized != nil { - return *m.Finalized - } - return Default_RollingUpgradeStatusProto_Finalized -} - -// * -// A list of storage IDs. -type StorageUuidsProto struct { - StorageUuids []string `protobuf:"bytes,1,rep,name=storageUuids" json:"storageUuids,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StorageUuidsProto) Reset() { *m = StorageUuidsProto{} } -func (m *StorageUuidsProto) String() string { return proto.CompactTextString(m) } -func (*StorageUuidsProto) ProtoMessage() {} -func (*StorageUuidsProto) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{35} } - -func (m *StorageUuidsProto) GetStorageUuids() []string { - if m != nil { - return m.StorageUuids - } - return nil -} - -func init() { - proto.RegisterType((*ExtendedBlockProto)(nil), "hadoop.hdfs.ExtendedBlockProto") - proto.RegisterType((*DatanodeIDProto)(nil), "hadoop.hdfs.DatanodeIDProto") - proto.RegisterType((*DatanodeLocalInfoProto)(nil), "hadoop.hdfs.DatanodeLocalInfoProto") - proto.RegisterType((*DatanodeInfosProto)(nil), "hadoop.hdfs.DatanodeInfosProto") - proto.RegisterType((*DatanodeInfoProto)(nil), "hadoop.hdfs.DatanodeInfoProto") - proto.RegisterType((*DatanodeStorageProto)(nil), "hadoop.hdfs.DatanodeStorageProto") - proto.RegisterType((*StorageReportProto)(nil), "hadoop.hdfs.StorageReportProto") - proto.RegisterType((*ContentSummaryProto)(nil), "hadoop.hdfs.ContentSummaryProto") - proto.RegisterType((*QuotaUsageProto)(nil), "hadoop.hdfs.QuotaUsageProto") - proto.RegisterType((*StorageTypeQuotaInfosProto)(nil), "hadoop.hdfs.StorageTypeQuotaInfosProto") - proto.RegisterType((*StorageTypeQuotaInfoProto)(nil), "hadoop.hdfs.StorageTypeQuotaInfoProto") - proto.RegisterType((*CorruptFileBlocksProto)(nil), "hadoop.hdfs.CorruptFileBlocksProto") - proto.RegisterType((*FsPermissionProto)(nil), "hadoop.hdfs.FsPermissionProto") - proto.RegisterType((*StorageTypesProto)(nil), "hadoop.hdfs.StorageTypesProto") - proto.RegisterType((*BlockStoragePolicyProto)(nil), "hadoop.hdfs.BlockStoragePolicyProto") - proto.RegisterType((*LocatedBlockProto)(nil), "hadoop.hdfs.LocatedBlockProto") - proto.RegisterType((*DataEncryptionKeyProto)(nil), "hadoop.hdfs.DataEncryptionKeyProto") - proto.RegisterType((*FileEncryptionInfoProto)(nil), "hadoop.hdfs.FileEncryptionInfoProto") - proto.RegisterType((*PerFileEncryptionInfoProto)(nil), "hadoop.hdfs.PerFileEncryptionInfoProto") - proto.RegisterType((*ZoneEncryptionInfoProto)(nil), "hadoop.hdfs.ZoneEncryptionInfoProto") - proto.RegisterType((*CipherOptionProto)(nil), "hadoop.hdfs.CipherOptionProto") - proto.RegisterType((*LocatedBlocksProto)(nil), "hadoop.hdfs.LocatedBlocksProto") - proto.RegisterType((*ECSchemaOptionEntryProto)(nil), "hadoop.hdfs.ECSchemaOptionEntryProto") - proto.RegisterType((*ECSchemaProto)(nil), "hadoop.hdfs.ECSchemaProto") - proto.RegisterType((*ErasureCodingPolicyProto)(nil), "hadoop.hdfs.ErasureCodingPolicyProto") - proto.RegisterType((*HdfsFileStatusProto)(nil), "hadoop.hdfs.HdfsFileStatusProto") - proto.RegisterType((*FsServerDefaultsProto)(nil), "hadoop.hdfs.FsServerDefaultsProto") - proto.RegisterType((*DirectoryListingProto)(nil), "hadoop.hdfs.DirectoryListingProto") - proto.RegisterType((*SnapshottableDirectoryStatusProto)(nil), "hadoop.hdfs.SnapshottableDirectoryStatusProto") - proto.RegisterType((*SnapshottableDirectoryListingProto)(nil), "hadoop.hdfs.SnapshottableDirectoryListingProto") - 
proto.RegisterType((*SnapshotDiffReportEntryProto)(nil), "hadoop.hdfs.SnapshotDiffReportEntryProto") - proto.RegisterType((*SnapshotDiffReportProto)(nil), "hadoop.hdfs.SnapshotDiffReportProto") - proto.RegisterType((*BlockProto)(nil), "hadoop.hdfs.BlockProto") - proto.RegisterType((*SnapshotInfoProto)(nil), "hadoop.hdfs.SnapshotInfoProto") - proto.RegisterType((*RollingUpgradeStatusProto)(nil), "hadoop.hdfs.RollingUpgradeStatusProto") - proto.RegisterType((*StorageUuidsProto)(nil), "hadoop.hdfs.StorageUuidsProto") - proto.RegisterEnum("hadoop.hdfs.StorageTypeProto", StorageTypeProto_name, StorageTypeProto_value) - proto.RegisterEnum("hadoop.hdfs.CipherSuiteProto", CipherSuiteProto_name, CipherSuiteProto_value) - proto.RegisterEnum("hadoop.hdfs.CryptoProtocolVersionProto", CryptoProtocolVersionProto_name, CryptoProtocolVersionProto_value) - proto.RegisterEnum("hadoop.hdfs.ChecksumTypeProto", ChecksumTypeProto_name, ChecksumTypeProto_value) - proto.RegisterEnum("hadoop.hdfs.DatanodeInfoProto_AdminState", DatanodeInfoProto_AdminState_name, DatanodeInfoProto_AdminState_value) - proto.RegisterEnum("hadoop.hdfs.DatanodeStorageProto_StorageState", DatanodeStorageProto_StorageState_name, DatanodeStorageProto_StorageState_value) - proto.RegisterEnum("hadoop.hdfs.HdfsFileStatusProto_FileType", HdfsFileStatusProto_FileType_name, HdfsFileStatusProto_FileType_value) -} - -func init() { proto.RegisterFile("hdfs.proto", fileDescriptor8) } - -var fileDescriptor8 = []byte{ - // 2923 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x73, 0x1b, 0xc7, - 0xb1, 0xf7, 0x2e, 0x00, 0x12, 0x68, 0x92, 0x20, 0x38, 0xfa, 0x82, 0x68, 0x59, 0xa6, 0xf7, 0x49, - 0x16, 0xad, 0x67, 0xb3, 0x6c, 0xea, 0x95, 0x5d, 0x4f, 0xf6, 0xf3, 0x0b, 0x09, 0x80, 0x16, 0x4a, - 0x10, 0x40, 0x0f, 0x48, 0xb9, 0xec, 0x4a, 0x0a, 0xb5, 0xdc, 0x1d, 0x10, 0x1b, 0x2e, 0x76, 0x36, - 0xbb, 0x0b, 0x5a, 0xf0, 0x29, 0xc7, 0x54, 0xa5, 0x92, 0x9c, 0x72, 0xcb, 0xc1, 0x55, 0xc9, 0x39, - 0xff, 0x86, 0xff, 0x87, 0x54, 0x2a, 0xc7, 0xe4, 0x9a, 0x43, 0xee, 0x49, 0x4d, 0xcf, 0xec, 0x17, - 0x3e, 0x44, 0xc5, 0x3e, 0xe5, 0x86, 0xfe, 0x4d, 0x77, 0xef, 0x74, 0xcf, 0xf4, 0xc7, 0x34, 0x00, - 0x46, 0xf6, 0x30, 0xdc, 0xf3, 0x03, 0x1e, 0x71, 0xb2, 0x36, 0x32, 0x6d, 0xce, 0xfd, 0x3d, 0x01, - 0x6d, 0x57, 0xfb, 0xcc, 0x9a, 0x04, 0x4e, 0x34, 0x95, 0x8b, 0xc6, 0x6f, 0x34, 0x20, 0xad, 0x17, - 0x11, 0xf3, 0x6c, 0x66, 0x1f, 0xba, 0xdc, 0xba, 0x38, 0x46, 0x99, 0x9b, 0xb0, 0xe2, 0x73, 0xee, - 0xb6, 0xed, 0xba, 0xb6, 0xa3, 0xef, 0x56, 0xa8, 0xa2, 0x48, 0x1d, 0x56, 0xcf, 0x04, 0x57, 0xdb, - 0xae, 0xeb, 0x3b, 0xfa, 0x6e, 0x91, 0xc6, 0x24, 0xd9, 0x85, 0xcd, 0x73, 0xe6, 0xb1, 0xc0, 0x8c, - 0x1c, 0xee, 0xf5, 0x23, 0x73, 0xec, 0xd7, 0x0b, 0xc8, 0x31, 0x0b, 0x93, 0x37, 0xa0, 0xec, 0x4d, - 0xc6, 0x87, 0xd3, 0x88, 0x85, 0xf5, 0xe2, 0x8e, 0xb6, 0x5b, 0x7c, 0xac, 0xbd, 0x4f, 0x13, 0xc8, - 0xf8, 0xab, 0x06, 0x9b, 0x4d, 0x33, 0x32, 0x3d, 0x6e, 0xb3, 0x76, 0x33, 0xd9, 0x8e, 0xe3, 0x1f, - 0xd8, 0x76, 0x10, 0x6f, 0x47, 0x52, 0x64, 0x1b, 0xca, 0x23, 0x1e, 0x46, 0x5d, 0x73, 0xcc, 0x70, - 0x3f, 0x15, 0x9a, 0xd0, 0xc4, 0x80, 0x75, 0x5b, 0xa9, 0x39, 0x9d, 0x38, 0x36, 0xee, 0xa6, 0x42, - 0x73, 0x98, 0x90, 0x7f, 0x31, 0x64, 0xc1, 0x31, 0x0f, 0xa2, 0x7a, 0x71, 0x47, 0xdf, 0xdd, 0xa0, - 0x09, 0x2d, 0xd6, 0x1c, 0x6f, 0xc8, 0x71, 0xad, 0x24, 0xd7, 0x62, 0x5a, 0xb8, 0xc1, 0xf1, 0x2d, - 0x5c, 0x5a, 0xc1, 0xa5, 0x98, 0x24, 0xef, 0x40, 0x55, 0x70, 0xa1, 0x97, 0x19, 0x32, 0xac, 0xee, - 0x68, 0xbb, 0x1b, 0xc2, 0xc4, 0x99, 0x05, 0xe3, 0xe7, 0x1a, 
0xdc, 0x8c, 0x0d, 0xed, 0x70, 0xcb, - 0x74, 0xdb, 0x42, 0x3d, 0xda, 0xbb, 0x0b, 0x9b, 0x21, 0x1f, 0x46, 0x5f, 0x9b, 0x01, 0x7b, 0xce, - 0x82, 0xd0, 0xe1, 0x9e, 0x32, 0x7c, 0x16, 0x26, 0xf7, 0x60, 0xc3, 0xe2, 0xde, 0xd0, 0x39, 0x8f, - 0xf9, 0xa4, 0x1b, 0xf2, 0xa0, 0xf0, 0xdf, 0xc4, 0x8f, 0x9c, 0x31, 0x53, 0x67, 0xa2, 0x28, 0x83, - 0x02, 0x49, 0x5c, 0xed, 0x0d, 0x79, 0x28, 0xbf, 0xfe, 0x09, 0x54, 0x62, 0x2f, 0x85, 0x75, 0x6d, - 0xa7, 0xb0, 0xbb, 0xb6, 0x7f, 0x77, 0x2f, 0x73, 0x89, 0xf6, 0xb2, 0x32, 0x28, 0x42, 0x53, 0x01, - 0xe3, 0xef, 0x45, 0xd8, 0x9a, 0x63, 0x20, 0xef, 0x82, 0xee, 0xc8, 0xcb, 0xb4, 0xb6, 0x7f, 0x67, - 0xb1, 0x32, 0x79, 0xd6, 0x54, 0x77, 0x6c, 0x71, 0x45, 0x2c, 0xd3, 0x37, 0x2d, 0x27, 0x9a, 0xd6, - 0xf5, 0xe4, 0x8a, 0xc4, 0x10, 0x79, 0x1d, 0x56, 0xed, 0x61, 0x78, 0x1a, 0x32, 0x71, 0xaa, 0x6a, - 0x35, 0x46, 0xc8, 0x9b, 0x50, 0x09, 0xd8, 0xd8, 0x74, 0x3c, 0xc7, 0x3b, 0x4f, 0xef, 0x57, 0x8a, - 0x91, 0x07, 0xb0, 0x81, 0x97, 0xf6, 0x98, 0x73, 0x17, 0x75, 0x94, 0x62, 0xa6, 0x3c, 0x4e, 0xde, - 0x02, 0x70, 0xcd, 0x30, 0x3a, 0xf5, 0x6d, 0x33, 0x62, 0xf5, 0x95, 0x98, 0x2b, 0x03, 0x92, 0xfb, - 0xb0, 0xfe, 0xc2, 0x62, 0xce, 0x25, 0x0b, 0x1a, 0x7c, 0xe2, 0x65, 0x0e, 0x3b, 0x07, 0x8b, 0xbb, - 0xe4, 0x72, 0x0b, 0x63, 0xa0, 0x5e, 0xde, 0xd1, 0xc4, 0x3d, 0x8d, 0x69, 0xf2, 0x39, 0x80, 0x69, - 0x8f, 0x1d, 0x11, 0x1c, 0x11, 0xab, 0xc3, 0x8e, 0xb6, 0x5b, 0xdd, 0x7f, 0xe7, 0xe5, 0xee, 0xde, - 0x3b, 0x48, 0x04, 0x1e, 0xaf, 0x74, 0x7b, 0xf4, 0xd9, 0x41, 0x87, 0x66, 0x94, 0x08, 0x0b, 0x2d, - 0xd3, 0x1a, 0xb1, 0x46, 0xec, 0xc3, 0xb5, 0xc4, 0xc2, 0x1c, 0x2e, 0x7c, 0x85, 0x00, 0xba, 0x61, - 0x3d, 0xf1, 0x55, 0x82, 0x91, 0x47, 0x70, 0x2d, 0xb5, 0xf6, 0x19, 0xf7, 0x78, 0xc4, 0x3d, 0xc7, - 0xaa, 0x6f, 0xc4, 0xac, 0x8b, 0x56, 0xc5, 0x9d, 0x9c, 0xf8, 0xe7, 0x81, 0x69, 0xb3, 0x26, 0x17, - 0x4e, 0xaf, 0x57, 0xd1, 0xe4, 0x3c, 0x68, 0xb4, 0x01, 0x52, 0x33, 0x08, 0x80, 0x32, 0xa4, 0xf6, - 0x1a, 0x79, 0x1d, 0x6e, 0x35, 0x5b, 0x8d, 0xde, 0xb3, 0x67, 0xed, 0x7e, 0xbf, 0xdd, 0xeb, 0x0e, - 0xda, 0xdd, 0x63, 0xda, 0xfb, 0x8c, 0xb6, 0xfa, 0xfd, 0x9a, 0x46, 0x08, 0x54, 0xb3, 0x8b, 0xad, - 0x66, 0x4d, 0x37, 0xfe, 0xa9, 0xc1, 0xf5, 0xd8, 0x49, 0xfd, 0x88, 0x07, 0xe6, 0x39, 0x93, 0xb7, - 0x6e, 0x07, 0xd6, 0x42, 0x49, 0x63, 0x0a, 0x90, 0x31, 0x94, 0x85, 0x48, 0x07, 0x4a, 0x21, 0x3a, - 0x5e, 0x47, 0xc7, 0xef, 0x2d, 0x74, 0x7c, 0x56, 0xe7, 0x9e, 0x22, 0xf2, 0xde, 0x97, 0x4a, 0x48, - 0x2b, 0xf9, 0xde, 0xc9, 0xd4, 0x67, 0x78, 0x39, 0xab, 0xfb, 0x6f, 0xe4, 0x74, 0xf6, 0xd3, 0x75, - 0xd4, 0xf7, 0xb8, 0xd8, 0x6c, 0xf7, 0x9f, 0xd2, 0xac, 0x9c, 0xf1, 0x3e, 0xac, 0x67, 0xbf, 0x92, - 0x73, 0xce, 0x75, 0xa8, 0xd1, 0xd6, 0x41, 0x73, 0xd0, 0xeb, 0x76, 0xbe, 0x1c, 0xf4, 0x9f, 0x1c, - 0xd0, 0x56, 0xb3, 0xa6, 0x19, 0xbf, 0xd3, 0x81, 0x28, 0x11, 0xca, 0x7c, 0x1e, 0x44, 0xd2, 0xfe, - 0x7b, 0x0b, 0xec, 0x3f, 0xd4, 0xeb, 0x5a, 0xde, 0x07, 0x6f, 0xc0, 0xca, 0xd0, 0x74, 0x5c, 0x66, - 0xa3, 0x13, 0xca, 0x8f, 0x4b, 0x43, 0xd3, 0x0d, 0x19, 0x55, 0x60, 0x2e, 0x18, 0x0b, 0x2f, 0x0d, - 0xc6, 0xe2, 0xcb, 0x83, 0xb1, 0xf4, 0x2a, 0xc1, 0xb8, 0xb2, 0x24, 0x18, 0x3f, 0x86, 0x55, 0xb5, - 0x67, 0x0c, 0xb2, 0xb5, 0xfd, 0xb7, 0xae, 0x3c, 0x2a, 0x1a, 0x4b, 0x18, 0xdf, 0xea, 0x70, 0xad, - 0xc1, 0xbd, 0x88, 0x79, 0x51, 0x7f, 0x32, 0x1e, 0x9b, 0xc1, 0x34, 0xa9, 0x2b, 0x2e, 0xf3, 0xce, - 0xa3, 0x11, 0xba, 0xa6, 0x48, 0x15, 0x45, 0xee, 0x40, 0x65, 0xe8, 0xb8, 0x4c, 0xc6, 0xb4, 0x2c, - 0x74, 0x29, 0x40, 0xde, 0x86, 0xaa, 0xed, 0x04, 0xcc, 0x8a, 0x78, 0x30, 0x95, 0x2c, 0x32, 0xab, - 0xce, 0xa0, 0xe4, 0x3a, 0x94, 0x7e, 0x36, 0xe1, 0x91, 0x89, 0xa5, 0xa5, 0x48, 0x25, 
0x21, 0xa2, - 0x23, 0xf4, 0x4d, 0x8b, 0x35, 0xb8, 0x17, 0x4e, 0xc6, 0x98, 0x7e, 0xc4, 0x6a, 0x1e, 0x24, 0x77, - 0x01, 0x10, 0xf8, 0x1c, 0x15, 0xac, 0x20, 0x4b, 0x06, 0x21, 0x3d, 0xa8, 0x46, 0x53, 0x5f, 0x12, - 0x98, 0xba, 0x95, 0x57, 0x1e, 0x2c, 0xbb, 0x6c, 0x29, 0xa7, 0xf4, 0xcd, 0x8c, 0xb8, 0xf1, 0x0f, - 0x0d, 0x36, 0x91, 0x3c, 0x0d, 0x93, 0xf0, 0xf9, 0x1f, 0xb8, 0x21, 0xac, 0x3e, 0xf0, 0xec, 0x66, - 0xde, 0x5e, 0xe9, 0xad, 0xc5, 0x8b, 0xa9, 0xd9, 0xfa, 0x4b, 0xcd, 0x2e, 0x5c, 0x6d, 0x76, 0xf1, - 0x15, 0xcc, 0x2e, 0xfd, 0x30, 0xb3, 0x7f, 0x0a, 0xdb, 0xcb, 0xb9, 0x49, 0x07, 0x36, 0x72, 0xfc, - 0xaa, 0x1a, 0xbe, 0x7d, 0xe5, 0xd7, 0xe4, 0xc7, 0xf2, 0xc2, 0xa2, 0xe0, 0xdf, 0x5e, 0xca, 0x4c, - 0x3e, 0x80, 0xa2, 0x60, 0x47, 0xdf, 0x5e, 0x95, 0x34, 0x28, 0xb2, 0x2e, 0xf1, 0xf4, 0x36, 0x94, - 0xad, 0xbc, 0x93, 0x13, 0xda, 0x38, 0x82, 0x9b, 0x0d, 0x1e, 0x04, 0x13, 0x3f, 0x3a, 0x72, 0x5c, - 0x86, 0x0d, 0x9f, 0x32, 0xf5, 0x3a, 0x94, 0xc4, 0x71, 0xca, 0x82, 0x5f, 0xa1, 0x92, 0x10, 0x01, - 0x62, 0x71, 0x7e, 0xe1, 0xc4, 0xed, 0x95, 0xa2, 0x8c, 0x07, 0xb0, 0x75, 0x14, 0x1e, 0xb3, 0x60, - 0xec, 0x84, 0xa2, 0xc1, 0x90, 0x2a, 0x08, 0x14, 0x7d, 0x16, 0x8c, 0xd1, 0x82, 0x0d, 0x8a, 0xbf, - 0x8d, 0xe7, 0xb0, 0x95, 0xd9, 0xbc, 0xfa, 0xd6, 0x01, 0xac, 0x67, 0xd2, 0x9d, 0xfc, 0xe4, 0x95, - 0x26, 0xe7, 0x44, 0x8c, 0xef, 0x74, 0xb8, 0x85, 0xdb, 0x8f, 0x03, 0x9e, 0xbb, 0x8e, 0xa5, 0xa2, - 0x7a, 0x1b, 0xca, 0x3e, 0x92, 0xaa, 0x7d, 0xdd, 0xa0, 0x09, 0x2d, 0xf6, 0xe8, 0xa5, 0xdd, 0x22, - 0xfe, 0x26, 0x47, 0x50, 0xb5, 0x02, 0x86, 0xd5, 0x58, 0xaa, 0x41, 0xb7, 0xcd, 0x36, 0x3d, 0x73, - 0x66, 0xd0, 0x19, 0x29, 0xf2, 0x1c, 0x6e, 0xc6, 0xc8, 0x91, 0xe9, 0xba, 0x67, 0xa6, 0x48, 0x5f, - 0xa8, 0xaf, 0x88, 0x97, 0xf4, 0x2a, 0x7d, 0x4b, 0xa4, 0xc9, 0x8f, 0xe1, 0x76, 0xc0, 0x7c, 0xd7, - 0xb1, 0x16, 0xa9, 0x2e, 0xbd, 0x92, 0xea, 0xe5, 0x0a, 0x8c, 0xef, 0x0a, 0xb0, 0x25, 0xda, 0xcf, - 0x28, 0xf7, 0x00, 0x78, 0x0f, 0xb4, 0x33, 0xd5, 0xae, 0xbd, 0x99, 0xd3, 0x3d, 0xff, 0x58, 0xa0, - 0xda, 0x99, 0xb8, 0x27, 0x7c, 0x38, 0x0c, 0x59, 0x9c, 0x2d, 0x15, 0x45, 0xf6, 0xa1, 0xe8, 0x72, - 0x2b, 0xac, 0x17, 0x5e, 0xa9, 0x8b, 0x44, 0x5e, 0xd1, 0x5c, 0x5b, 0xf2, 0x8e, 0x62, 0x02, 0x28, - 0xd3, 0x98, 0x24, 0xff, 0x0b, 0x80, 0x45, 0xe1, 0x84, 0x5f, 0x30, 0x0f, 0xf3, 0xe6, 0xda, 0xfe, - 0xed, 0x58, 0xa7, 0xc5, 0xc7, 0x63, 0xee, 0xed, 0xe1, 0x9a, 0x54, 0x97, 0x61, 0x26, 0x77, 0xa1, - 0xec, 0x84, 0x0d, 0xd1, 0xd7, 0x88, 0x12, 0x53, 0xd8, 0x2d, 0x1f, 0xea, 0x35, 0x8d, 0x26, 0xd8, - 0xdc, 0x95, 0x5c, 0xfd, 0xb7, 0xaf, 0x24, 0xe6, 0x2e, 0x49, 0xb7, 0x9b, 0x61, 0xbd, 0x8c, 0x61, - 0x94, 0x41, 0xc4, 0x83, 0x44, 0x3e, 0x96, 0x3c, 0xdb, 0xb1, 0x58, 0x58, 0xaf, 0xec, 0x68, 0xbb, - 0xeb, 0x34, 0x87, 0x91, 0x8f, 0x61, 0x2d, 0xdd, 0x74, 0x58, 0x07, 0x74, 0xdb, 0x4b, 0x4c, 0xcc, - 0x72, 0x1b, 0x7f, 0x51, 0x0f, 0x8a, 0x96, 0x67, 0x05, 0x53, 0x5f, 0x1c, 0xf5, 0x53, 0x36, 0x4d, - 0xa2, 0xfb, 0x82, 0xa5, 0xf1, 0x20, 0x09, 0xd1, 0x1e, 0x25, 0x45, 0x56, 0xbd, 0xe8, 0x2a, 0x34, - 0x0b, 0x09, 0x39, 0x8f, 0x7b, 0x96, 0x7c, 0x37, 0xac, 0x53, 0x49, 0x88, 0x5c, 0xce, 0xb2, 0xdf, - 0xc0, 0x73, 0x5a, 0xa7, 0x79, 0x50, 0xf8, 0x83, 0xbd, 0xf0, 0x9d, 0x60, 0xda, 0x14, 0xfd, 0x95, - 0xac, 0x72, 0x19, 0x84, 0xbc, 0x0f, 0xd7, 0x52, 0x81, 0x03, 0xf7, 0x9c, 0x07, 0x4e, 0x34, 0x1a, - 0x63, 0x03, 0x50, 0xa1, 0x8b, 0x96, 0x8c, 0xdf, 0xea, 0x70, 0x4b, 0xe4, 0xad, 0xd4, 0xc0, 0x34, - 0x7d, 0x3e, 0x82, 0x52, 0x38, 0x71, 0xa2, 0xc5, 0xf9, 0xb3, 0xe1, 0xf8, 0x23, 0x16, 0xf4, 0xc5, - 0xba, 0xf4, 0x9b, 0xe4, 0x25, 0x3f, 0x81, 0x1b, 0xa8, 0x49, 0xea, 0xb0, 0xb8, 0x9b, 0x7d, 0x45, - 0x55, 0x67, 
0xaa, 0x4a, 0x63, 0x11, 0xa7, 0x54, 0xb7, 0x58, 0x0b, 0xa9, 0x41, 0xe1, 0x82, 0x4d, - 0x95, 0xef, 0xc4, 0x4f, 0x52, 0x05, 0xdd, 0xb9, 0x54, 0xee, 0xd2, 0x9d, 0x4b, 0x71, 0xd7, 0x2f, - 0xd8, 0x14, 0xdf, 0xaf, 0x25, 0xf4, 0x7e, 0x4c, 0x92, 0x87, 0x50, 0x63, 0xdf, 0x3c, 0x65, 0x53, - 0xa5, 0x0b, 0x59, 0x56, 0x90, 0x65, 0x0e, 0x17, 0x45, 0xec, 0x98, 0x05, 0xcb, 0x3c, 0xa3, 0x76, - 0xa1, 0xcd, 0xee, 0x42, 0x4f, 0x76, 0xb1, 0xe8, 0x5b, 0x85, 0x25, 0xdf, 0xfa, 0x4e, 0x83, 0x5b, - 0x5f, 0x71, 0xef, 0x3f, 0xe6, 0x0c, 0x32, 0x1e, 0x2e, 0xe4, 0x3c, 0x6c, 0x7c, 0xab, 0xc1, 0x96, - 0xdc, 0x54, 0x0f, 0xed, 0xf8, 0x01, 0x36, 0x5c, 0x87, 0x92, 0x83, 0x81, 0xa0, 0x63, 0x4c, 0x4b, - 0x42, 0xd4, 0x1a, 0xc7, 0x6b, 0x5f, 0x62, 0xd3, 0xbc, 0x4e, 0xf1, 0x37, 0x26, 0xca, 0x49, 0x24, - 0x63, 0x46, 0xa0, 0x8a, 0x12, 0x1a, 0xf8, 0x24, 0x6a, 0x5f, 0x62, 0x3e, 0x5f, 0xa7, 0x92, 0x30, - 0x7e, 0x5f, 0x00, 0x92, 0xcd, 0xcd, 0xaa, 0x7e, 0xde, 0x05, 0x10, 0xe5, 0xb9, 0x93, 0x6d, 0x5d, - 0x33, 0x08, 0xf9, 0x10, 0x56, 0x30, 0x88, 0xc3, 0xba, 0xbe, 0x20, 0xef, 0xce, 0x25, 0x7b, 0xaa, - 0xb8, 0xc9, 0xbb, 0xb0, 0x35, 0xf1, 0x6c, 0xf1, 0x68, 0xf5, 0xc2, 0x28, 0x98, 0x58, 0xf8, 0x5e, - 0x2d, 0x60, 0x0e, 0x9e, 0x5f, 0x20, 0x9f, 0x40, 0x45, 0xbc, 0xfe, 0x50, 0xcf, 0xc2, 0x0a, 0x37, - 0xff, 0xa1, 0x54, 0x40, 0x44, 0xbf, 0x13, 0x76, 0x62, 0xb2, 0xc1, 0xc7, 0xbe, 0xcb, 0x54, 0x9a, - 0x28, 0xd3, 0x45, 0x4b, 0xe4, 0x04, 0xc8, 0x70, 0xee, 0x8a, 0x63, 0xba, 0x58, 0xdb, 0xbf, 0x97, - 0xfb, 0xf0, 0x92, 0x48, 0xa0, 0x0b, 0xe4, 0xc9, 0x01, 0x94, 0x99, 0xa5, 0x6a, 0xa9, 0x6c, 0xa1, - 0xef, 0xe7, 0xeb, 0x5d, 0x60, 0x86, 0x93, 0x80, 0x35, 0xb8, 0xed, 0x78, 0xe7, 0x99, 0x2e, 0x83, - 0x26, 0x62, 0xc6, 0x21, 0xd4, 0x5b, 0x8d, 0xbe, 0x35, 0x62, 0x63, 0x53, 0xde, 0xa4, 0x96, 0x17, - 0xc5, 0x2f, 0x8c, 0x4c, 0xf0, 0x55, 0x64, 0xf0, 0x5d, 0x87, 0xd2, 0xa5, 0xe9, 0x4e, 0xe2, 0x16, - 0x44, 0x12, 0xc6, 0x1f, 0x35, 0xd8, 0x88, 0x95, 0x48, 0xc9, 0x3b, 0x50, 0xb1, 0xb8, 0xcd, 0x2c, - 0xbc, 0xba, 0x52, 0x3e, 0x05, 0xc4, 0xaa, 0x6d, 0x46, 0xe6, 0xa9, 0xe7, 0x44, 0x21, 0x6a, 0xda, - 0xa0, 0x29, 0x20, 0x12, 0xbb, 0x6f, 0x06, 0x4e, 0x34, 0x95, 0xeb, 0x05, 0x5c, 0xcf, 0x42, 0xe4, - 0xff, 0x61, 0x95, 0xe3, 0x5e, 0xc3, 0x7a, 0x11, 0xef, 0xc8, 0x8c, 0xd5, 0x4b, 0xec, 0xa1, 0xb1, - 0x94, 0xf1, 0x6b, 0x0d, 0xea, 0xcb, 0x7c, 0x93, 0x74, 0x59, 0x5a, 0xa6, 0xcb, 0xda, 0x87, 0x95, - 0x10, 0x75, 0xe2, 0x76, 0xd7, 0xf6, 0xb7, 0x17, 0x7e, 0x50, 0x5d, 0x48, 0xc9, 0x89, 0xad, 0x2c, - 0x73, 0xdd, 0xbe, 0xf3, 0x0d, 0x53, 0x46, 0x24, 0x34, 0x26, 0x31, 0x5b, 0x4d, 0xed, 0x74, 0xc7, - 0x36, 0xfe, 0xb0, 0x02, 0xd7, 0x9e, 0xd8, 0xc3, 0x50, 0x1c, 0xbe, 0x78, 0x36, 0x4f, 0x54, 0xb0, - 0xb4, 0xa0, 0x2c, 0x8e, 0xfd, 0x24, 0xed, 0xad, 0xf3, 0xd3, 0x95, 0x05, 0x32, 0x78, 0x81, 0x84, - 0x00, 0x4d, 0x44, 0xb1, 0xb9, 0x35, 0xa3, 0x91, 0xca, 0x9a, 0xf8, 0x3b, 0xf3, 0x7c, 0x2c, 0xe4, - 0x9e, 0x8f, 0x9f, 0x02, 0xf8, 0x49, 0x6f, 0x8c, 0x5b, 0x9c, 0x0d, 0x8d, 0xb9, 0xe6, 0x99, 0x66, - 0x24, 0x30, 0x19, 0x7c, 0xed, 0xb1, 0x40, 0xd5, 0x04, 0x49, 0x08, 0xf4, 0x3c, 0xe0, 0x13, 0x5f, - 0x95, 0x01, 0x49, 0x90, 0xff, 0x86, 0xad, 0x31, 0xb7, 0x9d, 0xa1, 0x6a, 0xee, 0x06, 0x38, 0xe5, - 0x5b, 0xc5, 0xed, 0xd4, 0xb2, 0x0b, 0x27, 0xce, 0x98, 0x91, 0x37, 0x61, 0xcd, 0xb4, 0x2c, 0x16, - 0x86, 0x92, 0xad, 0x2c, 0x33, 0x87, 0x84, 0x90, 0xa1, 0x0e, 0xab, 0xe1, 0x74, 0xec, 0x3a, 0xde, - 0x85, 0x6a, 0x4f, 0x62, 0x92, 0xec, 0xc1, 0x16, 0x66, 0x89, 0x41, 0xa6, 0x93, 0xc4, 0x69, 0x15, - 0x8e, 0xbb, 0x6a, 0xb8, 0x46, 0xd3, 0x25, 0xf1, 0xf2, 0x97, 0x59, 0x45, 0x9c, 0x5d, 0x32, 0x7f, - 0x4a, 0x31, 0xf2, 0x7f, 0x50, 0x89, 
0x67, 0x60, 0x21, 0xce, 0x9e, 0x66, 0x3b, 0xcd, 0xf9, 0xc4, - 0x47, 0x53, 0x09, 0x72, 0x1b, 0x56, 0xc4, 0xd9, 0xb4, 0xed, 0x74, 0x18, 0xa5, 0x00, 0x72, 0x0f, - 0xd6, 0xac, 0x91, 0xe3, 0xda, 0x01, 0xf3, 0xba, 0x93, 0x31, 0x4e, 0x9f, 0x4a, 0x8f, 0xf5, 0xf7, - 0x3e, 0xa0, 0x59, 0x78, 0x49, 0x3a, 0xd9, 0xfc, 0x81, 0xe9, 0xe4, 0x01, 0x6c, 0x84, 0xd9, 0x17, - 0x49, 0xbd, 0x16, 0xbb, 0x28, 0x8f, 0xe7, 0xf2, 0xce, 0xd6, 0xf7, 0xcb, 0x3b, 0x8f, 0xa0, 0x1c, - 0x5f, 0x54, 0x02, 0xb0, 0xd2, 0xee, 0x0f, 0x9a, 0x6d, 0x5a, 0xd3, 0xc8, 0x1a, 0xac, 0xb6, 0xfb, - 0x83, 0xa3, 0x76, 0xa7, 0x55, 0xd3, 0x49, 0x15, 0xa0, 0xdd, 0x1f, 0xf4, 0xbf, 0x7c, 0xd6, 0x69, - 0x77, 0x9f, 0xd6, 0x0a, 0xc6, 0x2f, 0x0b, 0x70, 0xe3, 0x28, 0xec, 0xb3, 0xe0, 0x92, 0x05, 0x4d, - 0x36, 0x34, 0x27, 0x6e, 0x14, 0x26, 0x09, 0x07, 0x4f, 0x07, 0xa3, 0x4d, 0x16, 0x95, 0x14, 0x10, - 0x3d, 0xc2, 0xd9, 0x34, 0x62, 0xe2, 0xde, 0x36, 0x46, 0xcc, 0xba, 0x08, 0x27, 0x63, 0x95, 0x77, - 0xe6, 0x70, 0xb2, 0x0b, 0x9b, 0x5f, 0x07, 0xa2, 0x46, 0x9a, 0xd6, 0x05, 0x8b, 0x32, 0xd1, 0x3b, - 0x0b, 0x8b, 0x44, 0x95, 0xbd, 0x4f, 0x32, 0x9a, 0xb3, 0x10, 0x79, 0x1b, 0xaa, 0xc2, 0xcd, 0x87, - 0x93, 0xe1, 0x90, 0x05, 0xa8, 0x4a, 0x0e, 0xe3, 0x67, 0x50, 0xf2, 0x51, 0xd2, 0x4d, 0x8a, 0x16, - 0xf8, 0x24, 0x30, 0xbd, 0x70, 0xc8, 0x02, 0x2c, 0x0f, 0xc9, 0x44, 0x6b, 0x11, 0x87, 0x38, 0xb1, - 0x28, 0x30, 0xc3, 0x51, 0xdb, 0x8b, 0x58, 0x70, 0x69, 0xba, 0x58, 0x05, 0xe4, 0x04, 0x2a, 0x87, - 0x13, 0x0a, 0xeb, 0x96, 0xb2, 0x10, 0x93, 0x49, 0x19, 0xa7, 0x7b, 0xf9, 0xb8, 0x6e, 0x64, 0x18, - 0xe4, 0x78, 0xaf, 0xda, 0x78, 0xd2, 0x6a, 0x3c, 0xed, 0x9f, 0x3e, 0x1b, 0x34, 0x68, 0xe3, 0xd1, - 0x3e, 0xcd, 0xe9, 0x30, 0x7e, 0xa5, 0xc1, 0x8d, 0x64, 0x7c, 0xd2, 0x71, 0xc2, 0x48, 0x1c, 0x36, - 0x9e, 0xc6, 0x13, 0xa8, 0xfa, 0x66, 0x10, 0x39, 0xa6, 0xab, 0x60, 0x35, 0x7b, 0xd8, 0xb9, 0x2a, - 0x79, 0xd1, 0x19, 0x39, 0x71, 0x72, 0xc9, 0xbc, 0x4d, 0x64, 0x72, 0x87, 0xc5, 0x15, 0x63, 0x0e, - 0x37, 0xfe, 0xac, 0xc1, 0x5b, 0x7d, 0xcf, 0xf4, 0xc3, 0x11, 0x8f, 0x22, 0xf3, 0xcc, 0x65, 0xc9, - 0xe6, 0xb2, 0x29, 0xf5, 0x53, 0xa8, 0xd8, 0x4e, 0x20, 0x11, 0xf5, 0x48, 0xbc, 0x7a, 0x5b, 0xa9, - 0x08, 0xb9, 0x0f, 0xd5, 0x50, 0x7d, 0x64, 0x90, 0x0e, 0x30, 0x36, 0xe8, 0x46, 0x8c, 0xca, 0x61, - 0xcf, 0x03, 0xd8, 0x4c, 0xd8, 0xbc, 0xc9, 0xf8, 0x8c, 0x05, 0xea, 0x1a, 0x25, 0xd2, 0x5d, 0x44, - 0x05, 0xa3, 0x6f, 0x06, 0xcc, 0x8b, 0x06, 0xc3, 0x89, 0xeb, 0x62, 0x9a, 0x96, 0x2d, 0x76, 0x55, - 0xc2, 0x47, 0x0a, 0x15, 0x45, 0xcb, 0x58, 0x6c, 0x5e, 0xce, 0xf7, 0x23, 0xb8, 0x15, 0xce, 0x70, - 0xe5, 0x0f, 0x21, 0x3f, 0x26, 0xbe, 0xd2, 0x61, 0x74, 0x99, 0x3a, 0xe3, 0x17, 0x1a, 0xdc, 0x89, - 0xc5, 0x9b, 0xce, 0x70, 0x28, 0x87, 0xb7, 0x99, 0xfe, 0x61, 0x1b, 0xca, 0x89, 0x4d, 0xb2, 0x83, - 0x4f, 0x68, 0xd1, 0xae, 0x65, 0x33, 0x7c, 0xc7, 0x3c, 0x63, 0xae, 0xea, 0x2a, 0xe6, 0x17, 0x44, - 0xd3, 0x18, 0x99, 0xc1, 0x39, 0x8b, 0x8e, 0x4d, 0x2c, 0x58, 0x22, 0xbb, 0x67, 0x10, 0xe3, 0x4f, - 0x1a, 0xdc, 0x9a, 0xdf, 0x8a, 0xdc, 0x85, 0x01, 0xeb, 0xb1, 0x05, 0x94, 0xf3, 0x48, 0xd5, 0xf5, - 0x1c, 0x26, 0x78, 0x86, 0x01, 0x1f, 0xc7, 0x2a, 0xd4, 0x46, 0x72, 0x18, 0xee, 0x81, 0x27, 0x1c, - 0xb2, 0x1f, 0xcf, 0x20, 0xe4, 0x0b, 0xd8, 0xb2, 0x73, 0x5e, 0x70, 0x58, 0xdc, 0x9f, 0xbc, 0xb3, - 0xd0, 0xe5, 0x8b, 0x7c, 0x46, 0xe7, 0x75, 0x18, 0x26, 0x40, 0x66, 0xb8, 0x91, 0xf9, 0x17, 0x53, - 0xcb, 0xff, 0x8b, 0xb9, 0x0d, 0xe5, 0x73, 0xa6, 0xfe, 0xbe, 0x94, 0x93, 0x8c, 0x84, 0xce, 0xfd, - 0x6f, 0x59, 0x98, 0xff, 0xdf, 0xf2, 0x6f, 0x1a, 0x6c, 0xc5, 0xdb, 0x4a, 0x9f, 0x44, 0x19, 0xcf, - 0x65, 0x1a, 0xb9, 0x1c, 0x36, 0xe7, 0x5d, 0x7d, 0x81, 0x77, 
0xf3, 0x2d, 0x45, 0xe1, 0xfb, 0xb7, - 0x14, 0xc5, 0x85, 0x2d, 0x45, 0x29, 0xdb, 0x52, 0xdc, 0x05, 0xc0, 0x49, 0x14, 0x13, 0x2d, 0x81, - 0xea, 0x36, 0x32, 0x88, 0x71, 0x06, 0xb7, 0x29, 0x77, 0x5d, 0xc7, 0x3b, 0x3f, 0x95, 0xff, 0xe8, - 0x64, 0x73, 0xc3, 0xcc, 0x4c, 0x41, 0x9b, 0x9f, 0x29, 0xfc, 0x17, 0x54, 0x86, 0x8e, 0x67, 0xba, - 0xce, 0x37, 0xb3, 0xff, 0x38, 0xa4, 0xb8, 0xf1, 0x51, 0x32, 0x37, 0x3c, 0x9d, 0x38, 0x76, 0x98, - 0x3a, 0x33, 0x03, 0xaa, 0x51, 0x65, 0x0e, 0x7b, 0xf8, 0x23, 0xa8, 0xcd, 0xce, 0x69, 0x48, 0x19, - 0xf0, 0x4f, 0x96, 0x9a, 0x46, 0x56, 0xa1, 0xd0, 0xef, 0x37, 0x6b, 0xba, 0xa8, 0x91, 0x07, 0xb4, - 0xf1, 0xa4, 0xfd, 0xbc, 0x55, 0x2b, 0x90, 0x75, 0x28, 0xd3, 0x83, 0x67, 0x03, 0xe4, 0x29, 0x3e, - 0xfc, 0x10, 0x6a, 0xb3, 0xef, 0x3c, 0xc1, 0x7e, 0xda, 0x7d, 0xda, 0xed, 0x7d, 0xd1, 0xad, 0x69, - 0xe4, 0x06, 0x6c, 0x1d, 0xb4, 0xfa, 0x83, 0xc6, 0x09, 0x1d, 0x74, 0x7b, 0xc7, 0x07, 0xcd, 0x66, - 0xbb, 0xfb, 0x59, 0x4d, 0x7f, 0x78, 0x0c, 0xdb, 0xcb, 0x9f, 0xa7, 0xe4, 0x0e, 0xd4, 0x95, 0x86, - 0xc1, 0x31, 0xed, 0x9d, 0xf4, 0x1a, 0xbd, 0xce, 0xe0, 0x79, 0x8b, 0xf6, 0xdb, 0x3d, 0xa1, 0xf2, - 0x3a, 0xd4, 0x5a, 0xdd, 0x06, 0xfd, 0xf2, 0xf8, 0xa4, 0xdd, 0xeb, 0x0e, 0xbe, 0xea, 0x75, 0x5b, - 0xfd, 0x9a, 0xfe, 0xb0, 0x07, 0x5b, 0x73, 0x05, 0x85, 0x6c, 0xc1, 0x46, 0x52, 0x52, 0xba, 0xa7, - 0x9d, 0x4e, 0xed, 0x35, 0x42, 0x60, 0xa6, 0xca, 0xd4, 0x34, 0x72, 0x0d, 0x36, 0xf3, 0x58, 0xa3, - 0xa6, 0x1f, 0x7e, 0x08, 0xf7, 0x79, 0x70, 0xbe, 0x67, 0xfa, 0xa6, 0x35, 0x62, 0xb9, 0xdb, 0xe3, - 0xab, 0x3d, 0xcb, 0x1f, 0x87, 0x20, 0x32, 0x38, 0x7e, 0x2f, 0xfc, 0x56, 0xd3, 0xfe, 0x15, 0x00, - 0x00, 0xff, 0xff, 0xd4, 0x8c, 0x02, 0x21, 0x4f, 0x20, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.proto deleted file mode 100644 index 0db8a3f5084..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/hdfs.proto +++ /dev/null @@ -1,500 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -// This file contains protocol buffers that are used throughout HDFS -- i.e. -// by the client, server, and data transfer protocols. 
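The hdfs.proto being removed here is the source for the generated hadoop_hdfs Go package that the rest of this vendored HDFS client compiles against. As a rough illustration of how its messages are consumed from Go — a minimal sketch, assuming the vendored github.com/colinmarc/hdfs/protocol/hadoop_hdfs package and the github.com/golang/protobuf/proto helpers are importable, with hypothetical field values — the ExtendedBlockProto message defined just below round-trips like this:

package main

import (
	"fmt"
	"log"

	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	"github.com/golang/protobuf/proto"
)

func main() {
	// proto2 scalar fields compile to pointer fields, so the proto.*
	// helpers are used to obtain addresses of literal values.
	blk := &hdfs.ExtendedBlockProto{
		PoolId:          proto.String("BP-example-pool"), // hypothetical block pool id
		BlockId:         proto.Uint64(1073741825),
		GenerationStamp: proto.Uint64(1001),
	}
	raw, err := proto.Marshal(blk)
	if err != nil {
		log.Fatal(err)
	}
	var decoded hdfs.ExtendedBlockProto
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetPoolId(), decoded.GetBlockId()) // values survive the round trip
}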
- - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "HdfsProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "Security.proto"; - -/** - * Extended block identifies a block - */ -message ExtendedBlockProto { - required string poolId = 1; // Block pool id - globally unique across clusters - required uint64 blockId = 2; // the local id within a pool - required uint64 generationStamp = 3; - optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid - // here for historical reasons -} - -/** - * Identifies a Datanode - */ -message DatanodeIDProto { - required string ipAddr = 1; // IP address - required string hostName = 2; // hostname - required string datanodeUuid = 3; // UUID assigned to the Datanode. For - // upgraded clusters this is the same - // as the original StorageID of the - // Datanode. - required uint32 xferPort = 4; // data streaming port - required uint32 infoPort = 5; // datanode http port - required uint32 ipcPort = 6; // ipc server port - optional uint32 infoSecurePort = 7 [default = 0]; // datanode https port -} - -/** - * Datanode local information - */ -message DatanodeLocalInfoProto { - required string softwareVersion = 1; - required string configVersion = 2; - required uint64 uptime = 3; -} - -/** - * DatanodeInfo array - */ -message DatanodeInfosProto { - repeated DatanodeInfoProto datanodes = 1; -} - -/** - * The status of a Datanode - */ -message DatanodeInfoProto { - required DatanodeIDProto id = 1; - optional uint64 capacity = 2 [default = 0]; - optional uint64 dfsUsed = 3 [default = 0]; - optional uint64 remaining = 4 [default = 0]; - optional uint64 blockPoolUsed = 5 [default = 0]; - optional uint64 lastUpdate = 6 [default = 0]; - optional uint32 xceiverCount = 7 [default = 0]; - optional string location = 8; - enum AdminState { - NORMAL = 0; - DECOMMISSION_INPROGRESS = 1; - DECOMMISSIONED = 2; - } - - optional AdminState adminState = 10 [default = NORMAL]; - optional uint64 cacheCapacity = 11 [default = 0]; - optional uint64 cacheUsed = 12 [default = 0]; - optional uint64 lastUpdateMonotonic = 13 [default = 0]; - optional string upgradeDomain = 14; -} - -/** - * Represents a storage available on the datanode - */ -message DatanodeStorageProto { - enum StorageState { - NORMAL = 0; - READ_ONLY_SHARED = 1; - } - - required string storageUuid = 1; - optional StorageState state = 2 [default = NORMAL]; - optional StorageTypeProto storageType = 3 [default = DISK]; -} - -message StorageReportProto { - required string storageUuid = 1 [ deprecated = true ]; - optional bool failed = 2 [ default = false ]; - optional uint64 capacity = 3 [ default = 0 ]; - optional uint64 dfsUsed = 4 [ default = 0 ]; - optional uint64 remaining = 5 [ default = 0 ]; - optional uint64 blockPoolUsed = 6 [ default = 0 ]; - optional DatanodeStorageProto storage = 7; // supersedes StorageUuid -} - -/** - * Summary of a file or directory - */ -message ContentSummaryProto { - required uint64 length = 1; - required uint64 fileCount = 2; - required uint64 directoryCount = 3; - required uint64 quota = 4; - required uint64 spaceConsumed = 5; - required uint64 spaceQuota = 6; - optional StorageTypeQuotaInfosProto typeQuotaInfos = 7; -} - -/** - * Summary of quota usage of a directory - */ -message QuotaUsageProto { - required uint64 fileAndDirectoryCount = 1; - required uint64 quota = 2; - required uint64 spaceConsumed = 3; - required uint64 spaceQuota = 4; - optional StorageTypeQuotaInfosProto typeQuotaInfos =
5; -} - -/** - * Storage type quota and usage information of a file or directory - */ -message StorageTypeQuotaInfosProto { - repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1; -} - -message StorageTypeQuotaInfoProto { - required StorageTypeProto type = 1; - required uint64 quota = 2; - required uint64 consumed = 3; -} - -/** - * Contains a list of paths corresponding to corrupt files and a cookie - * used for iterative calls to NameNode.listCorruptFileBlocks. - * - */ -message CorruptFileBlocksProto { - repeated string files = 1; - required string cookie = 2; -} - -/** - * File or Directory permission - same spec as posix - */ -message FsPermissionProto { - required uint32 perm = 1; // Actually a short - only 16 bits used -} - -/** - * Types of recognized storage media. - */ -enum StorageTypeProto { - DISK = 1; - SSD = 2; - ARCHIVE = 3; - RAM_DISK = 4; -} - -/** - * A list of storage types. - */ -message StorageTypesProto { - repeated StorageTypeProto storageTypes = 1; -} - -/** - * Block replica storage policy. - */ -message BlockStoragePolicyProto { - required uint32 policyId = 1; - required string name = 2; - // a list of storage types for storing the block replicas when creating a - // block. - required StorageTypesProto creationPolicy = 3; - // A list of storage types for creation fallback storage. - optional StorageTypesProto creationFallbackPolicy = 4; - optional StorageTypesProto replicationFallbackPolicy = 5; -} - - -/** - * A LocatedBlock gives information about a block and its location. - */ -message LocatedBlockProto { - required ExtendedBlockProto b = 1; - required uint64 offset = 2; // offset of first byte of block in the file - repeated DatanodeInfoProto locs = 3; // Locations ordered by proximity to client ip - required bool corrupt = 4; // true if all replicas of a block are corrupt, else false - // If block has few corrupt replicas, they are filtered and - // their locations are not part of this object - - required hadoop.common.TokenProto blockToken = 5; - repeated bool isCached = 6 [packed=true]; // if a location in locs is cached - repeated StorageTypeProto storageTypes = 7; - repeated string storageIDs = 8; - - // striped block related fields - optional bytes blockIndices = 9; // used for striped block to indicate block index for each storage - repeated hadoop.common.TokenProto blockTokens = 10; // each internal block has a block token -} - -message DataEncryptionKeyProto { - required uint32 keyId = 1; - required string blockPoolId = 2; - required bytes nonce = 3; - required bytes encryptionKey = 4; - required uint64 expiryDate = 5; - optional string encryptionAlgorithm = 6; -} - -/** - * Cipher suite. - */ -enum CipherSuiteProto { - UNKNOWN = 1; - AES_CTR_NOPADDING = 2; -} - -/** - * Crypto protocol version used to access encrypted files. - */ -enum CryptoProtocolVersionProto { - UNKNOWN_PROTOCOL_VERSION = 1; - ENCRYPTION_ZONES = 2; -} - -/** - * Encryption information for a file.
- */ -message FileEncryptionInfoProto { - required CipherSuiteProto suite = 1; - required CryptoProtocolVersionProto cryptoProtocolVersion = 2; - required bytes key = 3; - required bytes iv = 4; - required string keyName = 5; - required string ezKeyVersionName = 6; -} - -/** - * Encryption information for an individual - * file within an encryption zone - */ -message PerFileEncryptionInfoProto { - required bytes key = 1; - required bytes iv = 2; - required string ezKeyVersionName = 3; -} - -/** - * Encryption information for an encryption - * zone - */ -message ZoneEncryptionInfoProto { - required CipherSuiteProto suite = 1; - required CryptoProtocolVersionProto cryptoProtocolVersion = 2; - required string keyName = 3; -} - -/** - * Cipher option - */ -message CipherOptionProto { - required CipherSuiteProto suite = 1; - optional bytes inKey = 2; - optional bytes inIv = 3; - optional bytes outKey = 4; - optional bytes outIv = 5; -} - -/** - * A set of file blocks and their locations. - */ -message LocatedBlocksProto { - required uint64 fileLength = 1; - repeated LocatedBlockProto blocks = 2; - required bool underConstruction = 3; - optional LocatedBlockProto lastBlock = 4; - required bool isLastBlockComplete = 5; - optional FileEncryptionInfoProto fileEncryptionInfo = 6; - - // Optional field for erasure coding - optional ErasureCodingPolicyProto ecPolicy = 7; -} - -/** - * ECSchema options entry - */ -message ECSchemaOptionEntryProto { - required string key = 1; - required string value = 2; -} - -/** - * ECSchema for erasure coding - */ -message ECSchemaProto { - required string codecName = 1; - required uint32 dataUnits = 2; - required uint32 parityUnits = 3; - repeated ECSchemaOptionEntryProto options = 4; -} - -message ErasureCodingPolicyProto { - required string name = 1; - required ECSchemaProto schema = 2; - required uint32 cellSize = 3; - required uint32 id = 4; // Actually a byte - only 8 bits used -} - -/** - * Status of a file, directory or symlink - * Optionally includes a file's block locations if requested by client on the rpc call.
- */ -message HdfsFileStatusProto { - enum FileType { - IS_DIR = 1; - IS_FILE = 2; - IS_SYMLINK = 3; - } - required FileType fileType = 1; - required bytes path = 2; // local name of inode encoded java UTF8 - required uint64 length = 3; - required FsPermissionProto permission = 4; - required string owner = 5; - required string group = 6; - required uint64 modification_time = 7; - required uint64 access_time = 8; - - // Optional fields for symlink - optional bytes symlink = 9; // if symlink, target encoded java UTF8 - - // Optional fields for file - optional uint32 block_replication = 10 [default = 0]; // only 16 bits used - optional uint64 blocksize = 11 [default = 0]; - optional LocatedBlocksProto locations = 12; // supplied only if asked by client - - // Optional field for fileId - optional uint64 fileId = 13 [default = 0]; // default as an invalid id - optional int32 childrenNum = 14 [default = -1]; - // Optional field for file encryption - optional FileEncryptionInfoProto fileEncryptionInfo = 15; - - optional uint32 storagePolicy = 16 [default = 0]; // block storage policy id - - // Optional field for erasure coding - optional ErasureCodingPolicyProto ecPolicy = 17; -} - -/** - * Checksum algorithms/types used in HDFS - * Make sure this enum's integer values match enum values' id properties defined - * in org.apache.hadoop.util.DataChecksum.Type - */ -enum ChecksumTypeProto { - CHECKSUM_NULL = 0; - CHECKSUM_CRC32 = 1; - CHECKSUM_CRC32C = 2; -} - -/** - * HDFS Server Defaults - */ -message FsServerDefaultsProto { - required uint64 blockSize = 1; - required uint32 bytesPerChecksum = 2; - required uint32 writePacketSize = 3; - required uint32 replication = 4; // Actually a short - only 16 bits used - required uint32 fileBufferSize = 5; - optional bool encryptDataTransfer = 6 [default = false]; - optional uint64 trashInterval = 7 [default = 0]; - optional ChecksumTypeProto checksumType = 8 [default = CHECKSUM_CRC32]; -} - - -/** - * Directory listing - */ -message DirectoryListingProto { - repeated HdfsFileStatusProto partialListing = 1; - required uint32 remainingEntries = 2; -} - -/** - * Status of a snapshottable directory: besides the normal information for - * a directory status, also include snapshot quota, number of snapshots, and - * the full path of the parent directory. - */ -message SnapshottableDirectoryStatusProto { - required HdfsFileStatusProto dirStatus = 1; - - // Fields specific for snapshottable directory - required uint32 snapshot_quota = 2; - required uint32 snapshot_number = 3; - required bytes parent_fullpath = 4; -} - -/** - * Snapshottable directory listing - */ -message SnapshottableDirectoryListingProto { - repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1; -} - -/** - * Snapshot diff report entry - */ -message SnapshotDiffReportEntryProto { - required bytes fullpath = 1; - required string modificationLabel = 2; - optional bytes targetPath = 3; -} - -/** - * Snapshot diff report - */ -message SnapshotDiffReportProto { - // full path of the directory where snapshots were taken - required string snapshotRoot = 1; - required string fromSnapshot = 2; - required string toSnapshot = 3; - repeated SnapshotDiffReportEntryProto diffReportEntries = 4; -} - -/** - * Block information - * - * Please be wary of adding additional fields here, since INodeFiles - * need to fit in PB's default max message size of 64MB. - * We restrict the max # of blocks per file - * (dfs.namenode.fs-limits.max-blocks-per-file), but it's better - * to avoid changing this.
- */ -message BlockProto { - required uint64 blockId = 1; - required uint64 genStamp = 2; - optional uint64 numBytes = 3 [default = 0]; -} - -/** - * Information related to a snapshot - * TODO: add more information - */ -message SnapshotInfoProto { - required string snapshotName = 1; - required string snapshotRoot = 2; - required FsPermissionProto permission = 3; - required string owner = 4; - required string group = 5; - required string createTime = 6; - // TODO: do we need access time? -} - -/** - * Rolling upgrade status - */ -message RollingUpgradeStatusProto { - required string blockPoolId = 1; - optional bool finalized = 2 [default = false]; -} - - -/** - * A list of storage IDs. - */ -message StorageUuidsProto { - repeated string storageUuids = 1; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.pb.go deleted file mode 100644 index bc597e5c48a..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.pb.go +++ /dev/null @@ -1,663 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: inotify.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type EventType int32 - -const ( - EventType_EVENT_CREATE EventType = 0 - EventType_EVENT_CLOSE EventType = 1 - EventType_EVENT_APPEND EventType = 2 - EventType_EVENT_RENAME EventType = 3 - EventType_EVENT_METADATA EventType = 4 - EventType_EVENT_UNLINK EventType = 5 - EventType_EVENT_TRUNCATE EventType = 6 -) - -var EventType_name = map[int32]string{ - 0: "EVENT_CREATE", - 1: "EVENT_CLOSE", - 2: "EVENT_APPEND", - 3: "EVENT_RENAME", - 4: "EVENT_METADATA", - 5: "EVENT_UNLINK", - 6: "EVENT_TRUNCATE", -} -var EventType_value = map[string]int32{ - "EVENT_CREATE": 0, - "EVENT_CLOSE": 1, - "EVENT_APPEND": 2, - "EVENT_RENAME": 3, - "EVENT_METADATA": 4, - "EVENT_UNLINK": 5, - "EVENT_TRUNCATE": 6, -} - -func (x EventType) Enum() *EventType { - p := new(EventType) - *p = x - return p -} -func (x EventType) String() string { - return proto.EnumName(EventType_name, int32(x)) -} -func (x *EventType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EventType_value, data, "EventType") - if err != nil { - return err - } - *x = EventType(value) - return nil -} -func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } - -type INodeType int32 - -const ( - INodeType_I_TYPE_FILE INodeType = 0 - INodeType_I_TYPE_DIRECTORY INodeType = 1 - INodeType_I_TYPE_SYMLINK INodeType = 2 -) - -var INodeType_name = map[int32]string{ - 0: "I_TYPE_FILE", - 1: "I_TYPE_DIRECTORY", - 2: "I_TYPE_SYMLINK", -} -var INodeType_value = map[string]int32{ - "I_TYPE_FILE": 0, - "I_TYPE_DIRECTORY": 1, - "I_TYPE_SYMLINK": 2, -} - -func (x INodeType) Enum() *INodeType { - p := new(INodeType) - *p = x - return p -} -func (x INodeType) String() string { - return proto.EnumName(INodeType_name, int32(x)) -} -func (x *INodeType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(INodeType_value, data, "INodeType") - if err != nil { - return err - } - *x = INodeType(value) - return nil -} -func (INodeType) EnumDescriptor() ([]byte, []int) { return fileDescriptor7, []int{1} } - -type MetadataUpdateType int32 - -const ( - MetadataUpdateType_META_TYPE_TIMES MetadataUpdateType = 0 - 
MetadataUpdateType_META_TYPE_REPLICATION MetadataUpdateType = 1 - MetadataUpdateType_META_TYPE_OWNER MetadataUpdateType = 2 - MetadataUpdateType_META_TYPE_PERMS MetadataUpdateType = 3 - MetadataUpdateType_META_TYPE_ACLS MetadataUpdateType = 4 - MetadataUpdateType_META_TYPE_XATTRS MetadataUpdateType = 5 -) - -var MetadataUpdateType_name = map[int32]string{ - 0: "META_TYPE_TIMES", - 1: "META_TYPE_REPLICATION", - 2: "META_TYPE_OWNER", - 3: "META_TYPE_PERMS", - 4: "META_TYPE_ACLS", - 5: "META_TYPE_XATTRS", -} -var MetadataUpdateType_value = map[string]int32{ - "META_TYPE_TIMES": 0, - "META_TYPE_REPLICATION": 1, - "META_TYPE_OWNER": 2, - "META_TYPE_PERMS": 3, - "META_TYPE_ACLS": 4, - "META_TYPE_XATTRS": 5, -} - -func (x MetadataUpdateType) Enum() *MetadataUpdateType { - p := new(MetadataUpdateType) - *p = x - return p -} -func (x MetadataUpdateType) String() string { - return proto.EnumName(MetadataUpdateType_name, int32(x)) -} -func (x *MetadataUpdateType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetadataUpdateType_value, data, "MetadataUpdateType") - if err != nil { - return err - } - *x = MetadataUpdateType(value) - return nil -} -func (MetadataUpdateType) EnumDescriptor() ([]byte, []int) { return fileDescriptor7, []int{2} } - -type EventProto struct { - Type *EventType `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.EventType" json:"type,omitempty"` - Contents []byte `protobuf:"bytes,2,req,name=contents" json:"contents,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EventProto) Reset() { *m = EventProto{} } -func (m *EventProto) String() string { return proto.CompactTextString(m) } -func (*EventProto) ProtoMessage() {} -func (*EventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } - -func (m *EventProto) GetType() EventType { - if m != nil && m.Type != nil { - return *m.Type - } - return EventType_EVENT_CREATE -} - -func (m *EventProto) GetContents() []byte { - if m != nil { - return m.Contents - } - return nil -} - -type EventBatchProto struct { - Txid *int64 `protobuf:"varint,1,req,name=txid" json:"txid,omitempty"` - Events []*EventProto `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EventBatchProto) Reset() { *m = EventBatchProto{} } -func (m *EventBatchProto) String() string { return proto.CompactTextString(m) } -func (*EventBatchProto) ProtoMessage() {} -func (*EventBatchProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{1} } - -func (m *EventBatchProto) GetTxid() int64 { - if m != nil && m.Txid != nil { - return *m.Txid - } - return 0 -} - -func (m *EventBatchProto) GetEvents() []*EventProto { - if m != nil { - return m.Events - } - return nil -} - -type CreateEventProto struct { - Type *INodeType `protobuf:"varint,1,req,name=type,enum=hadoop.hdfs.INodeType" json:"type,omitempty"` - Path *string `protobuf:"bytes,2,req,name=path" json:"path,omitempty"` - Ctime *int64 `protobuf:"varint,3,req,name=ctime" json:"ctime,omitempty"` - OwnerName *string `protobuf:"bytes,4,req,name=ownerName" json:"ownerName,omitempty"` - GroupName *string `protobuf:"bytes,5,req,name=groupName" json:"groupName,omitempty"` - Perms *FsPermissionProto `protobuf:"bytes,6,req,name=perms" json:"perms,omitempty"` - Replication *int32 `protobuf:"varint,7,opt,name=replication" json:"replication,omitempty"` - SymlinkTarget *string `protobuf:"bytes,8,opt,name=symlinkTarget" json:"symlinkTarget,omitempty"` - Overwrite *bool `protobuf:"varint,9,opt,name=overwrite" 
json:"overwrite,omitempty"` - DefaultBlockSize *int64 `protobuf:"varint,10,opt,name=defaultBlockSize,def=0" json:"defaultBlockSize,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CreateEventProto) Reset() { *m = CreateEventProto{} } -func (m *CreateEventProto) String() string { return proto.CompactTextString(m) } -func (*CreateEventProto) ProtoMessage() {} -func (*CreateEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{2} } - -const Default_CreateEventProto_DefaultBlockSize int64 = 0 - -func (m *CreateEventProto) GetType() INodeType { - if m != nil && m.Type != nil { - return *m.Type - } - return INodeType_I_TYPE_FILE -} - -func (m *CreateEventProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *CreateEventProto) GetCtime() int64 { - if m != nil && m.Ctime != nil { - return *m.Ctime - } - return 0 -} - -func (m *CreateEventProto) GetOwnerName() string { - if m != nil && m.OwnerName != nil { - return *m.OwnerName - } - return "" -} - -func (m *CreateEventProto) GetGroupName() string { - if m != nil && m.GroupName != nil { - return *m.GroupName - } - return "" -} - -func (m *CreateEventProto) GetPerms() *FsPermissionProto { - if m != nil { - return m.Perms - } - return nil -} - -func (m *CreateEventProto) GetReplication() int32 { - if m != nil && m.Replication != nil { - return *m.Replication - } - return 0 -} - -func (m *CreateEventProto) GetSymlinkTarget() string { - if m != nil && m.SymlinkTarget != nil { - return *m.SymlinkTarget - } - return "" -} - -func (m *CreateEventProto) GetOverwrite() bool { - if m != nil && m.Overwrite != nil { - return *m.Overwrite - } - return false -} - -func (m *CreateEventProto) GetDefaultBlockSize() int64 { - if m != nil && m.DefaultBlockSize != nil { - return *m.DefaultBlockSize - } - return Default_CreateEventProto_DefaultBlockSize -} - -type CloseEventProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - FileSize *int64 `protobuf:"varint,2,req,name=fileSize" json:"fileSize,omitempty"` - Timestamp *int64 `protobuf:"varint,3,req,name=timestamp" json:"timestamp,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *CloseEventProto) Reset() { *m = CloseEventProto{} } -func (m *CloseEventProto) String() string { return proto.CompactTextString(m) } -func (*CloseEventProto) ProtoMessage() {} -func (*CloseEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{3} } - -func (m *CloseEventProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *CloseEventProto) GetFileSize() int64 { - if m != nil && m.FileSize != nil { - return *m.FileSize - } - return 0 -} - -func (m *CloseEventProto) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -type TruncateEventProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - FileSize *int64 `protobuf:"varint,2,req,name=fileSize" json:"fileSize,omitempty"` - Timestamp *int64 `protobuf:"varint,3,req,name=timestamp" json:"timestamp,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TruncateEventProto) Reset() { *m = TruncateEventProto{} } -func (m *TruncateEventProto) String() string { return proto.CompactTextString(m) } -func (*TruncateEventProto) ProtoMessage() {} -func (*TruncateEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{4} } - -func (m *TruncateEventProto) GetPath() string { - if m != nil && m.Path != 
nil { - return *m.Path - } - return "" -} - -func (m *TruncateEventProto) GetFileSize() int64 { - if m != nil && m.FileSize != nil { - return *m.FileSize - } - return 0 -} - -func (m *TruncateEventProto) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -type AppendEventProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - NewBlock *bool `protobuf:"varint,2,opt,name=newBlock,def=0" json:"newBlock,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AppendEventProto) Reset() { *m = AppendEventProto{} } -func (m *AppendEventProto) String() string { return proto.CompactTextString(m) } -func (*AppendEventProto) ProtoMessage() {} -func (*AppendEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{5} } - -const Default_AppendEventProto_NewBlock bool = false - -func (m *AppendEventProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *AppendEventProto) GetNewBlock() bool { - if m != nil && m.NewBlock != nil { - return *m.NewBlock - } - return Default_AppendEventProto_NewBlock -} - -type RenameEventProto struct { - SrcPath *string `protobuf:"bytes,1,req,name=srcPath" json:"srcPath,omitempty"` - DestPath *string `protobuf:"bytes,2,req,name=destPath" json:"destPath,omitempty"` - Timestamp *int64 `protobuf:"varint,3,req,name=timestamp" json:"timestamp,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RenameEventProto) Reset() { *m = RenameEventProto{} } -func (m *RenameEventProto) String() string { return proto.CompactTextString(m) } -func (*RenameEventProto) ProtoMessage() {} -func (*RenameEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{6} } - -func (m *RenameEventProto) GetSrcPath() string { - if m != nil && m.SrcPath != nil { - return *m.SrcPath - } - return "" -} - -func (m *RenameEventProto) GetDestPath() string { - if m != nil && m.DestPath != nil { - return *m.DestPath - } - return "" -} - -func (m *RenameEventProto) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -type MetadataUpdateEventProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - Type *MetadataUpdateType `protobuf:"varint,2,req,name=type,enum=hadoop.hdfs.MetadataUpdateType" json:"type,omitempty"` - Mtime *int64 `protobuf:"varint,3,opt,name=mtime" json:"mtime,omitempty"` - Atime *int64 `protobuf:"varint,4,opt,name=atime" json:"atime,omitempty"` - Replication *int32 `protobuf:"varint,5,opt,name=replication" json:"replication,omitempty"` - OwnerName *string `protobuf:"bytes,6,opt,name=ownerName" json:"ownerName,omitempty"` - GroupName *string `protobuf:"bytes,7,opt,name=groupName" json:"groupName,omitempty"` - Perms *FsPermissionProto `protobuf:"bytes,8,opt,name=perms" json:"perms,omitempty"` - Acls []*AclEntryProto `protobuf:"bytes,9,rep,name=acls" json:"acls,omitempty"` - XAttrs []*XAttrProto `protobuf:"bytes,10,rep,name=xAttrs" json:"xAttrs,omitempty"` - XAttrsRemoved *bool `protobuf:"varint,11,opt,name=xAttrsRemoved" json:"xAttrsRemoved,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MetadataUpdateEventProto) Reset() { *m = MetadataUpdateEventProto{} } -func (m *MetadataUpdateEventProto) String() string { return proto.CompactTextString(m) } -func (*MetadataUpdateEventProto) ProtoMessage() {} -func (*MetadataUpdateEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{7} } - -func (m 
*MetadataUpdateEventProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *MetadataUpdateEventProto) GetType() MetadataUpdateType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetadataUpdateType_META_TYPE_TIMES -} - -func (m *MetadataUpdateEventProto) GetMtime() int64 { - if m != nil && m.Mtime != nil { - return *m.Mtime - } - return 0 -} - -func (m *MetadataUpdateEventProto) GetAtime() int64 { - if m != nil && m.Atime != nil { - return *m.Atime - } - return 0 -} - -func (m *MetadataUpdateEventProto) GetReplication() int32 { - if m != nil && m.Replication != nil { - return *m.Replication - } - return 0 -} - -func (m *MetadataUpdateEventProto) GetOwnerName() string { - if m != nil && m.OwnerName != nil { - return *m.OwnerName - } - return "" -} - -func (m *MetadataUpdateEventProto) GetGroupName() string { - if m != nil && m.GroupName != nil { - return *m.GroupName - } - return "" -} - -func (m *MetadataUpdateEventProto) GetPerms() *FsPermissionProto { - if m != nil { - return m.Perms - } - return nil -} - -func (m *MetadataUpdateEventProto) GetAcls() []*AclEntryProto { - if m != nil { - return m.Acls - } - return nil -} - -func (m *MetadataUpdateEventProto) GetXAttrs() []*XAttrProto { - if m != nil { - return m.XAttrs - } - return nil -} - -func (m *MetadataUpdateEventProto) GetXAttrsRemoved() bool { - if m != nil && m.XAttrsRemoved != nil { - return *m.XAttrsRemoved - } - return false -} - -type UnlinkEventProto struct { - Path *string `protobuf:"bytes,1,req,name=path" json:"path,omitempty"` - Timestamp *int64 `protobuf:"varint,2,req,name=timestamp" json:"timestamp,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *UnlinkEventProto) Reset() { *m = UnlinkEventProto{} } -func (m *UnlinkEventProto) String() string { return proto.CompactTextString(m) } -func (*UnlinkEventProto) ProtoMessage() {} -func (*UnlinkEventProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{8} } - -func (m *UnlinkEventProto) GetPath() string { - if m != nil && m.Path != nil { - return *m.Path - } - return "" -} - -func (m *UnlinkEventProto) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -type EventsListProto struct { - Events []*EventProto `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - FirstTxid *int64 `protobuf:"varint,2,req,name=firstTxid" json:"firstTxid,omitempty"` - LastTxid *int64 `protobuf:"varint,3,req,name=lastTxid" json:"lastTxid,omitempty"` - SyncTxid *int64 `protobuf:"varint,4,req,name=syncTxid" json:"syncTxid,omitempty"` - Batch []*EventBatchProto `protobuf:"bytes,5,rep,name=batch" json:"batch,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *EventsListProto) Reset() { *m = EventsListProto{} } -func (m *EventsListProto) String() string { return proto.CompactTextString(m) } -func (*EventsListProto) ProtoMessage() {} -func (*EventsListProto) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{9} } - -func (m *EventsListProto) GetEvents() []*EventProto { - if m != nil { - return m.Events - } - return nil -} - -func (m *EventsListProto) GetFirstTxid() int64 { - if m != nil && m.FirstTxid != nil { - return *m.FirstTxid - } - return 0 -} - -func (m *EventsListProto) GetLastTxid() int64 { - if m != nil && m.LastTxid != nil { - return *m.LastTxid - } - return 0 -} - -func (m *EventsListProto) GetSyncTxid() int64 { - if m != nil && m.SyncTxid != nil { - return *m.SyncTxid - } - return 0 -} - -func (m *EventsListProto) 
GetBatch() []*EventBatchProto { - if m != nil { - return m.Batch - } - return nil -} - -func init() { - proto.RegisterType((*EventProto)(nil), "hadoop.hdfs.EventProto") - proto.RegisterType((*EventBatchProto)(nil), "hadoop.hdfs.EventBatchProto") - proto.RegisterType((*CreateEventProto)(nil), "hadoop.hdfs.CreateEventProto") - proto.RegisterType((*CloseEventProto)(nil), "hadoop.hdfs.CloseEventProto") - proto.RegisterType((*TruncateEventProto)(nil), "hadoop.hdfs.TruncateEventProto") - proto.RegisterType((*AppendEventProto)(nil), "hadoop.hdfs.AppendEventProto") - proto.RegisterType((*RenameEventProto)(nil), "hadoop.hdfs.RenameEventProto") - proto.RegisterType((*MetadataUpdateEventProto)(nil), "hadoop.hdfs.MetadataUpdateEventProto") - proto.RegisterType((*UnlinkEventProto)(nil), "hadoop.hdfs.UnlinkEventProto") - proto.RegisterType((*EventsListProto)(nil), "hadoop.hdfs.EventsListProto") - proto.RegisterEnum("hadoop.hdfs.EventType", EventType_name, EventType_value) - proto.RegisterEnum("hadoop.hdfs.INodeType", INodeType_name, INodeType_value) - proto.RegisterEnum("hadoop.hdfs.MetadataUpdateType", MetadataUpdateType_name, MetadataUpdateType_value) -} - -func init() { proto.RegisterFile("inotify.proto", fileDescriptor7) } - -var fileDescriptor7 = []byte{ - // 917 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x5f, 0x6f, 0xe3, 0x44, - 0x10, 0x3f, 0x3b, 0x71, 0x2f, 0x99, 0x5c, 0xaf, 0xab, 0xa5, 0x80, 0x89, 0x4e, 0x10, 0x22, 0x90, - 0xa2, 0x4a, 0x04, 0x54, 0x78, 0xe1, 0xde, 0xdc, 0xd4, 0x95, 0x2c, 0x12, 0x37, 0xda, 0xb8, 0xc7, - 0xf5, 0xa9, 0xda, 0xb3, 0x37, 0x8d, 0x75, 0x8e, 0x6d, 0x79, 0xb7, 0x7f, 0xc2, 0x67, 0xe0, 0x03, - 0xf0, 0xc8, 0x33, 0x9f, 0x85, 0xef, 0xc2, 0x57, 0x40, 0xbb, 0xeb, 0xda, 0xf1, 0xe5, 0x50, 0xfb, - 0xc0, 0x9b, 0xe7, 0x37, 0xbf, 0x9d, 0xf9, 0x79, 0x76, 0x66, 0x6c, 0xd8, 0x8f, 0xd3, 0x4c, 0xc4, - 0xcb, 0xcd, 0x38, 0x2f, 0x32, 0x91, 0xe1, 0xde, 0x8a, 0x46, 0x59, 0x96, 0x8f, 0x57, 0xd1, 0x92, - 0xf7, 0xbb, 0x34, 0x4c, 0x34, 0xde, 0xef, 0xdd, 0x53, 0x21, 0x8a, 0xd2, 0x00, 0xe9, 0xd5, 0xcf, - 0xc3, 0x00, 0xc0, 0xbd, 0x65, 0xa9, 0x98, 0xab, 0xe3, 0x47, 0xd0, 0x16, 0x9b, 0x9c, 0xd9, 0xc6, - 0xc0, 0x1c, 0xbd, 0x3c, 0xfe, 0x6c, 0xbc, 0x15, 0x6d, 0xac, 0x68, 0xc1, 0x26, 0x67, 0x44, 0x71, - 0x70, 0x1f, 0x3a, 0x61, 0x96, 0x0a, 0x96, 0x0a, 0x6e, 0x9b, 0x03, 0x73, 0xf4, 0x82, 0x54, 0xf6, - 0xf0, 0x0d, 0x1c, 0x28, 0xfa, 0x09, 0x15, 0xe1, 0x4a, 0x87, 0xc6, 0xd0, 0x16, 0xf7, 0x71, 0xa4, - 0x42, 0xb7, 0x88, 0x7a, 0xc6, 0xdf, 0xc3, 0x1e, 0xbb, 0x2d, 0x03, 0xb4, 0x46, 0xbd, 0xe3, 0xcf, - 0x77, 0x13, 0xaa, 0xc3, 0xa4, 0xa4, 0x0d, 0xff, 0x31, 0x01, 0x4d, 0x0a, 0x46, 0x05, 0x7b, 0xa2, - 0x68, 0xcf, 0xcf, 0x22, 0xb6, 0x25, 0x1a, 0x43, 0x3b, 0xa7, 0x62, 0xa5, 0x04, 0x77, 0x89, 0x7a, - 0xc6, 0x87, 0x60, 0x85, 0x22, 0x5e, 0x33, 0xbb, 0xa5, 0xa4, 0x69, 0x03, 0xbf, 0x82, 0x6e, 0x76, - 0x97, 0xb2, 0xc2, 0xa7, 0x6b, 0x66, 0xb7, 0x15, 0xbd, 0x06, 0xa4, 0xf7, 0xba, 0xc8, 0x6e, 0x72, - 0xe5, 0xb5, 0xb4, 0xb7, 0x02, 0xf0, 0x4f, 0x60, 0xe5, 0xac, 0x58, 0x73, 0x7b, 0x6f, 0x60, 0x8e, - 0x7a, 0xc7, 0x5f, 0x36, 0x24, 0x9d, 0xf1, 0x39, 0x2b, 0xd6, 0x31, 0xe7, 0x71, 0x96, 0xea, 0xb7, - 0xd3, 0x64, 0x3c, 0x80, 0x5e, 0xc1, 0xf2, 0x24, 0x0e, 0xa9, 0x88, 0xb3, 0xd4, 0x7e, 0x3e, 0x30, - 0x46, 0x16, 0xd9, 0x86, 0xf0, 0x37, 0xb0, 0xcf, 0x37, 0xeb, 0x24, 0x4e, 0xdf, 0x07, 0xb4, 0xb8, - 0x66, 0xc2, 0xee, 0x0c, 0x8c, 0x51, 0x97, 0x34, 0x41, 0xa5, 0xfc, 0x96, 0x15, 0x77, 0x45, 0x2c, - 0x98, 0xdd, 0x1d, 0x18, 0xa3, 0x0e, 0xa9, 0x01, 0xfc, 0x1d, 0xa0, 0x88, 0x2d, 
0xe9, 0x4d, 0x22, - 0x4e, 0x92, 0x2c, 0x7c, 0xbf, 0x88, 0x7f, 0x63, 0x36, 0x0c, 0x8c, 0x51, 0xeb, 0xb5, 0xf1, 0x03, - 0xd9, 0x71, 0x0d, 0xaf, 0xe0, 0x60, 0x92, 0x64, 0x7c, 0xbb, 0xde, 0x0f, 0x35, 0x34, 0xb6, 0x6a, - 0xd8, 0x87, 0xce, 0x32, 0x4e, 0x98, 0x8a, 0x66, 0xaa, 0x32, 0x56, 0xb6, 0xd4, 0x23, 0x2b, 0xca, - 0x05, 0x5d, 0xe7, 0x65, 0x8d, 0x6b, 0x60, 0xf8, 0x0e, 0x70, 0x50, 0xdc, 0xa4, 0x61, 0xf3, 0x4e, - 0xff, 0xdf, 0x1c, 0x1e, 0x20, 0x27, 0xcf, 0x59, 0x1a, 0x3d, 0x92, 0xe1, 0x6b, 0xe8, 0xa4, 0xec, - 0x4e, 0xbd, 0xbc, 0x6d, 0xca, 0xc2, 0xbd, 0xb6, 0x96, 0x34, 0xe1, 0x8c, 0x54, 0xf0, 0x70, 0x09, - 0x88, 0xb0, 0x94, 0xae, 0xb7, 0xc5, 0xda, 0xf0, 0x9c, 0x17, 0xe1, 0xbc, 0x8e, 0xf6, 0x60, 0x4a, - 0xc9, 0x11, 0xe3, 0x62, 0x5e, 0xb7, 0x5c, 0x65, 0x3f, 0x22, 0xf9, 0xaf, 0x16, 0xd8, 0x33, 0x26, - 0x68, 0x44, 0x05, 0xbd, 0xc8, 0xa3, 0xc7, 0xab, 0xf3, 0x63, 0x39, 0x05, 0xa6, 0x9a, 0x82, 0xaf, - 0x1a, 0x2d, 0xd7, 0x0c, 0xb4, 0x35, 0x0e, 0x87, 0x60, 0xad, 0xcb, 0xd6, 0x37, 0x64, 0xeb, 0x2b, - 0x43, 0xa2, 0x54, 0xa1, 0x6d, 0x8d, 0x2a, 0xe3, 0xc3, 0xf6, 0xb4, 0x76, 0xdb, 0xb3, 0x31, 0x32, - 0x7b, 0xaa, 0x35, 0xff, 0x6b, 0x64, 0x9e, 0x6b, 0xef, 0x47, 0x46, 0x46, 0xb6, 0xf4, 0x93, 0x47, - 0x66, 0x0c, 0x6d, 0x1a, 0x26, 0xdc, 0xee, 0xaa, 0xf5, 0xd1, 0x6f, 0x1c, 0x72, 0xc2, 0xc4, 0x4d, - 0x45, 0xb1, 0xd1, 0x07, 0x14, 0x4f, 0x2e, 0x9c, 0x7b, 0x47, 0x88, 0x82, 0xdb, 0xf0, 0x91, 0x85, - 0xf3, 0x56, 0xba, 0xca, 0x85, 0xa3, 0x69, 0x72, 0xe2, 0xf4, 0x13, 0x61, 0xeb, 0xec, 0x96, 0x45, - 0x76, 0x4f, 0xcd, 0x53, 0x13, 0x1c, 0x9e, 0x02, 0xba, 0x48, 0xe5, 0x04, 0x3e, 0x72, 0x47, 0x8d, - 0x2b, 0x37, 0x3f, 0xbc, 0xf2, 0xbf, 0x8d, 0x72, 0x6b, 0xf2, 0x69, 0xcc, 0xcb, 0x28, 0xf5, 0x86, - 0x34, 0x9e, 0xb4, 0x21, 0x65, 0x8a, 0x65, 0x5c, 0x70, 0x11, 0xc8, 0x5d, 0x5b, 0xa6, 0xa8, 0x00, - 0xd9, 0x8f, 0x09, 0x2d, 0x9d, 0xba, 0xe5, 0x2a, 0x5b, 0xfa, 0xf8, 0x26, 0x0d, 0x95, 0xaf, 0xad, - 0x7d, 0x0f, 0x36, 0x3e, 0x06, 0xeb, 0x9d, 0x5c, 0xe5, 0xb6, 0xa5, 0x54, 0xbc, 0xda, 0x55, 0x51, - 0x6f, 0x7a, 0xa2, 0xa9, 0x47, 0xbf, 0x1b, 0xd0, 0xad, 0xbe, 0x19, 0x18, 0xc1, 0x0b, 0xf7, 0x8d, - 0xeb, 0x07, 0x57, 0x13, 0xe2, 0x3a, 0x81, 0x8b, 0x9e, 0xe1, 0x03, 0xe8, 0x95, 0xc8, 0xf4, 0x7c, - 0xe1, 0x22, 0xa3, 0xa6, 0x38, 0xf3, 0xb9, 0xeb, 0x9f, 0x22, 0xb3, 0x46, 0x88, 0xeb, 0x3b, 0x33, - 0x17, 0xb5, 0x30, 0x86, 0x97, 0x1a, 0x99, 0xb9, 0x81, 0x73, 0xea, 0x04, 0x0e, 0x6a, 0xd7, 0xac, - 0x0b, 0x7f, 0xea, 0xf9, 0xbf, 0x20, 0xab, 0x66, 0x05, 0xe4, 0xc2, 0x9f, 0xc8, 0x74, 0x7b, 0x47, - 0x67, 0xd0, 0xad, 0x3e, 0x06, 0x32, 0xb7, 0x77, 0x15, 0x5c, 0xce, 0xdd, 0xab, 0x33, 0x6f, 0x2a, - 0xc5, 0x1c, 0x02, 0x2a, 0x81, 0x53, 0x8f, 0xb8, 0x93, 0xe0, 0x9c, 0x5c, 0x22, 0x43, 0xc6, 0x29, - 0xd1, 0xc5, 0xe5, 0x4c, 0xc5, 0x36, 0x8f, 0xfe, 0x30, 0x00, 0xef, 0xce, 0x13, 0xfe, 0x04, 0x0e, - 0xa4, 0x24, 0xcd, 0x0e, 0xbc, 0x99, 0xbb, 0x40, 0xcf, 0xf0, 0x17, 0xf0, 0x69, 0x0d, 0x12, 0x77, - 0x3e, 0xf5, 0x26, 0x4e, 0xe0, 0x9d, 0xfb, 0xc8, 0x68, 0xf2, 0xcf, 0x7f, 0xf5, 0x5d, 0x82, 0xcc, - 0x26, 0x38, 0x77, 0xc9, 0x6c, 0xa1, 0x5f, 0xb9, 0x06, 0x9d, 0xc9, 0x74, 0x81, 0xda, 0x52, 0x6e, - 0x8d, 0xbd, 0x75, 0x82, 0x80, 0x2c, 0x90, 0x75, 0xf2, 0x33, 0x7c, 0x9b, 0x15, 0xd7, 0x63, 0x9a, - 0xd3, 0x70, 0xc5, 0x1a, 0x57, 0xa4, 0xbe, 0xf5, 0x61, 0x56, 0xfe, 0x0d, 0x9c, 0xec, 0x7b, 0xfa, - 0xa7, 0x41, 0xdd, 0x17, 0xff, 0xd3, 0x30, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x80, 0xd0, 0x57, - 0x29, 0x47, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.proto deleted file mode 
100644 index 53399029582..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/inotify.proto +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * These .proto interfaces are private and stable. - * Please see http://wiki.apache.org/hadoop/Compatibility - * for what changes are allowed for a *stable* .proto interface. - */ - -// This file contains protocol buffers used to communicate edits to clients -// as part of the inotify system. - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "InotifyProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -import "acl.proto"; -import "xattr.proto"; -import "hdfs.proto"; - -enum EventType { - EVENT_CREATE = 0x0; - EVENT_CLOSE = 0x1; - EVENT_APPEND = 0x2; - EVENT_RENAME = 0x3; - EVENT_METADATA = 0x4; - EVENT_UNLINK = 0x5; - EVENT_TRUNCATE = 0x6; -} - -message EventProto { - required EventType type = 1; - required bytes contents = 2; -} - -message EventBatchProto { - required int64 txid = 1; - repeated EventProto events = 2; -} - -enum INodeType { - I_TYPE_FILE = 0x0; - I_TYPE_DIRECTORY = 0x1; - I_TYPE_SYMLINK = 0x2; -} - -enum MetadataUpdateType { - META_TYPE_TIMES = 0x0; - META_TYPE_REPLICATION = 0x1; - META_TYPE_OWNER = 0x2; - META_TYPE_PERMS = 0x3; - META_TYPE_ACLS = 0x4; - META_TYPE_XATTRS = 0x5; -} - -message CreateEventProto { - required INodeType type = 1; - required string path = 2; - required int64 ctime = 3; - required string ownerName = 4; - required string groupName = 5; - required FsPermissionProto perms = 6; - optional int32 replication = 7; - optional string symlinkTarget = 8; - optional bool overwrite = 9; - optional int64 defaultBlockSize = 10 [default=0]; -} - -message CloseEventProto { - required string path = 1; - required int64 fileSize = 2; - required int64 timestamp = 3; -} - -message TruncateEventProto { - required string path = 1; - required int64 fileSize = 2; - required int64 timestamp = 3; -} - -message AppendEventProto { - required string path = 1; - optional bool newBlock = 2 [default = false]; -} - -message RenameEventProto { - required string srcPath = 1; - required string destPath = 2; - required int64 timestamp = 3; -} - -message MetadataUpdateEventProto { - required string path = 1; - required MetadataUpdateType type = 2; - optional int64 mtime = 3; - optional int64 atime = 4; - optional int32 replication = 5; - optional string ownerName = 6; - optional string groupName = 7; - optional FsPermissionProto perms = 8; - repeated AclEntryProto acls = 9; - repeated XAttrProto xAttrs = 10; - optional bool xAttrsRemoved = 11; -} - -message UnlinkEventProto { - required string path = 1; - required int64 timestamp = 2; -} - -message EventsListProto 
{ - repeated EventProto events = 1; // deprecated - required int64 firstTxid = 2; - required int64 lastTxid = 3; - required int64 syncTxid = 4; - repeated EventBatchProto batch = 5; -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.pb.go b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.pb.go deleted file mode 100644 index 06aa8bc17fb..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.pb.go +++ /dev/null @@ -1,323 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: xattr.proto - -package hadoop_hdfs - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type XAttrSetFlagProto int32 - -const ( - XAttrSetFlagProto_XATTR_CREATE XAttrSetFlagProto = 1 - XAttrSetFlagProto_XATTR_REPLACE XAttrSetFlagProto = 2 -) - -var XAttrSetFlagProto_name = map[int32]string{ - 1: "XATTR_CREATE", - 2: "XATTR_REPLACE", -} -var XAttrSetFlagProto_value = map[string]int32{ - "XATTR_CREATE": 1, - "XATTR_REPLACE": 2, -} - -func (x XAttrSetFlagProto) Enum() *XAttrSetFlagProto { - p := new(XAttrSetFlagProto) - *p = x - return p -} -func (x XAttrSetFlagProto) String() string { - return proto.EnumName(XAttrSetFlagProto_name, int32(x)) -} -func (x *XAttrSetFlagProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(XAttrSetFlagProto_value, data, "XAttrSetFlagProto") - if err != nil { - return err - } - *x = XAttrSetFlagProto(value) - return nil -} -func (XAttrSetFlagProto) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -type XAttrProto_XAttrNamespaceProto int32 - -const ( - XAttrProto_USER XAttrProto_XAttrNamespaceProto = 0 - XAttrProto_TRUSTED XAttrProto_XAttrNamespaceProto = 1 - XAttrProto_SECURITY XAttrProto_XAttrNamespaceProto = 2 - XAttrProto_SYSTEM XAttrProto_XAttrNamespaceProto = 3 - XAttrProto_RAW XAttrProto_XAttrNamespaceProto = 4 -) - -var XAttrProto_XAttrNamespaceProto_name = map[int32]string{ - 0: "USER", - 1: "TRUSTED", - 2: "SECURITY", - 3: "SYSTEM", - 4: "RAW", -} -var XAttrProto_XAttrNamespaceProto_value = map[string]int32{ - "USER": 0, - "TRUSTED": 1, - "SECURITY": 2, - "SYSTEM": 3, - "RAW": 4, -} - -func (x XAttrProto_XAttrNamespaceProto) Enum() *XAttrProto_XAttrNamespaceProto { - p := new(XAttrProto_XAttrNamespaceProto) - *p = x - return p -} -func (x XAttrProto_XAttrNamespaceProto) String() string { - return proto.EnumName(XAttrProto_XAttrNamespaceProto_name, int32(x)) -} -func (x *XAttrProto_XAttrNamespaceProto) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(XAttrProto_XAttrNamespaceProto_value, data, "XAttrProto_XAttrNamespaceProto") - if err != nil { - return err - } - *x = XAttrProto_XAttrNamespaceProto(value) - return nil -} -func (XAttrProto_XAttrNamespaceProto) EnumDescriptor() ([]byte, []int) { - return fileDescriptor1, []int{0, 0} -} - -type XAttrProto struct { - Namespace *XAttrProto_XAttrNamespaceProto `protobuf:"varint,1,req,name=namespace,enum=hadoop.hdfs.XAttrProto_XAttrNamespaceProto" json:"namespace,omitempty"` - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *XAttrProto) Reset() { *m = XAttrProto{} } -func (m *XAttrProto) String() string { return proto.CompactTextString(m) } -func (*XAttrProto) ProtoMessage() {} 
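Worth noting in the generated code above and below: every proto2 field becomes a pointer (or slice) field, and each Get* accessor checks both the receiver and the field for nil before dereferencing, falling back to the zero value or the declared default. A small sketch of what that buys callers — hypothetical attribute values, assuming the same vendored packages as the earlier example:

package main

import (
	"fmt"

	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
	"github.com/golang/protobuf/proto"
)

func main() {
	x := &hdfs.XAttrProto{
		Namespace: hdfs.XAttrProto_USER.Enum(), // Enum() returns a pointer to the constant
		Name:      proto.String("user.origin"), // hypothetical xattr name
		Value:     []byte("uploaded-by-ci"),
	}
	fmt.Println(x.GetName()) // "user.origin"

	// The accessors tolerate a nil message entirely, so callers can chain
	// through optional sub-messages without explicit nil checks.
	var unset *hdfs.XAttrProto
	fmt.Println(unset.GetName())                              // "" rather than a panic
	fmt.Println(unset.GetNamespace() == hdfs.XAttrProto_USER) // true: declared default
}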
-func (*XAttrProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *XAttrProto) GetNamespace() XAttrProto_XAttrNamespaceProto { - if m != nil && m.Namespace != nil { - return *m.Namespace - } - return XAttrProto_USER -} - -func (m *XAttrProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *XAttrProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetXAttrRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XAttr *XAttrProto `protobuf:"bytes,2,opt,name=xAttr" json:"xAttr,omitempty"` - Flag *uint32 `protobuf:"varint,3,opt,name=flag" json:"flag,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetXAttrRequestProto) Reset() { *m = SetXAttrRequestProto{} } -func (m *SetXAttrRequestProto) String() string { return proto.CompactTextString(m) } -func (*SetXAttrRequestProto) ProtoMessage() {} -func (*SetXAttrRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -func (m *SetXAttrRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *SetXAttrRequestProto) GetXAttr() *XAttrProto { - if m != nil { - return m.XAttr - } - return nil -} - -func (m *SetXAttrRequestProto) GetFlag() uint32 { - if m != nil && m.Flag != nil { - return *m.Flag - } - return 0 -} - -type SetXAttrResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *SetXAttrResponseProto) Reset() { *m = SetXAttrResponseProto{} } -func (m *SetXAttrResponseProto) String() string { return proto.CompactTextString(m) } -func (*SetXAttrResponseProto) ProtoMessage() {} -func (*SetXAttrResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -type GetXAttrsRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XAttrs []*XAttrProto `protobuf:"bytes,2,rep,name=xAttrs" json:"xAttrs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetXAttrsRequestProto) Reset() { *m = GetXAttrsRequestProto{} } -func (m *GetXAttrsRequestProto) String() string { return proto.CompactTextString(m) } -func (*GetXAttrsRequestProto) ProtoMessage() {} -func (*GetXAttrsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } - -func (m *GetXAttrsRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *GetXAttrsRequestProto) GetXAttrs() []*XAttrProto { - if m != nil { - return m.XAttrs - } - return nil -} - -type GetXAttrsResponseProto struct { - XAttrs []*XAttrProto `protobuf:"bytes,1,rep,name=xAttrs" json:"xAttrs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetXAttrsResponseProto) Reset() { *m = GetXAttrsResponseProto{} } -func (m *GetXAttrsResponseProto) String() string { return proto.CompactTextString(m) } -func (*GetXAttrsResponseProto) ProtoMessage() {} -func (*GetXAttrsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } - -func (m *GetXAttrsResponseProto) GetXAttrs() []*XAttrProto { - if m != nil { - return m.XAttrs - } - return nil -} - -type ListXAttrsRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListXAttrsRequestProto) Reset() { *m = ListXAttrsRequestProto{} } -func (m *ListXAttrsRequestProto) String() string { return proto.CompactTextString(m) } -func (*ListXAttrsRequestProto) ProtoMessage() {} -func 
(*ListXAttrsRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } - -func (m *ListXAttrsRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -type ListXAttrsResponseProto struct { - XAttrs []*XAttrProto `protobuf:"bytes,1,rep,name=xAttrs" json:"xAttrs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListXAttrsResponseProto) Reset() { *m = ListXAttrsResponseProto{} } -func (m *ListXAttrsResponseProto) String() string { return proto.CompactTextString(m) } -func (*ListXAttrsResponseProto) ProtoMessage() {} -func (*ListXAttrsResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } - -func (m *ListXAttrsResponseProto) GetXAttrs() []*XAttrProto { - if m != nil { - return m.XAttrs - } - return nil -} - -type RemoveXAttrRequestProto struct { - Src *string `protobuf:"bytes,1,req,name=src" json:"src,omitempty"` - XAttr *XAttrProto `protobuf:"bytes,2,opt,name=xAttr" json:"xAttr,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveXAttrRequestProto) Reset() { *m = RemoveXAttrRequestProto{} } -func (m *RemoveXAttrRequestProto) String() string { return proto.CompactTextString(m) } -func (*RemoveXAttrRequestProto) ProtoMessage() {} -func (*RemoveXAttrRequestProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } - -func (m *RemoveXAttrRequestProto) GetSrc() string { - if m != nil && m.Src != nil { - return *m.Src - } - return "" -} - -func (m *RemoveXAttrRequestProto) GetXAttr() *XAttrProto { - if m != nil { - return m.XAttr - } - return nil -} - -type RemoveXAttrResponseProto struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *RemoveXAttrResponseProto) Reset() { *m = RemoveXAttrResponseProto{} } -func (m *RemoveXAttrResponseProto) String() string { return proto.CompactTextString(m) } -func (*RemoveXAttrResponseProto) ProtoMessage() {} -func (*RemoveXAttrResponseProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } - -func init() { - proto.RegisterType((*XAttrProto)(nil), "hadoop.hdfs.XAttrProto") - proto.RegisterType((*SetXAttrRequestProto)(nil), "hadoop.hdfs.SetXAttrRequestProto") - proto.RegisterType((*SetXAttrResponseProto)(nil), "hadoop.hdfs.SetXAttrResponseProto") - proto.RegisterType((*GetXAttrsRequestProto)(nil), "hadoop.hdfs.GetXAttrsRequestProto") - proto.RegisterType((*GetXAttrsResponseProto)(nil), "hadoop.hdfs.GetXAttrsResponseProto") - proto.RegisterType((*ListXAttrsRequestProto)(nil), "hadoop.hdfs.ListXAttrsRequestProto") - proto.RegisterType((*ListXAttrsResponseProto)(nil), "hadoop.hdfs.ListXAttrsResponseProto") - proto.RegisterType((*RemoveXAttrRequestProto)(nil), "hadoop.hdfs.RemoveXAttrRequestProto") - proto.RegisterType((*RemoveXAttrResponseProto)(nil), "hadoop.hdfs.RemoveXAttrResponseProto") - proto.RegisterEnum("hadoop.hdfs.XAttrSetFlagProto", XAttrSetFlagProto_name, XAttrSetFlagProto_value) - proto.RegisterEnum("hadoop.hdfs.XAttrProto_XAttrNamespaceProto", XAttrProto_XAttrNamespaceProto_name, XAttrProto_XAttrNamespaceProto_value) -} - -func init() { proto.RegisterFile("xattr.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 408 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xdf, 0x6e, 0xd3, 0x30, - 0x18, 0xc5, 0x71, 0xd2, 0xfd, 0xe9, 0x97, 0x0e, 0x79, 0x66, 0x5b, 0x22, 0xae, 0x22, 0x4b, 0x48, - 0xd1, 0x10, 0x41, 0xda, 0x0d, 0xdc, 0x66, 0xc5, 0xa0, 0xa2, 0x01, 0x93, 0x93, 0x8a, 0x6d, 0x37, - 0xc8, 0xca, 0xbc, 0x16, 0x91, 0xd5, 
0x21, 0x76, 0xab, 0x3e, 0x0e, 0xcf, 0xc4, 0x13, 0xa1, 0x38, - 0xad, 0x92, 0x4a, 0xfc, 0xa9, 0xc4, 0xee, 0x8e, 0x3f, 0x9f, 0x9c, 0xf3, 0xfb, 0x14, 0x19, 0xbc, - 0xa5, 0x30, 0xa6, 0x8a, 0xcb, 0x4a, 0x19, 0x45, 0xbc, 0xa9, 0xb8, 0x55, 0xaa, 0x8c, 0xa7, 0xb7, - 0x77, 0x9a, 0xfe, 0x44, 0x00, 0x57, 0x89, 0x31, 0xd5, 0xa5, 0xbd, 0x1b, 0x41, 0x7f, 0x26, 0xee, - 0xa5, 0x2e, 0x45, 0x2e, 0x03, 0x14, 0x3a, 0xd1, 0xe3, 0xb3, 0xe7, 0x71, 0xc7, 0x1f, 0xb7, 0xde, - 0x46, 0x7e, 0x5c, 0xbb, 0xed, 0x8c, 0xb7, 0x5f, 0x13, 0x02, 0xbd, 0xfa, 0x10, 0x38, 0xa1, 0x13, - 0xf5, 0xb9, 0xd5, 0xe4, 0x08, 0x76, 0x16, 0xa2, 0x98, 0xcb, 0xc0, 0x0d, 0x51, 0x34, 0xe0, 0xcd, - 0x81, 0x7e, 0x82, 0x27, 0xbf, 0xc9, 0x22, 0xfb, 0xd0, 0x1b, 0xa7, 0x8c, 0xe3, 0x47, 0xc4, 0x83, - 0xbd, 0x8c, 0x8f, 0xd3, 0x8c, 0xbd, 0xc1, 0x88, 0x0c, 0x60, 0x3f, 0x65, 0xc3, 0x31, 0x1f, 0x65, - 0xd7, 0xd8, 0x21, 0x00, 0xbb, 0xe9, 0x75, 0x9a, 0xb1, 0x0f, 0xd8, 0x25, 0x7b, 0xe0, 0xf2, 0xe4, - 0x33, 0xee, 0xd1, 0x6f, 0x70, 0x94, 0x4a, 0x63, 0x33, 0xb9, 0xfc, 0x3e, 0x97, 0xda, 0x34, 0x89, - 0x18, 0x5c, 0x5d, 0xe5, 0x76, 0xaf, 0x3e, 0xaf, 0x25, 0x79, 0x01, 0x3b, 0xcb, 0xda, 0x16, 0x38, - 0x21, 0x8a, 0xbc, 0x33, 0xff, 0x0f, 0xbb, 0xf2, 0xc6, 0x55, 0xef, 0x74, 0x57, 0x88, 0x89, 0xc5, - 0x3f, 0xe0, 0x56, 0x53, 0x1f, 0x8e, 0xdb, 0x32, 0x5d, 0xaa, 0x99, 0x6e, 0xf8, 0xe9, 0x0d, 0x1c, - 0xbf, 0x5b, 0x5d, 0xe8, 0x7f, 0x60, 0xbc, 0x84, 0x5d, 0x5b, 0xa0, 0x03, 0x27, 0x74, 0xff, 0xc6, - 0xb1, 0xb2, 0xd1, 0x11, 0x9c, 0x74, 0xb2, 0x3b, 0xad, 0x9d, 0x28, 0xb4, 0x5d, 0xd4, 0x29, 0x9c, - 0x5c, 0x7c, 0xd5, 0x5b, 0x71, 0xd2, 0xf7, 0xe0, 0x77, 0xbd, 0xff, 0xd5, 0x7b, 0x03, 0x3e, 0x97, - 0xf7, 0x6a, 0x21, 0x1f, 0xfe, 0x3f, 0xd1, 0xa7, 0x10, 0x6c, 0x64, 0x77, 0x40, 0x4f, 0x5f, 0xc3, - 0xa1, 0x9d, 0xa6, 0xd2, 0xbc, 0x2d, 0xc4, 0x64, 0xdd, 0x38, 0xb8, 0x4a, 0xb2, 0x8c, 0x7f, 0x19, - 0x72, 0x96, 0x64, 0x0c, 0x23, 0x72, 0x08, 0x07, 0xcd, 0x84, 0xb3, 0xcb, 0x8b, 0x64, 0xc8, 0xb0, - 0x73, 0xfe, 0x0a, 0x9e, 0xa9, 0x6a, 0x12, 0x8b, 0x52, 0xe4, 0x53, 0xb9, 0x41, 0x60, 0x1f, 0x56, - 0xae, 0x8a, 0x46, 0x9c, 0x7b, 0x2d, 0x91, 0xfe, 0x81, 0xd0, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x56, 0x91, 0x13, 0x1d, 0x7f, 0x03, 0x00, 0x00, -} diff --git a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.proto b/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.proto deleted file mode 100644 index 6c8b5eb5943..00000000000 --- a/vendor/github.com/colinmarc/hdfs/protocol/hadoop_hdfs/xattr.proto +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -option java_package = "org.apache.hadoop.hdfs.protocol.proto"; -option java_outer_classname = "XAttrProtos"; -option java_generate_equals_and_hash = true; -package hadoop.hdfs; - -message XAttrProto { - enum XAttrNamespaceProto { - USER = 0; - TRUSTED = 1; - SECURITY = 2; - SYSTEM = 3; - RAW = 4; - } - - required XAttrNamespaceProto namespace = 1; - required string name = 2; - optional bytes value = 3; -} - -enum XAttrSetFlagProto { - XATTR_CREATE = 0x01; - XATTR_REPLACE = 0x02; -} - -message SetXAttrRequestProto { - required string src = 1; - optional XAttrProto xAttr = 2; - optional uint32 flag = 3; //bits set using XAttrSetFlagProto -} - -message SetXAttrResponseProto { -} - -message GetXAttrsRequestProto { - required string src = 1; - repeated XAttrProto xAttrs = 2; -} - -message GetXAttrsResponseProto { - repeated XAttrProto xAttrs = 1; -} - -message ListXAttrsRequestProto { - required string src = 1; -} - -message ListXAttrsResponseProto { - repeated XAttrProto xAttrs = 1; -} - -message RemoveXAttrRequestProto { - required string src = 1; - optional XAttrProto xAttr = 2; -} - -message RemoveXAttrResponseProto { -} diff --git a/vendor/github.com/colinmarc/hdfs/readdir.go b/vendor/github.com/colinmarc/hdfs/readdir.go deleted file mode 100644 index 0f7c78e1427..00000000000 --- a/vendor/github.com/colinmarc/hdfs/readdir.go +++ /dev/null @@ -1,17 +0,0 @@ -package hdfs - -import "os" - -// ReadDir reads the directory named by dirname and returns a list of sorted -// directory entries. -// -// The os.FileInfo values returned will not have block location attached to -// the struct returned by Sys(). -func (c *Client) ReadDir(dirname string) ([]os.FileInfo, error) { - f, err := c.Open(dirname) - if err != nil { - return nil, err - } - - return f.Readdir(0) -} diff --git a/vendor/github.com/colinmarc/hdfs/remove.go b/vendor/github.com/colinmarc/hdfs/remove.go deleted file mode 100644 index c6f87b3ff6c..00000000000 --- a/vendor/github.com/colinmarc/hdfs/remove.go +++ /dev/null @@ -1,41 +0,0 @@ -package hdfs - -import ( - "errors" - "os" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// Remove removes the named file or directory. -func (c *Client) Remove(name string) error { - _, err := c.getFileInfo(name) - if err != nil { - return &os.PathError{"remove", name, err} - } - - req := &hdfs.DeleteRequestProto{ - Src: proto.String(name), - Recursive: proto.Bool(true), - } - resp := &hdfs.DeleteResponseProto{} - - err = c.namenode.Execute("delete", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return &os.PathError{"remove", name, err} - } else if resp.Result == nil { - return &os.PathError{ - "remove", - name, - errors.New("unexpected empty response"), - } - } - - return nil -} diff --git a/vendor/github.com/colinmarc/hdfs/rename.go b/vendor/github.com/colinmarc/hdfs/rename.go deleted file mode 100644 index eb42b4047c9..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rename.go +++ /dev/null @@ -1,35 +0,0 @@ -package hdfs - -import ( - "os" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// Rename renames (moves) a file. 
-func (c *Client) Rename(oldpath, newpath string) error {
-	_, err := c.getFileInfo(newpath)
-	if err != nil && !os.IsNotExist(err) {
-		return &os.PathError{"rename", newpath, err}
-	}
-
-	req := &hdfs.Rename2RequestProto{
-		Src:           proto.String(oldpath),
-		Dst:           proto.String(newpath),
-		OverwriteDest: proto.Bool(true),
-	}
-	resp := &hdfs.Rename2ResponseProto{}
-
-	err = c.namenode.Execute("rename2", req, resp)
-	if err != nil {
-		if nnErr, ok := err.(*rpc.NamenodeError); ok {
-			err = interpretException(nnErr.Exception, err)
-		}
-
-		return &os.PathError{"rename", oldpath, err}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/colinmarc/hdfs/rpc/block_read_stream.go b/vendor/github.com/colinmarc/hdfs/rpc/block_read_stream.go
deleted file mode 100644
index 26ccc4ee994..00000000000
--- a/vendor/github.com/colinmarc/hdfs/rpc/block_read_stream.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package rpc
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"hash/crc32"
-	"io"
-	"math"
-
-	hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs"
-	"github.com/golang/protobuf/proto"
-)
-
-var errInvalidChecksum = errors.New("invalid checksum")
-
-// blockReadStream implements io.Reader for reading a packet stream for a single
-// block from a single datanode.
-type blockReadStream struct {
-	reader      io.Reader
-	checksumTab *crc32.Table
-	chunkSize   int
-
-	checksums bytes.Buffer
-	chunk     bytes.Buffer
-
-	packetLength int
-	chunkIndex   int
-	numChunks    int
-	lastPacket   bool
-}
-
-func newBlockReadStream(reader io.Reader, chunkSize int, checksumTab *crc32.Table) *blockReadStream {
-	return &blockReadStream{
-		reader:      reader,
-		chunkSize:   chunkSize,
-		checksumTab: checksumTab,
-	}
-}
-
-func (s *blockReadStream) Read(b []byte) (int, error) {
-	if s.chunkIndex == s.numChunks {
-		if s.lastPacket {
-			return 0, io.EOF
-		}
-
-		err := s.startPacket()
-		if err != nil {
-			return 0, err
-		}
-	}
-
-	remainingInPacket := (s.packetLength - (s.chunkIndex * s.chunkSize))
-
-	// For small reads, we need to buffer a single chunk. If we did that
-	// previously, read the rest of the buffer, so we're aligned back on a
-	// chunk boundary.
-	if s.chunk.Len() > 0 {
-		n, _ := s.chunk.Read(b)
-		return n, nil
-	} else if len(b) < s.chunkSize {
-		chunkSize := s.chunkSize
-		if chunkSize > remainingInPacket {
-			chunkSize = remainingInPacket
-		}
-
-		_, err := io.CopyN(&s.chunk, s.reader, int64(chunkSize))
-		if err != nil {
-			return 0, err
-		}
-
-		err = s.validateChecksum(s.chunk.Bytes())
-		if err != nil {
-			return 0, err
-		}
-
-		s.chunkIndex++
-		n, _ := s.chunk.Read(b)
-		return n, nil
-	}
-
-	// Always align reads to a chunk boundary. This makes the code much simpler,
-	// and with readers that pick sane read sizes (like io.Copy), should be
-	// efficient.
-	var amountToRead int
-	var chunksToRead int
-	if len(b) > remainingInPacket {
-		chunksToRead = s.numChunks - s.chunkIndex
-		amountToRead = remainingInPacket
-	} else {
-		chunksToRead = len(b) / s.chunkSize
-		amountToRead = chunksToRead * s.chunkSize
-	}
-
-	n, err := io.ReadFull(s.reader, b[:amountToRead])
-	if err != nil {
-		return n, err
-	}
-
-	// Validate the bytes we just read into b against the packet checksums.
-	for i := 0; i < chunksToRead; i++ {
-		chunkOff := i * s.chunkSize
-		chunkEnd := chunkOff + s.chunkSize
-		if chunkEnd >= len(b) {
-			chunkEnd = len(b)
-		}
-
-		err := s.validateChecksum(b[chunkOff:chunkEnd])
-		if err != nil {
-			return n, err
-		}
-
-		s.chunkIndex++
-	}
-
-	// EOF would be returned by the next call to Read anyway, but it's nice to
-	// return it here.
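[Reviewer note, not part of the patch] An aside on the checksum handling in the deleted Read above: each chunk in a packet travels with a 4-byte big-endian CRC, and validateChecksum (below) recomputes it for the current chunk index. A standalone sketch of that check under the same layout, with hypothetical names:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
)

// verifyChunk recomputes the CRC32 of one chunk and compares it to the 4-byte
// big-endian checksum stored for that chunk index, mirroring the layout used
// by validateChecksum in the deleted stream code.
func verifyChunk(chunk, checksums []byte, idx int, tab *crc32.Table) error {
	want := binary.BigEndian.Uint32(checksums[idx*4 : idx*4+4])
	if crc32.Checksum(chunk, tab) != want {
		return errors.New("invalid checksum")
	}
	return nil
}

func main() {
	tab := crc32.MakeTable(crc32.Castagnoli) // CHECKSUM_CRC32C
	chunk := []byte("example chunk payload")

	checksums := make([]byte, 4)
	binary.BigEndian.PutUint32(checksums, crc32.Checksum(chunk, tab))

	fmt.Println(verifyChunk(chunk, checksums, 0, tab)) // <nil>
}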
- if s.chunkIndex == s.numChunks && s.lastPacket { - err = io.EOF - } - - return n, err -} - -func (s *blockReadStream) validateChecksum(b []byte) error { - checksumOffset := 4 * s.chunkIndex - checksumBytes := s.checksums.Bytes()[checksumOffset : checksumOffset+4] - checksum := binary.BigEndian.Uint32(checksumBytes) - - crc := crc32.Checksum(b, s.checksumTab) - if crc != checksum { - return errInvalidChecksum - } - - return nil -} - -func (s *blockReadStream) startPacket() error { - header, err := s.readPacketHeader() - if err != nil { - return err - } - - dataLength := int(header.GetDataLen()) - numChunks := int(math.Ceil(float64(dataLength) / float64(s.chunkSize))) - - // TODO don't assume checksum size is 4 - checksumsLength := numChunks * 4 - s.checksums.Reset() - s.checksums.Grow(checksumsLength) - _, err = io.CopyN(&s.checksums, s.reader, int64(checksumsLength)) - if err != nil { - return err - } - - s.packetLength = dataLength - s.numChunks = numChunks - s.chunkIndex = 0 - s.lastPacket = header.GetLastPacketInBlock() - - return nil -} - -func (s *blockReadStream) readPacketHeader() (*hdfs.PacketHeaderProto, error) { - lengthBytes := make([]byte, 6) - _, err := io.ReadFull(s.reader, lengthBytes) - if err != nil { - return nil, err - } - - // We don't actually care about the total length. - packetHeaderLength := binary.BigEndian.Uint16(lengthBytes[4:]) - packetHeaderBytes := make([]byte, packetHeaderLength) - _, err = io.ReadFull(s.reader, packetHeaderBytes) - if err != nil { - return nil, err - } - - packetHeader := &hdfs.PacketHeaderProto{} - err = proto.Unmarshal(packetHeaderBytes, packetHeader) - - return packetHeader, nil -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/block_reader.go b/vendor/github.com/colinmarc/hdfs/rpc/block_reader.go deleted file mode 100644 index 70f66579a62..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/block_reader.go +++ /dev/null @@ -1,233 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "net" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/golang/protobuf/proto" -) - -// BlockReader implements io.ReadCloser, for reading a block. It abstracts over -// reading from multiple datanodes, in order to be robust to connection -// failures, timeouts, and other shenanigans. -type BlockReader struct { - // ClientName is the unique ID used by the NamenodeConnection to locate the - // block. - ClientName string - // Block is the block location provided by the namenode. - Block *hdfs.LocatedBlockProto - // Offset is the current read offset in the block. - Offset int64 - // UseDatanodeHostname specifies whether the datanodes should be connected to - // via their hostnames (if true) or IP addresses (if false). - UseDatanodeHostname bool - // DialFunc is used to connect to the datanodes. If nil, then - // (&net.Dialer{}).DialContext is used. - DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - - datanodes *datanodeFailover - stream *blockReadStream - conn net.Conn - deadline time.Time - closed bool -} - -// NewBlockReader returns a new BlockReader, given the block information and -// security token from the namenode. It will connect (lazily) to one of the -// provided datanode locations based on which datanodes have seen failures. -// -// Deprecated: this method does not do any required initialization, and does -// not allow you to set fields such as UseDatanodeHostname. 
-func NewBlockReader(block *hdfs.LocatedBlockProto, offset int64, clientName string) *BlockReader { - return &BlockReader{ - ClientName: clientName, - Block: block, - Offset: offset, - } -} - -// SetDeadline sets the deadline for future Read calls. A zero value for t -// means Read will not time out. -func (br *BlockReader) SetDeadline(t time.Time) error { - br.deadline = t - if br.conn != nil { - return br.conn.SetDeadline(t) - } - - // Return the error at connection time. - return nil -} - -// Read implements io.Reader. -// -// In the case that a failure (such as a disconnect) occurs while reading, the -// BlockReader will failover to another datanode and continue reading -// transparently. In the case that all the datanodes fail, the error -// from the most recent attempt will be returned. -// -// Any datanode failures are recorded in a global cache, so subsequent reads, -// even reads for different blocks, will prioritize them lower. -func (br *BlockReader) Read(b []byte) (int, error) { - if br.closed { - return 0, io.ErrClosedPipe - } else if uint64(br.Offset) >= br.Block.GetB().GetNumBytes() { - br.Close() - return 0, io.EOF - } - - if br.datanodes == nil { - locs := br.Block.GetLocs() - datanodes := make([]string, len(locs)) - for i, loc := range locs { - datanodes[i] = getDatanodeAddress(loc.GetId(), br.UseDatanodeHostname) - } - - br.datanodes = newDatanodeFailover(datanodes) - } - - // This is the main retry loop. - for br.stream != nil || br.datanodes.numRemaining() > 0 { - // First, we try to connect. If this fails, we can just skip the datanode - // and continue. - if br.stream == nil { - err := br.connectNext() - if err != nil { - br.datanodes.recordFailure(err) - continue - } - } - - // Then, try to read. If we fail here after reading some bytes, we return - // a partial read (n < len(b)). - n, err := br.stream.Read(b) - br.Offset += int64(n) - if err != nil && err != io.EOF { - br.stream = nil - br.datanodes.recordFailure(err) - if n > 0 { - return n, nil - } - - continue - } - - return n, err - } - - err := br.datanodes.lastError() - if err == nil { - err = errors.New("no available datanodes") - } - - return 0, err -} - -// Close implements io.Closer. -func (br *BlockReader) Close() error { - br.closed = true - if br.conn != nil { - br.conn.Close() - } - - return nil -} - -// connectNext pops a datanode from the list based on previous failures, and -// connects to it. 
-func (br *BlockReader) connectNext() error { - address := br.datanodes.next() - - if br.DialFunc == nil { - br.DialFunc = (&net.Dialer{}).DialContext - } - - conn, err := br.DialFunc(context.Background(), "tcp", address) - if err != nil { - return err - } - - err = br.writeBlockReadRequest(conn) - if err != nil { - return err - } - - resp, err := readBlockOpResponse(conn) - if err != nil { - return err - } else if resp.GetStatus() != hdfs.Status_SUCCESS { - return fmt.Errorf("read failed: %s (%s)", resp.GetStatus().String(), resp.GetMessage()) - } - - readInfo := resp.GetReadOpChecksumInfo() - checksumInfo := readInfo.GetChecksum() - - var checksumTab *crc32.Table - checksumType := checksumInfo.GetType() - switch checksumType { - case hdfs.ChecksumTypeProto_CHECKSUM_CRC32: - checksumTab = crc32.IEEETable - case hdfs.ChecksumTypeProto_CHECKSUM_CRC32C: - checksumTab = crc32.MakeTable(crc32.Castagnoli) - default: - return fmt.Errorf("unsupported checksum type: %d", checksumType) - } - - chunkSize := int(checksumInfo.GetBytesPerChecksum()) - stream := newBlockReadStream(conn, chunkSize, checksumTab) - - // The read will start aligned to a chunk boundary, so we need to seek forward - // to the requested offset. - amountToDiscard := br.Offset - int64(readInfo.GetChunkOffset()) - if amountToDiscard > 0 { - _, err := io.CopyN(ioutil.Discard, stream, amountToDiscard) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - - conn.Close() - return err - } - } - - br.stream = stream - br.conn = conn - err = br.conn.SetDeadline(br.deadline) - if err != nil { - return err - } - - return nil -} - -// A read request to a datanode: -// +-----------------------------------------------------------+ -// | Data Transfer Protocol Version, int16 | -// +-----------------------------------------------------------+ -// | Op code, 1 byte (READ_BLOCK = 0x51) | -// +-----------------------------------------------------------+ -// | varint length + OpReadBlockProto | -// +-----------------------------------------------------------+ -func (br *BlockReader) writeBlockReadRequest(w io.Writer) error { - needed := br.Block.GetB().GetNumBytes() - uint64(br.Offset) - op := &hdfs.OpReadBlockProto{ - Header: &hdfs.ClientOperationHeaderProto{ - BaseHeader: &hdfs.BaseHeaderProto{ - Block: br.Block.GetB(), - Token: br.Block.GetBlockToken(), - }, - ClientName: proto.String(br.ClientName), - }, - Offset: proto.Uint64(uint64(br.Offset)), - Len: proto.Uint64(needed), - } - - return writeBlockOpRequest(w, readBlockOp, op) -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/block_write_stream.go b/vendor/github.com/colinmarc/hdfs/rpc/block_write_stream.go deleted file mode 100644 index 20ae16c1e3e..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/block_write_stream.go +++ /dev/null @@ -1,317 +0,0 @@ -package rpc - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "math" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/golang/protobuf/proto" -) - -const ( - outboundPacketSize = 65536 - outboundChunkSize = 512 - maxPacketsInQueue = 5 -) - -// blockWriteStream writes data out to a datanode, and reads acks back. 
-type blockWriteStream struct { - block *hdfs.LocatedBlockProto - - conn io.ReadWriter - buf bytes.Buffer - offset int64 - closed bool - - packets chan outboundPacket - seqno int - - ackError error - acksDone chan struct{} - lastPacketSeqno int -} - -type outboundPacket struct { - seqno int - offset int64 - last bool - checksums []byte - data []byte -} - -type ackError struct { - pipelineIndex int - seqno int - status hdfs.Status -} - -func (ae ackError) Error() string { - return fmt.Sprintf("Ack error from datanode: %s", ae.status.String()) -} - -var ErrInvalidSeqno = errors.New("invalid ack sequence number") - -func newBlockWriteStream(conn io.ReadWriter, offset int64) *blockWriteStream { - s := &blockWriteStream{ - conn: conn, - offset: offset, - seqno: 1, - packets: make(chan outboundPacket, maxPacketsInQueue), - acksDone: make(chan struct{}), - } - - // Ack packets in the background. - go func() { - s.ackPackets() - close(s.acksDone) - }() - - return s -} - -// func newBlockWriteStreamForRecovery(conn io.ReadWriter, oldWriteStream *blockWriteStream) { -// s := &blockWriteStream{ -// conn: conn, -// buf: oldWriteStream.buf, -// packets: oldWriteStream.packets, -// offset: oldWriteStream.offset, -// seqno: oldWriteStream.seqno, -// packets -// } - -// go s.ackPackets() -// return s -// } - -func (s *blockWriteStream) Write(b []byte) (int, error) { - if s.closed { - return 0, io.ErrClosedPipe - } - - if err := s.getAckError(); err != nil { - return 0, err - } - - n, _ := s.buf.Write(b) - err := s.flush(false) - return n, err -} - -// finish flushes the rest of the buffered bytes, and then sends a final empty -// packet signifying the end of the block. -func (s *blockWriteStream) finish() error { - if s.closed { - return nil - } - s.closed = true - - if err := s.getAckError(); err != nil { - return err - } - - err := s.flush(true) - if err != nil { - return err - } - - // The last packet has no data; it's just a marker that the block is finished. - lastPacket := outboundPacket{ - seqno: s.seqno, - offset: s.offset, - last: true, - checksums: []byte{}, - data: []byte{}, - } - s.packets <- lastPacket - err = s.writePacket(lastPacket) - if err != nil { - return err - } - close(s.packets) - - // Wait for the ack loop to finish. - <-s.acksDone - - // Check one more time for any ack errors. - if err := s.getAckError(); err != nil { - return err - } - - return nil -} - -// flush parcels out the buffered bytes into packets, which it then flushes to -// the datanode. We keep around a reference to the packet, in case the ack -// fails, and we need to send it again later. -func (s *blockWriteStream) flush(force bool) error { - if err := s.getAckError(); err != nil { - return err - } - - for s.buf.Len() > 0 && (force || s.buf.Len() >= outboundPacketSize) { - packet := s.makePacket() - s.packets <- packet - s.offset += int64(len(packet.data)) - s.seqno++ - - err := s.writePacket(packet) - if err != nil { - return err - } - } - - return nil -} - -func (s *blockWriteStream) makePacket() outboundPacket { - packetLength := outboundPacketSize - if s.buf.Len() < outboundPacketSize { - packetLength = s.buf.Len() - } - - // If we're starting from a weird offset (usually because of an Append), HDFS - // gets unhappy unless we first align to a chunk boundary with a small packet. - // Otherwise it yells at us with "a partial chunk must be sent in an - // individual packet" or just complains about a corrupted block. 
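[Reviewer note, not part of the patch] The alignment rule in the comment above (a partial chunk must go out in its own packet) comes down to a small computation, which the deleted makePacket body performs next. A standalone sketch with a hypothetical packetLen helper:

package main

import "fmt"

const chunkSize = 512

// packetLen mirrors the alignment rule described above: a packet is capped at
// max bytes, and when the stream starts at an offset that is not chunk-aligned
// (as after an append), the first packet is shortened so that it ends exactly
// on a chunk boundary.
func packetLen(buffered int, offset int64, max int) int {
	n := max
	if buffered < max {
		n = buffered
	}
	if a := int(offset) % chunkSize; a > 0 && n > chunkSize-a {
		n = chunkSize - a
	}
	return n
}

func main() {
	fmt.Println(packetLen(65536, 0, 65536))   // 65536: already aligned
	fmt.Println(packetLen(65536, 700, 65536)) // 324: 700+324 = 1024, aligned
}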
- alignment := int(s.offset) % outboundChunkSize - if alignment > 0 && packetLength > (outboundChunkSize-alignment) { - packetLength = outboundChunkSize - alignment - } - - numChunks := int(math.Ceil(float64(packetLength) / float64(outboundChunkSize))) - packet := outboundPacket{ - seqno: s.seqno, - offset: s.offset, - last: false, - checksums: make([]byte, numChunks*4), - data: make([]byte, packetLength), - } - - // TODO: we shouldn't actually need this extra copy. We should also be able - // to "reuse" packets. - io.ReadFull(&s.buf, packet.data) - - // Fill in the checksum for each chunk of data. - for i := 0; i < numChunks; i++ { - chunkOff := i * outboundChunkSize - chunkEnd := chunkOff + outboundChunkSize - if chunkEnd >= len(packet.data) { - chunkEnd = len(packet.data) - } - - checksum := crc32.Checksum(packet.data[chunkOff:chunkEnd], crc32.IEEETable) - binary.BigEndian.PutUint32(packet.checksums[i*4:], checksum) - } - - return packet -} - -// ackPackets is meant to run in the background, reading acks and setting -// ackError if one fails. -func (s *blockWriteStream) ackPackets() { - reader := bufio.NewReader(s.conn) - - for { - p, ok := <-s.packets - if !ok { - // All packets all acked. - return - } - - // If we fail to read the ack at all, that counts as a failure from the - // first datanode (the one we're connected to). - ack := &hdfs.PipelineAckProto{} - err := readPrefixedMessage(reader, ack) - if err != nil { - s.ackError = err - break - } - - seqno := int(ack.GetSeqno()) - for i, status := range ack.GetReply() { - if status != hdfs.Status_SUCCESS { - s.ackError = ackError{status: status, seqno: seqno, pipelineIndex: i} - break - } - } - - if seqno != p.seqno { - s.ackError = ErrInvalidSeqno - break - } - } - - // Once we've seen an error, just keep reading packets off the channel (but - // not off the socket) until the writing thread figures it out. If we don't, - // the upstream thread could deadlock waiting for the channel to have space. - for _ = range s.packets { - } -} - -func (s *blockWriteStream) getAckError() error { - select { - case <-s.acksDone: - if s.ackError != nil { - return s.ackError - } - default: - } - - return nil -} - -// A packet for the datanode: -// +-----------------------------------------------------------+ -// | uint32 length of the packet | -// +-----------------------------------------------------------+ -// | size of the PacketHeaderProto, uint16 | -// +-----------------------------------------------------------+ -// | PacketHeaderProto | -// +-----------------------------------------------------------+ -// | N checksums, 4 bytes each | -// +-----------------------------------------------------------+ -// | N chunks of payload data | -// +-----------------------------------------------------------+ -func (s *blockWriteStream) writePacket(p outboundPacket) error { - headerInfo := &hdfs.PacketHeaderProto{ - OffsetInBlock: proto.Int64(p.offset), - Seqno: proto.Int64(int64(p.seqno)), - LastPacketInBlock: proto.Bool(p.last), - DataLen: proto.Int32(int32(len(p.data))), - } - - header := make([]byte, 6) - infoBytes, err := proto.Marshal(headerInfo) - if err != nil { - return err - } - - // Don't ask me why this doesn't include the header proto... - totalLength := len(p.data) + len(p.checksums) + 4 - binary.BigEndian.PutUint32(header, uint32(totalLength)) - binary.BigEndian.PutUint16(header[4:], uint16(len(infoBytes))) - header = append(header, infoBytes...) 
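// [Reviewer note, not part of the patch] A worked example of the framing
// performed just above: header now holds
//
//	[uint32 total = len(data)+len(checksums)+4][uint16 len(infoBytes)][infoBytes]
//
// so a 512-byte chunk with one 4-byte checksum and a 25-byte PacketHeaderProto
// is framed as 00 00 02 08 | 00 19 | <25 header bytes>, followed on the wire
// by the checksums and then the data.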
- - _, err = s.conn.Write(header) - if err != nil { - return err - } - - _, err = s.conn.Write(p.checksums) - if err != nil { - return err - } - - _, err = s.conn.Write(p.data) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/block_writer.go b/vendor/github.com/colinmarc/hdfs/rpc/block_writer.go deleted file mode 100644 index 18fbb1a3e7b..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/block_writer.go +++ /dev/null @@ -1,241 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/golang/protobuf/proto" -) - -var ErrEndOfBlock = errors.New("end of block") - -// BlockWriter implements io.WriteCloser for writing a block to a datanode. -// Given a block location, it handles pipeline construction and failures, -// including communicating with the namenode if need be. -type BlockWriter struct { - // ClientName is the unique ID used by the NamenodeConnection to initialize - // the block. - ClientName string - // Block is the block location provided by the namenode. - Block *hdfs.LocatedBlockProto - // BlockSize is the target size of the new block (or the existing one, if - // appending). The represents the configured value, not the actual number - // of bytes currently in the block. - BlockSize int64 - // Offset is the current write offset in the block. - Offset int64 - // Append indicates whether this is an append operation on an existing block. - Append bool - // UseDatanodeHostname indicates whether the datanodes will be connected to - // via hostname (if true) or IP address (if false). - UseDatanodeHostname bool - // DialFunc is used to connect to the datanodes. If nil, then - // (&net.Dialer{}).DialContext is used. - DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - - conn net.Conn - deadline time.Time - stream *blockWriteStream - closed bool -} - -// NewBlockWriter returns a BlockWriter for the given block. It will lazily -// set up a replication pipeline, and connect to the "best" datanode based on -// any previously seen failures. -// -// Deprecated: this method does not do any required initialization, and does -// not allow you to set fields such as UseDatanodeHostname. -func NewBlockWriter(block *hdfs.LocatedBlockProto, namenode *NamenodeConnection, blockSize int64) *BlockWriter { - return &BlockWriter{ - ClientName: namenode.ClientName(), - Block: block, - BlockSize: blockSize, - } -} - -// SetDeadline sets the deadline for future Write, Flush, and Close calls. A -// zero value for t means those calls will not time out. -func (bw *BlockWriter) SetDeadline(t time.Time) error { - bw.deadline = t - if bw.conn != nil { - return bw.conn.SetDeadline(t) - } - - // Return the error at connection time. - return nil -} - -// Write implements io.Writer. -// -// Unlike BlockReader, BlockWriter currently has no ability to recover from -// write failures (timeouts, datanode failure, etc). Once it returns an error -// from Write or Close, it may be in an invalid state. -// -// This will hopefully be fixed in a future release. 
-func (bw *BlockWriter) Write(b []byte) (int, error) { - var blockFull bool - if bw.Offset >= bw.BlockSize { - return 0, ErrEndOfBlock - } else if (bw.Offset + int64(len(b))) > bw.BlockSize { - blockFull = true - b = b[:bw.BlockSize-bw.Offset] - } - - if bw.stream == nil { - err := bw.connectNext() - // TODO: handle failures, set up recovery pipeline - if err != nil { - return 0, err - } - } - - // TODO: handle failures, set up recovery pipeline - n, err := bw.stream.Write(b) - bw.Offset += int64(n) - if err == nil && blockFull { - err = ErrEndOfBlock - } - - return n, err -} - -// Flush flushes any unwritten packets out to the datanode. -func (bw *BlockWriter) Flush() error { - if bw.stream != nil { - return bw.stream.flush(true) - } - - return nil -} - -// Close implements io.Closer. It flushes any unwritten packets out to the -// datanode, and sends a final packet indicating the end of the block. The -// block must still be finalized with the namenode. -func (bw *BlockWriter) Close() error { - bw.closed = true - if bw.conn != nil { - defer bw.conn.Close() - } - - if bw.stream != nil { - // TODO: handle failures, set up recovery pipeline - err := bw.stream.finish() - if err != nil { - return err - } - } - - return nil -} - -func (bw *BlockWriter) connectNext() error { - address := getDatanodeAddress(bw.currentPipeline()[0].GetId(), bw.UseDatanodeHostname) - - if bw.DialFunc == nil { - bw.DialFunc = (&net.Dialer{}).DialContext - } - - conn, err := bw.DialFunc(context.Background(), "tcp", address) - if err != nil { - return err - } - - err = conn.SetDeadline(bw.deadline) - if err != nil { - return err - } - - err = bw.writeBlockWriteRequest(conn) - if err != nil { - return err - } - - resp, err := readBlockOpResponse(conn) - if err != nil { - return err - } else if resp.GetStatus() != hdfs.Status_SUCCESS { - return fmt.Errorf("write failed: %s (%s)", resp.GetStatus().String(), resp.GetMessage()) - } - - bw.conn = conn - bw.stream = newBlockWriteStream(conn, bw.Offset) - return nil -} - -func (bw *BlockWriter) currentPipeline() []*hdfs.DatanodeInfoProto { - // TODO: we need to be able to reconfigure the pipeline when a node fails. - // - // targets := make([]*hdfs.DatanodeInfoProto, 0, len(br.pipeline)) - // for _, loc := range s.block.GetLocs() { - // addr := getDatanodeAddress(loc) - // for _, pipelineAddr := range br.pipeline { - // if ipAddr == addr { - // append(targets, loc) - // } - // } - // } - // - // return targets - - return bw.Block.GetLocs() -} - -func (bw *BlockWriter) currentStage() hdfs.OpWriteBlockProto_BlockConstructionStage { - // TODO: this should be PIPELINE_SETUP_STREAMING_RECOVERY or - // PIPELINE_SETUP_APPEND_RECOVERY for recovery. - if bw.Append { - return hdfs.OpWriteBlockProto_PIPELINE_SETUP_APPEND - } - - return hdfs.OpWriteBlockProto_PIPELINE_SETUP_CREATE -} - -func (bw *BlockWriter) generationTimestamp() int64 { - if bw.Append { - return int64(bw.Block.B.GetGenerationStamp()) - } - - return 0 -} - -// writeBlockWriteRequest creates an OpWriteBlock message and submits it to the -// datanode. This occurs before any writing actually occurs, and is intended -// to synchronize the client with the datanode, returning an error if the -// submitted expected state differs from the actual state on the datanode. -// -// The field "MinBytesRcvd" below is used during append operation and should be -// the block's expected size. The field "MaxBytesRcvd" is used only in the case -// of PIPELINE_SETUP_STREAMING_RECOVERY. 
-// -// See: https://github.com/apache/hadoop/blob/6314843881b4c67d08215e60293f8b33242b9416/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java#L216 -// And: https://github.com/apache/hadoop/blob/6314843881b4c67d08215e60293f8b33242b9416/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java#L1462 -func (bw *BlockWriter) writeBlockWriteRequest(w io.Writer) error { - targets := bw.currentPipeline()[1:] - - op := &hdfs.OpWriteBlockProto{ - Header: &hdfs.ClientOperationHeaderProto{ - BaseHeader: &hdfs.BaseHeaderProto{ - Block: bw.Block.GetB(), - Token: bw.Block.GetBlockToken(), - }, - ClientName: proto.String(bw.ClientName), - }, - Targets: targets, - Stage: bw.currentStage().Enum(), - PipelineSize: proto.Uint32(uint32(len(targets))), - MinBytesRcvd: proto.Uint64(bw.Block.GetB().GetNumBytes()), - MaxBytesRcvd: proto.Uint64(uint64(bw.Offset)), - LatestGenerationStamp: proto.Uint64(uint64(bw.generationTimestamp())), - RequestedChecksum: &hdfs.ChecksumProto{ - Type: hdfs.ChecksumTypeProto_CHECKSUM_CRC32.Enum(), - BytesPerChecksum: proto.Uint32(outboundChunkSize), - }, - } - - return writeBlockOpRequest(w, writeBlockOp, op) -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/checksum_reader.go b/vendor/github.com/colinmarc/hdfs/rpc/checksum_reader.go deleted file mode 100644 index ac7d1a57d5a..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/checksum_reader.go +++ /dev/null @@ -1,151 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "io" - "net" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" -) - -// ChecksumReader provides an interface for reading the "MD5CRC32" checksums of -// individual blocks. It abstracts over reading from multiple datanodes, in -// order to be robust to failures. -type ChecksumReader struct { - // Block is the block location provided by the namenode. - Block *hdfs.LocatedBlockProto - // UseDatanodeHostname specifies whether the datanodes should be connected to - // via their hostnames (if true) or IP addresses (if false). - UseDatanodeHostname bool - // DialFunc is used to connect to the datanodes. If nil, then - // (&net.Dialer{}).DialContext is used. - DialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - - deadline time.Time - datanodes *datanodeFailover -} - -// NewChecksumReader creates a new ChecksumReader for the given block. -// -// Deprecated: this method does not do any required initialization, and does -// not allow you to set fields such as UseDatanodeHostname. -func NewChecksumReader(block *hdfs.LocatedBlockProto) *ChecksumReader { - return &ChecksumReader{ - Block: block, - } -} - -// SetDeadline sets the deadline for future ReadChecksum calls. A zero value -// for t means Read will not time out. -func (cr *ChecksumReader) SetDeadline(t time.Time) error { - cr.deadline = t - // Return the error at connection time. - return nil -} - -// ReadChecksum returns the checksum of the block. 
-func (cr *ChecksumReader) ReadChecksum() ([]byte, error) {
-	if cr.datanodes == nil {
-		locs := cr.Block.GetLocs()
-		datanodes := make([]string, len(locs))
-		for i, loc := range locs {
-			dn := loc.GetId()
-			datanodes[i] = getDatanodeAddress(dn, cr.UseDatanodeHostname)
-		}
-
-		cr.datanodes = newDatanodeFailover(datanodes)
-	}
-
-	for cr.datanodes.numRemaining() > 0 {
-		address := cr.datanodes.next()
-		checksum, err := cr.readChecksum(address)
-		if err != nil {
-			cr.datanodes.recordFailure(err)
-			continue
-		}
-
-		return checksum, nil
-	}
-
-	err := cr.datanodes.lastError()
-	if err == nil {
-		err = errors.New("No available datanodes for block.")
-	}
-
-	return nil, err
-}
-
-func (cr *ChecksumReader) readChecksum(address string) ([]byte, error) {
-	if cr.DialFunc == nil {
-		cr.DialFunc = (&net.Dialer{}).DialContext
-	}
-
-	conn, err := cr.DialFunc(context.Background(), "tcp", address)
-	if err != nil {
-		return nil, err
-	}
-
-	err = conn.SetDeadline(cr.deadline)
-	if err != nil {
-		return nil, err
-	}
-
-	err = cr.writeBlockChecksumRequest(conn)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := cr.readBlockChecksumResponse(conn)
-	if err != nil {
-		return nil, err
-	}
-
-	return resp.GetChecksumResponse().GetMd5(), nil
-}
-
-// A checksum request to a datanode:
-// +-----------------------------------------------------------+
-// |  Data Transfer Protocol Version, int16                     |
-// +-----------------------------------------------------------+
-// |  Op code, 1 byte (CHECKSUM_BLOCK = 0x55)                   |
-// +-----------------------------------------------------------+
-// |  varint length + OpReadBlockProto                          |
-// +-----------------------------------------------------------+
-func (cr *ChecksumReader) writeBlockChecksumRequest(w io.Writer) error {
-	header := []byte{0x00, dataTransferVersion, checksumBlockOp}
-
-	op := newChecksumBlockOp(cr.Block)
-	opBytes, err := makePrefixedMessage(op)
-	if err != nil {
-		return err
-	}
-
-	req := append(header, opBytes...)
-	_, err = w.Write(req)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// The response from the datanode:
-// +-----------------------------------------------------------+
-// |  varint length + BlockOpResponseProto                      |
-// +-----------------------------------------------------------+
-func (cr *ChecksumReader) readBlockChecksumResponse(r io.Reader) (*hdfs.BlockOpResponseProto, error) {
-	resp := &hdfs.BlockOpResponseProto{}
-	err := readPrefixedMessage(r, resp)
-	return resp, err
-}
-
-func newChecksumBlockOp(block *hdfs.LocatedBlockProto) *hdfs.OpBlockChecksumProto {
-	return &hdfs.OpBlockChecksumProto{
-		Header: &hdfs.BaseHeaderProto{
-			Block: block.GetB(),
-			Token: block.GetBlockToken(),
-		},
-	}
-}
diff --git a/vendor/github.com/colinmarc/hdfs/rpc/datanode_failover.go b/vendor/github.com/colinmarc/hdfs/rpc/datanode_failover.go
deleted file mode 100644
index 731155731eb..00000000000
--- a/vendor/github.com/colinmarc/hdfs/rpc/datanode_failover.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package rpc
-
-import (
-	"sync"
-	"time"
-)
-
-// datanodeFailures is a global map of address to the last recorded failure
-var datanodeFailures = make(map[string]time.Time)
-var datanodeFailuresLock sync.Mutex
-
-// a datanodeFailover provides some common code for trying multiple datanodes
-// in the context of a single operation on a single block.
-type datanodeFailover struct { - datanodes []string - currentDatanode string - err error -} - -func newDatanodeFailover(datanodes []string) *datanodeFailover { - return &datanodeFailover{ - datanodes: datanodes, - currentDatanode: "", - err: nil, - } -} - -func (df *datanodeFailover) recordFailure(err error) { - datanodeFailuresLock.Lock() - defer datanodeFailuresLock.Unlock() - - datanodeFailures[df.currentDatanode] = time.Now() - df.err = err -} - -func (df *datanodeFailover) next() string { - if df.numRemaining() == 0 { - return "" - } - - var picked = -1 - var oldestFailure time.Time - - for i, address := range df.datanodes { - datanodeFailuresLock.Lock() - failedAt, hasFailed := datanodeFailures[address] - datanodeFailuresLock.Unlock() - - if !hasFailed { - picked = i - break - } else if oldestFailure.IsZero() || failedAt.Before(oldestFailure) { - picked = i - oldestFailure = failedAt - } - } - - address := df.datanodes[picked] - df.datanodes = append(df.datanodes[:picked], df.datanodes[picked+1:]...) - - df.currentDatanode = address - return address -} - -func (df *datanodeFailover) numRemaining() int { - return len(df.datanodes) -} - -func (df *datanodeFailover) lastError() error { - return df.err -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/kerberos.go b/vendor/github.com/colinmarc/hdfs/rpc/kerberos.go deleted file mode 100644 index f8e2cf9779b..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/kerberos.go +++ /dev/null @@ -1,150 +0,0 @@ -package rpc - -import ( - "errors" - "fmt" - "net" - "regexp" - - hadoop "github.com/colinmarc/hdfs/protocol/hadoop_common" - "gopkg.in/jcmturner/gokrb5.v5/gssapi" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - krbtypes "gopkg.in/jcmturner/gokrb5.v5/types" -) - -const saslRpcCallId = -33 - -var ( - errKerberosNotSupported = errors.New("kerberos authentication not supported by namenode") - krbSPNHost = regexp.MustCompile(`\A[^/]+/(_HOST)([@/]|\z)`) -) - -func (c *NamenodeConnection) doKerberosHandshake() error { - // All SASL requests/responses use this sequence number. - c.currentRequestID = saslRpcCallId - - // Start negotiation, and get the list of supported mechanisms in reply. - c.writeSaslRequest(&hadoop.RpcSaslProto{State: hadoop.RpcSaslProto_NEGOTIATE.Enum()}) - resp, err := c.readSaslResponse(hadoop.RpcSaslProto_NEGOTIATE) - if err != nil { - return err - } - - var mechanism *hadoop.RpcSaslProto_SaslAuth - for _, m := range resp.GetAuths() { - if *m.Method == "KERBEROS" { - mechanism = m - } - } - - if mechanism == nil { - return errKerberosNotSupported - } - - // Get a ticket from Kerberos, and send the initial token to the namenode. - token, sessionKey, err := c.getKerberosTicket() - if err != nil { - return err - } - - err = c.writeSaslRequest(&hadoop.RpcSaslProto{ - State: hadoop.RpcSaslProto_INITIATE.Enum(), - Token: token.MechToken, - Auths: []*hadoop.RpcSaslProto_SaslAuth{mechanism}, - }) - - if err != nil { - return err - } - - // In response, we get a server token to verify. - resp, err = c.readSaslResponse(hadoop.RpcSaslProto_CHALLENGE) - if err != nil { - return err - } - - var nnToken gssapi.WrapToken - err = nnToken.Unmarshal(resp.GetToken(), true) - if err != nil { - return err - } - - _, err = nnToken.VerifyCheckSum(sessionKey, keyusage.GSSAPI_ACCEPTOR_SEAL) - if err != nil { - return fmt.Errorf("invalid server token: %s", err) - } - - // Sign the payload and send it back to the namenode. - // TODO: Make sure we can support what is required based on what's in the - // payload. 
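[Reviewer note, not part of the patch] Looking back at the datanodeFailover type deleted above: its next method prefers an address with no recorded failure and otherwise falls back to the one whose failure is oldest. A standalone sketch of that selection policy, with a hypothetical pick helper and the global map plus locking omitted:

package main

import (
	"fmt"
	"time"
)

// pick mirrors the deleted next method: return the first address with no
// recorded failure, otherwise the one whose last failure is oldest.
func pick(addrs []string, failures map[string]time.Time) string {
	picked := -1
	var oldest time.Time
	for i, a := range addrs {
		failedAt, hasFailed := failures[a]
		if !hasFailed {
			return a
		}
		if picked == -1 || failedAt.Before(oldest) {
			picked, oldest = i, failedAt
		}
	}
	if picked < 0 {
		return ""
	}
	return addrs[picked]
}

func main() {
	now := time.Now()
	failures := map[string]time.Time{
		"dn1:50010": now,
		"dn2:50010": now.Add(-time.Hour), // failed longest ago
	}
	fmt.Println(pick([]string{"dn1:50010", "dn2:50010"}, failures)) // dn2:50010
}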
- signed, err := gssapi.NewInitiatorToken(nnToken.Payload, sessionKey) - if err != nil { - return err - } - - signedBytes, err := signed.Marshal() - if err != nil { - return err - } - - err = c.writeSaslRequest(&hadoop.RpcSaslProto{ - State: hadoop.RpcSaslProto_RESPONSE.Enum(), - Token: signedBytes, - }) - - if err != nil { - return err - } - - // Read the final response. If it's a SUCCESS, then we're done here. - _, err = c.readSaslResponse(hadoop.RpcSaslProto_SUCCESS) - return err -} - -func (c *NamenodeConnection) writeSaslRequest(req *hadoop.RpcSaslProto) error { - packet, err := makeRPCPacket(newRPCRequestHeader(saslRpcCallId, c.clientId), req) - if err != nil { - return err - } - - _, err = c.conn.Write(packet) - return err -} - -func (c *NamenodeConnection) readSaslResponse(expectedState hadoop.RpcSaslProto_SaslState) (*hadoop.RpcSaslProto, error) { - resp := &hadoop.RpcSaslProto{} - err := c.readResponse("sasl", resp) - if err != nil { - return nil, err - } else if resp.GetState() != expectedState { - return nil, fmt.Errorf("unexpected SASL state: %s", resp.GetState().String()) - } - - return resp, nil -} - -// getKerberosTicket returns an initial kerberos negotiation token and the -// paired session key, along with an error if any occured. -func (c *NamenodeConnection) getKerberosTicket() (gssapi.NegTokenInit, krbtypes.EncryptionKey, error) { - host, _, _ := net.SplitHostPort(c.host.address) - spn := replaceSPNHostWildcard(c.kerberosServicePrincipleName, host) - - ticket, key, err := c.kerberosClient.GetServiceTicket(spn) - if err != nil { - return gssapi.NegTokenInit{}, key, err - } - - token, err := gssapi.NewNegTokenInitKrb5(*c.kerberosClient.Credentials, ticket, key) - return token, key, err -} - -// replaceSPNHostWildcard substitutes the special string '_HOST' in the given -// SPN for the given (current) host. -func replaceSPNHostWildcard(spn, host string) string { - res := krbSPNHost.FindStringSubmatchIndex(spn) - if res == nil || res[2] == -1 { - return spn - } - - return spn[:res[2]] + host + spn[res[3]:] -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/namenode.go b/vendor/github.com/colinmarc/hdfs/rpc/namenode.go deleted file mode 100644 index a66c23e4c61..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/namenode.go +++ /dev/null @@ -1,410 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "fmt" - "net" - "sync" - "time" - - hadoop "github.com/colinmarc/hdfs/protocol/hadoop_common" - "github.com/golang/protobuf/proto" - krb "gopkg.in/jcmturner/gokrb5.v5/client" -) - -const ( - rpcVersion byte = 0x09 - serviceClass byte = 0x0 - noneAuthProtocol byte = 0x0 - saslAuthProtocol byte = 0xdf - protocolClass = "org.apache.hadoop.hdfs.protocol.ClientProtocol" - protocolClassVersion = 1 - handshakeCallID = -3 - standbyExceptionClass = "org.apache.hadoop.ipc.StandbyException" -) - -const backoffDuration = time.Second * 5 - -// NamenodeConnection represents an open connection to a namenode. -type NamenodeConnection struct { - clientId []byte - clientName string - currentRequestID int32 - - user string - kerberosClient *krb.Client - kerberosServicePrincipleName string - - dialFunc func(ctx context.Context, network, addr string) (net.Conn, error) - conn net.Conn - host *namenodeHost - hostList []*namenodeHost - - reqLock sync.Mutex -} - -// NamenodeConnectionOptions represents the configurable options available -// for a NamenodeConnection. -type NamenodeConnectionOptions struct { - // Addresses specifies the namenode(s) to connect to. 
-	Addresses []string
-	// User specifies which HDFS user the client will act as.
-	User string
-	// DialFunc is used to connect to the datanodes. If nil, then
-	// (&net.Dialer{}).DialContext is used.
-	DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
-	// KerberosClient is used to connect to kerberized HDFS clusters. If provided,
-	// the NamenodeConnection will always mutually authenticate when connecting
-	// to the namenode(s).
-	KerberosClient *krb.Client
-	// KerberosServicePrincipleName specifies the Service Principle Name
-	// (<SERVICE>/<FQDN>) for the namenode(s). Like in the
-	// dfs.namenode.kerberos.principal property of core-site.xml, the special
-	// string '_HOST' can be substituted for the hostname in a multi-namenode
-	// setup (for example: 'nn/_HOST@EXAMPLE.COM'). It is required if
-	// KerberosClient is provided.
-	KerberosServicePrincipleName string
-}
-
-// NamenodeError represents an interpreted error from the Namenode, including
-// the error code and the java backtrace.
-type NamenodeError struct {
-	Method    string
-	Message   string
-	Code      int
-	Exception string
-}
-
-// Desc returns the long form of the error code, as defined in the
-// RpcErrorCodeProto in RpcHeader.proto
-func (err *NamenodeError) Desc() string {
-	return hadoop.RpcResponseHeaderProto_RpcErrorCodeProto_name[int32(err.Code)]
-}
-
-func (err *NamenodeError) Error() string {
-	s := fmt.Sprintf("%s call failed with %s", err.Method, err.Desc())
-	if err.Exception != "" {
-		s += fmt.Sprintf(" (%s)", err.Exception)
-	}
-
-	return s
-}
-
-type namenodeHost struct {
-	address     string
-	lastError   error
-	lastErrorAt time.Time
-}
-
-// NewNamenodeConnection creates a new connection to a namenode and performs an
-// initial handshake.
-//
-// You probably want to use hdfs.New instead, which provides a higher-level
-// interface.
-func NewNamenodeConnection(address string, user string) (*NamenodeConnection, error) {
-	return NewNamenodeConnectionWithOptions(NamenodeConnectionOptions{
-		Addresses: []string{address},
-		User:      user,
-	})
-}
-
-// NewNamenodeConnectionWithOptions creates a new connection to a namenode with
-// the given options and performs an initial handshake.
-func NewNamenodeConnectionWithOptions(options NamenodeConnectionOptions) (*NamenodeConnection, error) {
-	// Build the list of hosts to be used for failover.
-	hostList := make([]*namenodeHost, len(options.Addresses))
-	for i, addr := range options.Addresses {
-		hostList[i] = &namenodeHost{address: addr}
-	}
-
-	// The ClientID is reused here both in the RPC headers (which requires a
-	// "globally unique" ID) and as the "client name" in various requests.
-	clientId := newClientID()
-	c := &NamenodeConnection{
-		clientId:                     clientId,
-		clientName:                   "go-hdfs-" + string(clientId),
-		kerberosClient:               options.KerberosClient,
-		kerberosServicePrincipleName: options.KerberosServicePrincipleName,
-		user:                         options.User,
-		dialFunc:                     options.DialFunc,
-		hostList:                     hostList,
-	}
-
-	err := c.resolveConnection()
-	if err != nil {
-		return nil, err
-	}
-
-	return c, nil
-}
-
-// WrapNamenodeConnection wraps an existing net.Conn to a Namenode, and performs
-// an initial handshake.
-//
-// Deprecated: use the DialFunc option in NamenodeConnectionOptions or the
-// higher-level hdfs.NewClient.
-func WrapNamenodeConnection(conn net.Conn, user string) (*NamenodeConnection, error) {
-	// The ClientID is reused here both in the RPC headers (which requires a
-	// "globally unique" ID) and as the "client name" in various requests.
- clientId := newClientID() - c := &NamenodeConnection{ - clientId: clientId, - clientName: "go-hdfs-" + string(clientId), - user: user, - conn: conn, - host: &namenodeHost{}, - hostList: make([]*namenodeHost, 0), - } - - err := c.doNamenodeHandshake() - if err != nil { - conn.Close() - return nil, fmt.Errorf("namenode handshake failed: %s", err) - } - - return c, nil -} - -func (c *NamenodeConnection) resolveConnection() error { - if c.conn != nil { - return nil - } - - var err error - - if c.host != nil { - err = c.host.lastError - } - - for _, host := range c.hostList { - if host.lastErrorAt.After(time.Now().Add(-backoffDuration)) { - continue - } - - if c.dialFunc == nil { - c.dialFunc = (&net.Dialer{}).DialContext - } - - c.host = host - c.conn, err = c.dialFunc(context.Background(), "tcp", host.address) - if err != nil { - c.markFailure(err) - continue - } - - err = c.doNamenodeHandshake() - if err != nil { - c.markFailure(err) - continue - } - - break - } - - if c.conn == nil { - return fmt.Errorf("no available namenodes: %s", err) - } - - return nil -} - -func (c *NamenodeConnection) markFailure(err error) { - if c.conn != nil { - c.conn.Close() - c.conn = nil - } - c.host.lastError = err - c.host.lastErrorAt = time.Now() -} - -// ClientName provides a unique identifier for this client, which is required -// for various RPC calls. Confusingly, it's separate from clientID, which is -// used in the RPC header; to make things simpler, it reuses the random bytes -// from that, but adds a prefix to make it human-readable. -func (c *NamenodeConnection) ClientName() string { - return c.clientName -} - -// Execute performs an rpc call. It does this by sending req over the wire and -// unmarshaling the result into resp. -func (c *NamenodeConnection) Execute(method string, req proto.Message, resp proto.Message) error { - c.reqLock.Lock() - defer c.reqLock.Unlock() - - c.currentRequestID++ - - for { - err := c.resolveConnection() - if err != nil { - return err - } - - err = c.writeRequest(method, req) - if err != nil { - c.markFailure(err) - continue - } - - err = c.readResponse(method, resp) - if err != nil { - // Only retry on a standby exception. 
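[Reviewer note, not part of the patch] The failover rule stated in the comment above is the crux of Execute: only a standby namenode justifies a transparent retry. A standalone sketch of that predicate with a stand-in error type (the vendored code type-asserts on *NamenodeError and compares against standbyExceptionClass, as the next line shows):

package main

import (
	"errors"
	"fmt"
)

// rpcError stands in for the deleted NamenodeError type; only the exception
// class matters for the failover decision.
type rpcError struct{ Exception string }

func (e *rpcError) Error() string { return e.Exception }

// retryable reports whether a failed call is safe to retry on another
// namenode, mirroring the deleted loop's rule of retrying only on a
// standby exception.
func retryable(err error) bool {
	var re *rpcError
	return errors.As(err, &re) &&
		re.Exception == "org.apache.hadoop.ipc.StandbyException"
}

func main() {
	fmt.Println(retryable(&rpcError{"org.apache.hadoop.ipc.StandbyException"})) // true
	fmt.Println(retryable(errors.New("connection refused")))                    // false
}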
- if nerr, ok := err.(*NamenodeError); ok && nerr.Exception == standbyExceptionClass { - c.markFailure(err) - continue - } - - return err - } - - break - } - - return nil -} - -// RPC definitions - -// A request packet: -// +-----------------------------------------------------------+ -// | uint32 length of the next three parts | -// +-----------------------------------------------------------+ -// | varint length + RpcRequestHeaderProto | -// +-----------------------------------------------------------+ -// | varint length + RequestHeaderProto | -// +-----------------------------------------------------------+ -// | varint length + Request | -// +-----------------------------------------------------------+ -func (c *NamenodeConnection) writeRequest(method string, req proto.Message) error { - rrh := newRPCRequestHeader(c.currentRequestID, c.clientId) - rh := newRequestHeader(method) - - reqBytes, err := makeRPCPacket(rrh, rh, req) - if err != nil { - return err - } - - _, err = c.conn.Write(reqBytes) - return err -} - -// A response from the namenode: -// +-----------------------------------------------------------+ -// | uint32 length of the next two parts | -// +-----------------------------------------------------------+ -// | varint length + RpcResponseHeaderProto | -// +-----------------------------------------------------------+ -// | varint length + Response | -// +-----------------------------------------------------------+ -func (c *NamenodeConnection) readResponse(method string, resp proto.Message) error { - rrh := &hadoop.RpcResponseHeaderProto{} - err := readRPCPacket(c.conn, rrh, resp) - if err != nil { - return err - } else if int32(rrh.GetCallId()) != c.currentRequestID { - return errors.New("unexpected sequence number") - } else if rrh.GetStatus() != hadoop.RpcResponseHeaderProto_SUCCESS { - return &NamenodeError{ - Method: method, - Message: rrh.GetErrorMsg(), - Code: int(rrh.GetErrorDetail()), - Exception: rrh.GetExceptionClassName(), - } - } - - return nil -} - -// A handshake packet: -// +-----------------------------------------------------------+ -// | Header, 4 bytes ("hrpc") | -// +-----------------------------------------------------------+ -// | Version, 1 byte (default verion 0x09) | -// +-----------------------------------------------------------+ -// | RPC service class, 1 byte (0x00) | -// +-----------------------------------------------------------+ -// | Auth protocol, 1 byte (Auth method None = 0x00) | -// +-----------------------------------------------------------+ -// -// If the auth protocol is something other than 'none', the authentication -// handshake happens here. Otherwise, everything can be sent as one packet. 
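[Reviewer note, not part of the patch] The request and response diagrams above share one framing convention: each protobuf is written with a uvarint length prefix, and the packet opens with a big-endian uint32 covering everything that follows (the deleted rpc.go below implements this as makeRPCPacket). A standalone sketch with stand-in byte slices in place of marshaled messages:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prefixes each message with its uvarint length, then stamps a
// big-endian uint32 of the total payload length at the front.
func frame(msgs ...[]byte) []byte {
	packet := make([]byte, 4, 128)
	for _, m := range msgs {
		var l [binary.MaxVarintLen32]byte
		n := binary.PutUvarint(l[:], uint64(len(m)))
		packet = append(packet, l[:n]...)
		packet = append(packet, m...)
	}
	binary.BigEndian.PutUint32(packet, uint32(len(packet)-4))
	return packet
}

func main() {
	p := frame([]byte("rpc-header"), []byte("request"))
	fmt.Printf("% x\n", p[:4]) // 00 00 00 13: two varint prefixes plus 17 payload bytes
}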
-// -// +-----------------------------------------------------------+ -// | uint32 length of the next two parts | -// +-----------------------------------------------------------+ -// | varint length + RpcRequestHeaderProto | -// +-----------------------------------------------------------+ -// | varint length + IpcConnectionContextProto | -// +-----------------------------------------------------------+ -func (c *NamenodeConnection) doNamenodeHandshake() error { - authProtocol := noneAuthProtocol - kerberos := false - if c.kerberosClient != nil { - authProtocol = saslAuthProtocol - kerberos = true - } - - rpcHeader := []byte{ - 0x68, 0x72, 0x70, 0x63, // "hrpc" - rpcVersion, serviceClass, authProtocol, - } - - _, err := c.conn.Write(rpcHeader) - if err != nil { - return err - } - - if kerberos { - err = c.doKerberosHandshake() - if err != nil { - return fmt.Errorf("SASL handshake: %s", err) - } - - // Reset the sequence number here, since we set it to -33 for the SASL bits. - c.currentRequestID = 0 - } - - rrh := newRPCRequestHeader(handshakeCallID, c.clientId) - cc := newConnectionContext(c.user) - packet, err := makeRPCPacket(rrh, cc) - if err != nil { - return err - } - - _, err = c.conn.Write(packet) - return err -} - -// Close terminates all underlying socket connections to remote server. -func (c *NamenodeConnection) Close() error { - if c.conn != nil { - return c.conn.Close() - } - return nil -} - -func newRPCRequestHeader(id int32, clientID []byte) *hadoop.RpcRequestHeaderProto { - return &hadoop.RpcRequestHeaderProto{ - RpcKind: hadoop.RpcKindProto_RPC_PROTOCOL_BUFFER.Enum(), - RpcOp: hadoop.RpcRequestHeaderProto_RPC_FINAL_PACKET.Enum(), - CallId: proto.Int32(id), - ClientId: clientID, - } -} - -func newRequestHeader(methodName string) *hadoop.RequestHeaderProto { - return &hadoop.RequestHeaderProto{ - MethodName: proto.String(methodName), - DeclaringClassProtocolName: proto.String(protocolClass), - ClientProtocolVersion: proto.Uint64(uint64(protocolClassVersion)), - } -} - -func newConnectionContext(user string) *hadoop.IpcConnectionContextProto { - return &hadoop.IpcConnectionContextProto{ - UserInfo: &hadoop.UserInformationProto{ - EffectiveUser: proto.String(user), - }, - Protocol: proto.String(protocolClass), - } -} diff --git a/vendor/github.com/colinmarc/hdfs/rpc/rpc.go b/vendor/github.com/colinmarc/hdfs/rpc/rpc.go deleted file mode 100644 index ad52d5a3c86..00000000000 --- a/vendor/github.com/colinmarc/hdfs/rpc/rpc.go +++ /dev/null @@ -1,179 +0,0 @@ -// Package rpc implements some of the lower-level functionality required to -// communicate with the namenode and datanodes. -package rpc - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/rand" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/golang/protobuf/proto" -) - -const ( - dataTransferVersion = 0x1c - writeBlockOp = 0x50 - readBlockOp = 0x51 - checksumBlockOp = 0x55 -) - -var errMalformedRPCMessage = errors.New("malformed RPC message") - -// Used for client ID generation, below. 
-const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -func newClientID() []byte { - id := make([]byte, 16) - - rand.Seed(time.Now().UTC().UnixNano()) - for i := range id { - id[i] = chars[rand.Intn(len(chars))] - } - - return id -} - -func makeRPCPacket(msgs ...proto.Message) ([]byte, error) { - packet := make([]byte, 4, 128) - - length := 0 - for _, msg := range msgs { - b, err := makePrefixedMessage(msg) - if err != nil { - return nil, err - } - - packet = append(packet, b...) - length += len(b) - } - - binary.BigEndian.PutUint32(packet, uint32(length)) - return packet, nil -} - -func readRPCPacket(r io.Reader, msgs ...proto.Message) error { - var packetLength uint32 - err := binary.Read(r, binary.BigEndian, &packetLength) - if err != nil { - return err - } - - packet := make([]byte, packetLength) - _, err = io.ReadFull(r, packet) - if err != nil { - return err - } - - for _, msg := range msgs { - // HDFS doesn't send all the response messages all the time (for example, if - // the RpcResponseHeaderProto contains an error). - if len(packet) == 0 { - return nil - } - - msgLength, n := binary.Uvarint(packet) - if n <= 0 || msgLength > uint64(len(packet)) { - return errMalformedRPCMessage - } - - packet = packet[n:] - if msgLength != 0 { - err = proto.Unmarshal(packet[:msgLength], msg) - if err != nil { - return err - } - - packet = packet[msgLength:] - } - } - - if len(packet) > 0 { - return errMalformedRPCMessage - } - - return nil -} - -func makePrefixedMessage(msg proto.Message) ([]byte, error) { - msgBytes, err := proto.Marshal(msg) - if err != nil { - return nil, err - } - - lengthBytes := make([]byte, 10) - n := binary.PutUvarint(lengthBytes, uint64(len(msgBytes))) - return append(lengthBytes[:n], msgBytes...), nil -} - -func readPrefixedMessage(r io.Reader, msg proto.Message) error { - varintBytes := make([]byte, binary.MaxVarintLen32) - _, err := io.ReadAtLeast(r, varintBytes, 1) - if err != nil { - return err - } - - respLength, varintLength := binary.Uvarint(varintBytes) - if varintLength < 1 { - return io.ErrUnexpectedEOF - } - - // We may have grabbed too many bytes when reading the varint. - respBytes := make([]byte, respLength) - extraLength := copy(respBytes, varintBytes[varintLength:]) - _, err = io.ReadFull(r, respBytes[extraLength:]) - if err != nil { - return err - } - - return proto.Unmarshal(respBytes, msg) -} - -// A op request to a datanode: -// +-----------------------------------------------------------+ -// | Data Transfer Protocol Version, int16 | -// +-----------------------------------------------------------+ -// | Op code, 1 byte | -// +-----------------------------------------------------------+ -// | varint length + OpReadBlockProto | -// +-----------------------------------------------------------+ -func writeBlockOpRequest(w io.Writer, op uint8, msg proto.Message) error { - header := []byte{0x00, dataTransferVersion, op} - msgBytes, err := makePrefixedMessage(msg) - if err != nil { - return err - } - - req := append(header, msgBytes...) 
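-	// The 2-byte protocol version, 1-byte op code, and varint-prefixed
-	// proto message go out as a single write.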
- _, err = w.Write(req) - if err != nil { - return err - } - - return nil -} - -// The initial response from a datanode, in the case of reads and writes: -// +-----------------------------------------------------------+ -// | varint length + BlockOpResponseProto | -// +-----------------------------------------------------------+ -func readBlockOpResponse(r io.Reader) (*hdfs.BlockOpResponseProto, error) { - resp := &hdfs.BlockOpResponseProto{} - err := readPrefixedMessage(r, resp) - - return resp, err -} - -func getDatanodeAddress(datanode *hdfs.DatanodeIDProto, useHostname bool) string { - var host string - if useHostname { - host = datanode.GetHostName() - } else { - host = datanode.GetIpAddr() - } - - return fmt.Sprintf("%s:%d", host, datanode.GetXferPort()) -} diff --git a/vendor/github.com/colinmarc/hdfs/stat.go b/vendor/github.com/colinmarc/hdfs/stat.go deleted file mode 100644 index 217609861bd..00000000000 --- a/vendor/github.com/colinmarc/hdfs/stat.go +++ /dev/null @@ -1,111 +0,0 @@ -package hdfs - -import ( - "os" - "path" - "time" - - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" - "github.com/golang/protobuf/proto" -) - -// FileInfo implements os.FileInfo, and provides information about a file or -// directory in HDFS. -type FileInfo struct { - name string - status *hdfs.HdfsFileStatusProto -} - -// Stat returns an os.FileInfo describing the named file or directory. -func (c *Client) Stat(name string) (os.FileInfo, error) { - fi, err := c.getFileInfo(name) - if err != nil { - err = &os.PathError{"stat", name, err} - } - - return fi, err -} - -func (c *Client) getFileInfo(name string) (os.FileInfo, error) { - req := &hdfs.GetFileInfoRequestProto{Src: proto.String(name)} - resp := &hdfs.GetFileInfoResponseProto{} - - err := c.namenode.Execute("getFileInfo", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - - return nil, err - } - - if resp.GetFs() == nil { - return nil, os.ErrNotExist - } - - return newFileInfo(resp.GetFs(), name), nil -} - -func newFileInfo(status *hdfs.HdfsFileStatusProto, name string) *FileInfo { - fi := &FileInfo{status: status} - - var fullName string - if string(status.GetPath()) != "" { - fullName = string(status.GetPath()) - } else { - fullName = name - } - - fi.name = path.Base(fullName) - return fi -} - -func (fi *FileInfo) Name() string { - return fi.name -} - -func (fi *FileInfo) Size() int64 { - return int64(fi.status.GetLength()) -} - -func (fi *FileInfo) Mode() os.FileMode { - mode := os.FileMode(fi.status.GetPermission().GetPerm()) - if fi.IsDir() { - mode |= os.ModeDir - } - - return mode -} - -func (fi *FileInfo) ModTime() time.Time { - return time.Unix(0, int64(fi.status.GetModificationTime())*int64(time.Millisecond)) -} - -func (fi *FileInfo) IsDir() bool { - return fi.status.GetFileType() == hdfs.HdfsFileStatusProto_IS_DIR -} - -// Sys returns the raw *hadoop_hdfs.HdfsFileStatusProto message from the -// namenode. -func (fi *FileInfo) Sys() interface{} { - return fi.status -} - -// Owner returns the name of the user that owns the file or directory. It's not -// part of the os.FileInfo interface. -func (fi *FileInfo) Owner() string { - return fi.status.GetOwner() -} - -// OwnerGroup returns the name of the group that owns the file or directory. -// It's not part of the os.FileInfo interface. 
-func (fi *FileInfo) OwnerGroup() string { - return fi.status.GetGroup() -} - -// AccessTime returns the last time the file was accessed. It's not part of the -// os.FileInfo interface. -func (fi *FileInfo) AccessTime() time.Time { - return time.Unix(int64(fi.status.GetAccessTime())/1000, 0) -} diff --git a/vendor/github.com/colinmarc/hdfs/stat_fs.go b/vendor/github.com/colinmarc/hdfs/stat_fs.go deleted file mode 100644 index 4226ed30c0b..00000000000 --- a/vendor/github.com/colinmarc/hdfs/stat_fs.go +++ /dev/null @@ -1,45 +0,0 @@ -package hdfs - -import ( - hdfs "github.com/colinmarc/hdfs/protocol/hadoop_hdfs" - "github.com/colinmarc/hdfs/rpc" -) - -// FsInfo provides information about HDFS -type FsInfo struct { - Capacity uint64 - Used uint64 - Remaining uint64 - UnderReplicated uint64 - CorruptBlocks uint64 - MissingBlocks uint64 - MissingReplOneBlocks uint64 - BlocksInFuture uint64 - PendingDeletionBlocks uint64 -} - -func (c *Client) StatFs() (FsInfo, error) { - req := &hdfs.GetFsStatusRequestProto{} - resp := &hdfs.GetFsStatsResponseProto{} - - err := c.namenode.Execute("getFsStats", req, resp) - if err != nil { - if nnErr, ok := err.(*rpc.NamenodeError); ok { - err = interpretException(nnErr.Exception, err) - } - return FsInfo{}, err - } - - var fs FsInfo - fs.Capacity = resp.GetCapacity() - fs.Used = resp.GetUsed() - fs.Remaining = resp.GetRemaining() - fs.UnderReplicated = resp.GetUnderReplicated() - fs.CorruptBlocks = resp.GetCorruptBlocks() - fs.MissingBlocks = resp.GetMissingBlocks() - fs.MissingReplOneBlocks = resp.GetMissingReplOneBlocks() - fs.BlocksInFuture = resp.GetBlocksInFuture() - fs.PendingDeletionBlocks = resp.GetPendingDeletionBlocks() - - return fs, nil -} diff --git a/vendor/github.com/colinmarc/hdfs/travis-setup-cdh5.sh b/vendor/github.com/colinmarc/hdfs/travis-setup-cdh5.sh deleted file mode 100644 index 1739f571e10..00000000000 --- a/vendor/github.com/colinmarc/hdfs/travis-setup-cdh5.sh +++ /dev/null @@ -1,158 +0,0 @@ -#!/bin/sh - -set -e - -KERBEROS=${KERBEROS-"false"} - -UBUNTU_CODENAME=$(lsb_release -c | awk '{print $2}') - -sudo tee /etc/apt/sources.list.d/cdh.list < - - fs.defaultFS - hdfs://localhost:9000 - - - hadoop.security.authentication - $CONF_AUTHENTICATION - - - hadoop.security.authorization - $KERBEROS - - - dfs.namenode.keytab.file - /tmp/nn.keytab - - - dfs.namenode.kerberos.principal - nn/localhost@$KERBEROS_REALM - - - dfs.web.authentication.kerberos.principal - nn/localhost@$KERBEROS_REALM - - - dfs.datanode.keytab.file - /tmp/dn.keytab - - - dfs.datanode.kerberos.principal - dn/localhost@$KERBEROS_REALM - - -EOF - -sudo tee /etc/hadoop/conf.gohdfs/hdfs-site.xml < - - dfs.namenode.name.dir - /opt/hdfs/name - - - dfs.datanode.data.dir - /opt/hdfs/data - - - dfs.permissions.superusergroup - hadoop - - - dfs.safemode.extension - 0 - - - dfs.safemode.min.datanodes - 1 - - - dfs.block.access.token.enable - $KERBEROS - - - ignore.secure.ports.for.testing - true - - -EOF - -sudo update-alternatives --install /etc/hadoop/conf hadoop-conf /etc/hadoop/conf.gohdfs 99 -sudo apt-get install -y --allow-unauthenticated hadoop-hdfs-namenode hadoop-hdfs-datanode - -sudo mkdir -p /opt/hdfs/data /opt/hdfs/name -sudo chown -R hdfs:hdfs /opt/hdfs -sudo -u hdfs hdfs namenode -format -nonInteractive - -sudo adduser travis hadoop - -sudo service hadoop-hdfs-datanode restart -sudo service hadoop-hdfs-namenode restart - -hdfs dfsadmin -safemode wait diff --git a/vendor/github.com/colinmarc/hdfs/travis-setup-hdp2.sh 
b/vendor/github.com/colinmarc/hdfs/travis-setup-hdp2.sh deleted file mode 100644 index 9d8680f94d9..00000000000 --- a/vendor/github.com/colinmarc/hdfs/travis-setup-hdp2.sh +++ /dev/null @@ -1,51 +0,0 @@ -#!/bin/sh - -set -e - -UBUNTU_VERSION=$(lsb_release -r | awk '{print substr($2,0,2)}') - -sudo tee /etc/apt/sources.list.d/hdp.list < - - fs.defaultFS - hdfs://localhost:9000 - - -EOF - -sudo tee /etc/hadoop/conf/hdfs-site.xml < - - dfs.namenode.name.dir - /opt/hdfs/name - - - dfs.datanode.data.dir - /opt/hdfs/data - - - dfs.permissions.superusergroup - hadoop - - -EOF - -sudo apt-get install -y --allow-unauthenticated hadoop hadoop-hdfs - -sudo mkdir -p /opt/hdfs/data /opt/hdfs/name -sudo chown -R hdfs:hdfs /opt/hdfs -sudo -u hdfs hdfs namenode -format -nonInteractive - -sudo adduser travis hadoop - -sudo /usr/hdp/current/hadoop-hdfs-datanode/../hadoop/sbin/hadoop-daemon.sh start datanode -sudo /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh start namenode - -hdfs dfsadmin -safemode wait diff --git a/vendor/github.com/colinmarc/hdfs/travis-setup.sh b/vendor/github.com/colinmarc/hdfs/travis-setup.sh deleted file mode 100644 index 037e399f298..00000000000 --- a/vendor/github.com/colinmarc/hdfs/travis-setup.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -set -e -cd $(dirname $0) - -case $1 in - cdh5) - ./travis-setup-cdh5.sh - ;; - hdp2) - ./travis-setup-hdp2.sh - ;; - *) - echo "Uknown platform: $PLATFORM" - exit 1 - ;; -esac - -./fixtures.sh diff --git a/vendor/github.com/colinmarc/hdfs/walk.go b/vendor/github.com/colinmarc/hdfs/walk.go deleted file mode 100644 index 438a407af13..00000000000 --- a/vendor/github.com/colinmarc/hdfs/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package hdfs - -import ( - "os" - "path/filepath" - "sort" -) - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very large -// directories Walk can be inefficient. Walk does not follow symbolic links. 
-func (c *Client) Walk(root string, walkFn filepath.WalkFunc) error { - return c.walk(root, walkFn) -} - -func (c *Client) walk(path string, walkFn filepath.WalkFunc) error { - file, err := c.Open(path) - var info os.FileInfo - if file != nil { - info = file.Stat() - } - - err = walkFn(path, info, err) - if err != nil { - if info != nil && info.IsDir() && err == filepath.SkipDir { - return nil - } - - return err - } - - if info == nil || !info.IsDir() { - return nil - } - - names, err := file.Readdirnames(0) - if err != nil { - return walkFn(path, info, err) - } - - sort.Strings(names) - for _, name := range names { - err = c.walk(filepath.ToSlash(filepath.Join(path, name)), walkFn) - if err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/doublerebel/bellows/LICENSE b/vendor/github.com/doublerebel/bellows/LICENSE deleted file mode 100644 index 229b055dac2..00000000000 --- a/vendor/github.com/doublerebel/bellows/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Charles Phillips - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/doublerebel/bellows/README.md b/vendor/github.com/doublerebel/bellows/README.md deleted file mode 100644 index 430098bd381..00000000000 --- a/vendor/github.com/doublerebel/bellows/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# bellows -Flatten and expand golang maps and structs - -## Features -There are some existing golang flatten/expand implementations, but they are targeted to specific use-cases, and none of them flatten Structs. This one doesn't do Slices, but the feature could be added. - -This `Flatten` will skip parsing a Map that does not have a String as the index. - -This `Flatten` recursively passes values as their original Interface, so it is simpler than implementations that rely on passing reflect.Value. 
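A minimal sketch of the flatten/expand round-trip, assuming only the exported signatures listed under Usage below:

```go
package main

import (
	"fmt"

	"github.com/doublerebel/bellows"
)

func main() {
	nested := map[string]interface{}{
		"server": map[string]interface{}{
			"host": "localhost",
			"port": 8080,
		},
	}

	// Flatten joins nested keys with dots: "server.host", "server.port".
	flat := bellows.Flatten(nested)
	fmt.Println(flat["server.host"]) // localhost

	// Expand reverses the transformation into a nested map.
	back := bellows.Expand(flat)
	fmt.Println(back["server"].(map[string]interface{})["port"]) // 8080
}
```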
- -## Usage - -```go -// Expand a dot-separated flat map into a nested map -func Expand(value map[string]interface{}) map[string]interface{} {} - -// Expand a dot-separated flat map into a nested map, with a prefix -func ExpandPrefixed(value map[string]interface{}, prefix string) map[string]interface{} {} - -// Expand a dot-separated flat map into an existing nested map, with a prefix -func ExpandPrefixedToResult(value map[string]interface{}, prefix string, result map[string]interface{}) {} - -// Flatten a nested map into a dot-separated flat map -func Flatten(value interface{}) map[string]interface{} {} - -// Flatten a nested map into a dot-separated flat map, with a prefix -func FlattenPrefixed(value interface{}, prefix string) map[string]interface{} {} - -// Flatten a nested map into an existing dot-separated flat map, with a prefix -func FlattenPrefixedToResult(value interface{}, prefix string, m map[string]interface{}) {} -``` - -## Examples - -Used in [an update to Viper](https://github.com/doublerebel/viper), to normalize config values coming from nested (JSON) and flat (CLI flags, env vars) sources. - -## Other golang flatten/expand implementations - - * [hashicorp/terraform/flatmap](https://github.com/hashicorp/terraform/tree/master/flatmap) - * [turtlemonvh/mapmap](https://github.com/turtlemonvh/mapmap) - * [peking2/func-go](https://github.com/peking2/func-go) - * [wolfeidau/unflatten](https://github.com/wolfeidau/unflatten) - * [jeremywohl/flatten](https://github.com/jeremywohl/flatten) - -(C) Copyright 2015 doublerebel. MIT Licensed. \ No newline at end of file diff --git a/vendor/github.com/doublerebel/bellows/main.go b/vendor/github.com/doublerebel/bellows/main.go deleted file mode 100644 index 082b80a02dd..00000000000 --- a/vendor/github.com/doublerebel/bellows/main.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright © 2016 Charles Phillips . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package bellows - -import ( - "reflect" - "strings" -) - -func Expand(value map[string]interface{}) map[string]interface{} { - return ExpandPrefixed(value, "") -} - -func ExpandPrefixed(value map[string]interface{}, prefix string) map[string]interface{} { - m := make(map[string]interface{}) - ExpandPrefixedToResult(value, prefix, m) - return m -} - -func ExpandPrefixedToResult(value map[string]interface{}, prefix string, result map[string]interface{}) { - if prefix != "" { - prefix += "." - } - for k, val := range value { - if !strings.HasPrefix(k, prefix) { - continue - } - - key := k[len(prefix):] - idx := strings.Index(key, ".") - if idx != -1 { - key = key[:idx] - } - if _, ok := result[key]; ok { - continue - } - if idx == -1 { - result[key] = val - continue - } - - // It contains a period, so it is a more complex structure - result[key] = ExpandPrefixed(value, k[:len(prefix)+len(key)]) - } -} - -func Flatten(value interface{}) map[string]interface{} { - return FlattenPrefixed(value, "") -} - -func FlattenPrefixed(value interface{}, prefix string) map[string]interface{} { - m := make(map[string]interface{}) - FlattenPrefixedToResult(value, prefix, m) - return m -} - -func FlattenPrefixedToResult(value interface{}, prefix string, m map[string]interface{}) { - base := "" - if prefix != "" { - base = prefix+"." 
- } - - original := reflect.ValueOf(value) - kind := original.Kind() - if kind == reflect.Ptr || kind == reflect.Interface { - original = reflect.Indirect(original) - kind = original.Kind() - } - t := original.Type() - - switch kind { - case reflect.Map: - if t.Key().Kind() != reflect.String { - break - } - for _, childKey := range original.MapKeys() { - childValue := original.MapIndex(childKey) - FlattenPrefixedToResult(childValue.Interface(), base+childKey.String(), m) - } - case reflect.Struct: - for i := 0; i < original.NumField(); i += 1 { - childValue := original.Field(i) - childKey := t.Field(i).Name - FlattenPrefixedToResult(childValue.Interface(), base+childKey, m) - } - default: - if prefix != "" { - m[prefix] = value - } - } -} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index a8c29bfbd53..8969526a6e5 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -91,11 +91,12 @@ logr design but also left out some parts and changed others: | Adding a name to a logger | `WithName` | no API | | Modify verbosity of log entries in a call chain | `V` | no API | | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | The high-level slog API is explicitly meant to be one of many different APIs that can be layered on top of a shared `slog.Handler`. logr is one such -alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr) -package. +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. ### Inspiration @@ -145,24 +146,24 @@ There are implementations for the following logging libraries: ## slog interoperability Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler` -and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and -`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`. +and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and +`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`. As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level -slog API. `slogr` itself leaves that to the caller. +slog API. -## Using a `logr.Sink` as backend for slog +### Using a `logr.LogSink` as backend for slog Ideally, a logr sink implementation should support both logr and slog by -implementing both the normal logr interface(s) and `slogr.SlogSink`. Because +implementing both the normal logr interface(s) and `SlogSink`. Because of a conflict in the parameters of the common `Enabled` method, it is [not possible to implement both slog.Handler and logr.Sink in the same type](https://github.com/golang/go/issues/59110). If both are supported, log calls can go from the high-level APIs to the backend -without the need to convert parameters. `NewLogr` and `NewSlogHandler` can +without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can convert back and forth without adding additional wrappers, with one exception: when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then -`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future +`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future log calls. 
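A hedged sketch of the two conversion directions described above; it assumes Go >= 1.21 and uses the funcr backend purely as a stand-in for a real logr implementation:

```go
package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	// logr.Logger -> slog.Handler: expose an existing LogSink to slog callers.
	logrLogger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})
	slogLogger := slog.New(logr.ToSlogHandler(logrLogger))
	slogLogger.Info("written via the slog API", "key", "value")

	// slog.Handler -> logr.Logger: the reverse direction.
	logrAgain := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	logrAgain.Info("written via the logr API", "key", "value")
}
```

Because `FromSlogHandler` and `ToSlogHandler` recognize each other's wrapper types, converting back and forth does not stack additional wrappers, with the verbosity exception noted above.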
Such an implementation should also support values that implement specific @@ -187,13 +188,13 @@ Not supporting slog has several drawbacks: These drawbacks are severe enough that applications using a mixture of slog and logr should switch to a different backend. -## Using a `slog.Handler` as backend for logr +### Using a `slog.Handler` as backend for logr Using a plain `slog.Handler` without support for logr works better than the other direction: - All logr verbosity levels can be mapped 1:1 to their corresponding slog level by negating them. -- Stack unwinding is done by the `slogr.SlogSink` and the resulting program +- Stack unwinding is done by the `SlogSink` and the resulting program counter is passed to the `slog.Handler`. - Names added via `Logger.WithName` are gathered and recorded in an additional attribute with `logger` as key and the names separated by slash as value. @@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility with logr implementations without slog support is not important, then `slog.Valuer` is sufficient. -## Context support for slog +### Context support for slog Storing a logger in a `context.Context` is not supported by -slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this -to fill this gap: - - func HandlerFromContext(ctx context.Context) slog.Handler { - logger, err := logr.FromContext(ctx) - if err == nil { - return slogr.NewSlogHandler(logger) - } - return slog.Default().Handler() - } - - func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context { - return logr.NewContext(ctx, slogr.NewLogr(handler)) - } - -The downside is that storing and retrieving a `slog.Handler` needs more -allocations compared to using a `logr.Logger`. Therefore the recommendation is -to use the `logr.Logger` API in code which uses contextual logging. +slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be +used to fill this gap. They store and retrieve a `slog.Logger` pointer +under the same context key that is also used by `NewContext` and +`FromContext` for `logr.Logger` value. + +When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will +automatically convert the `slog.Logger` to a +`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction. + +With this approach, binaries which use either slog or logr are as efficient as +possible with no unnecessary allocations. This is also why the API stores a +`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger` +on retrieval would need to allocate one. + +The downside is that switching back and forth needs more allocations. Because +logr is the API that is already in use by different packages, in particular +Kubernetes, the recommendation is to use the `logr.Logger` API in code which +uses contextual logging. + +An alternative to adding values to a logger and storing that logger in the +context is to store the values in the context and to configure a logging +backend to extract those values when emitting log entries. This only works when +log calls are passed the context, which is not supported by the logr API. + +With the slog API, it is possible, but not +required. https://github.com/veqryn/slog-context is a package for slog which +provides additional support code for this approach. 
It also contains wrappers +for the context functions in logr, so developers who prefer to not use the logr +APIs directly can use those instead and the resulting code will still be +interoperable with logr. ## FAQ diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 00000000000..de8bcc3ad89 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 00000000000..f012f9a18e8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 00000000000..065ef0b8280 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go index 12e5807cc5c..fb2f866f4b7 100644 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -100,6 +100,11 @@ type Options struct { // details, see docs for Go's time.Layout. TimestampFormat string + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + // Verbosity tells funcr which V logs to produce. Higher values enable // more logs. Info logs at or below this level will be written, while logs // above this level will be discarded. @@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { if opts.MaxLogDepth == 0 { opts.MaxLogDepth = defaultMaxLogDepth } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } f := Formatter{ outputFormat: outfmt, prefix: "", @@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter { // implementation. It should be constructed with NewFormatter. 
Some of // its methods directly implement logr.LogSink. type Formatter struct { - outputFormat outputFormat - prefix string - values []any - valuesStr string - depth int - opts *Options + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int } // outputFormat indicates which outputFormat to use. @@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string { // Empirically bytes.Buffer is faster than strings.Builder for this. buf := bytes.NewBuffer(make([]byte, 0, 1024)) if f.outputFormat == outputJSON { - buf.WriteByte('{') + buf.WriteByte('{') // for the whole line } + vals := builtins if hook := f.opts.RenderBuiltinsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, false, false) // keys are ours, no need to escape continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { + + if f.parentValuesStr != "" { if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } + buf.WriteByte(f.comma()) } + buf.WriteString(f.parentValuesStr) continuing = true + } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } buf.WriteString(f.valuesStr) + continuing = true } + vals = args if hook := f.opts.RenderArgsHook; hook != nil { vals = hook(f.sanitize(vals)) } f.flatten(buf, vals, continuing, true) // escape user-provided keys + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups + } + if f.outputFormat == outputJSON { - buf.WriteByte('}') + buf.WriteByte('}') // for the whole line } + return buf.String() } @@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if len(kvList)%2 != 0 { kvList = append(kvList, noValue) } + copied := false for i := 0; i < len(kvList); i += 2 { k, ok := kvList[i].(string) if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } k = f.nonStringKey(kvList[i]) kvList[i] = k } @@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc if i > 0 || continuing { if f.outputFormat == outputJSON { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } else { // In theory the format could be something we don't understand. In // practice, we control it, so it won't be. 
@@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc } } - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) buf.WriteString(f.pretty(v)) } return kvList } +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + func (f Formatter) pretty(value any) string { return f.prettyWithFlags(value, 0, 0) } @@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } for i := 0; i < len(v); i += 2 { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } k, _ := v[i].(string) // sanitize() above means no need to check success // arbitrary keys might need escaping buf.WriteString(prettyString(k)) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { continue } if printComma { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } printComma = true // if we got here, we are rendering a field if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { @@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { name = fld.Name } // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') + buf.WriteString(f.quoted(name, false)) + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) } if flags&flagRawStruct == 0 { @@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { buf.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } e := v.Index(i) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) @@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { i := 0 for it.Next() { if i > 0 { - buf.WriteByte(',') + buf.WriteByte(f.comma()) } // If a map key supports TextMarshaler, use it. keystr := "" @@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string { } } buf.WriteString(keystr) - buf.WriteByte(':') + buf.WriteByte(f.colon()) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) i++ } @@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any { return kvList } +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(group string) { + // Unnamed groups are just inlined. + if group == "" { + return + } + + // Any saved values can no longer be changed. 
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + continuing := false + + if f.parentValuesStr != "" { + buf.WriteString(f.parentValuesStr) + continuing = true + } + + if f.group != "" && f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + } + + // NOTE: We don't close the scope here - that's done later, when a log line + // is actually rendered (because we have N scopes to close). + + f.parentValuesStr = buf.String() + + // Start collecting new values. + f.group = group + f.groupDepth++ + f.valuesStr = "" + f.values = nil +} + // Init configures this Formatter from runtime info, such as the call depth // imposed by logr itself. // Note that this receiver is a pointer, so depth can be saved. @@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args if policy := f.opts.LogCaller; policy == All || policy == Info { args = append(args, "caller", f.caller()) } - args = append(args, "level", level, "msg", msg) + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) return prefix, f.render(args, kvList) } diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 00000000000..7bd84761e2d --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. 
+func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) + } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 2a5075a180f..b4428e105b4 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -207,10 +207,6 @@ limitations under the License. // those. package logr -import ( - "context" -) - // New returns a new Logger instance. This is primarily used by libraries // implementing LogSink, rather than end users. Passing a nil sink will create // a Logger which discards all log lines. @@ -410,45 +406,6 @@ func (l Logger) IsZero() bool { return l.sink == nil } -// contextKey is how we find Loggers in a context.Context. -type contextKey struct{} - -// FromContext returns a Logger from ctx or an error if no Logger is found. -func FromContext(ctx context.Context) (Logger, error) { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v, nil - } - - return Logger{}, notFoundError{} -} - -// notFoundError exists to carry an IsNotFound method. -type notFoundError struct{} - -func (notFoundError) Error() string { - return "no logr.Logger was present" -} - -func (notFoundError) IsNotFound() bool { - return true -} - -// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this -// returns a Logger that discards all log messages. -func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new Context, derived from ctx, which carries the -// provided Logger. -func NewContext(ctx context.Context, logger Logger) context.Context { - return context.WithValue(ctx, contextKey{}, logger) -} - // RuntimeInfo holds information that the logr "core" library knows which // LogSinks might want to know. 
type RuntimeInfo struct { diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go similarity index 63% rename from vendor/github.com/go-logr/logr/slogr/sloghandler.go rename to vendor/github.com/go-logr/logr/sloghandler.go index ec6725ce2cd..82d1ba49481 100644 --- a/vendor/github.com/go-logr/logr/slogr/sloghandler.go +++ b/vendor/github.com/go-logr/logr/sloghandler.go @@ -17,18 +17,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" - - "github.com/go-logr/logr" ) type slogHandler struct { // May be nil, in which case all logs get discarded. - sink logr.LogSink + sink LogSink // Non-nil if sink is non-nil and implements SlogSink. slogSink SlogSink @@ -54,7 +52,7 @@ func (l *slogHandler) GetLevel() slog.Level { return l.levelBias } -func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { +func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool { return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) } @@ -72,9 +70,7 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { kvList := make([]any, 0, 2*record.NumAttrs()) record.Attrs(func(attr slog.Attr) bool { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) return true }) if record.Level >= slog.LevelError { @@ -90,15 +86,15 @@ func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { // are called by Handle, code in slog gets skipped. // // This offset currently (Go 1.21.0) works for calls through -// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// slog.New(ToSlogHandler(...)). There's no guarantee that the call // chain won't change. Wrapping the handler will also break unwinding. It's // still better than not adjusting at all.... // -// This cannot be done when constructing the handler because NewLogr needs +// This cannot be done when constructing the handler because FromSlogHandler needs // access to the original sink without this adjustment. A second copy would // work, but then WithAttrs would have to be called for both of them. -func (l *slogHandler) sinkWithCallDepth() logr.LogSink { - if sink, ok := l.sink.(logr.CallDepthLogSink); ok { +func (l *slogHandler) sinkWithCallDepth() LogSink { + if sink, ok := l.sink.(CallDepthLogSink); ok { return sink.WithCallDepth(2) } return l.sink @@ -109,60 +105,88 @@ func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { return l } - copy := *l + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithAttrs(attrs) - copy.sink = copy.slogSink + clone.slogSink = l.slogSink.WithAttrs(attrs) + clone.sink = clone.slogSink } else { kvList := make([]any, 0, 2*len(attrs)) for _, attr := range attrs { - if attr.Key != "" { - kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) - } + kvList = attrToKVs(attr, l.groupPrefix, kvList) } - copy.sink = l.sink.WithValues(kvList...) + clone.sink = l.sink.WithValues(kvList...) 
} - return © + return &clone } func (l *slogHandler) WithGroup(name string) slog.Handler { if l.sink == nil { return l } - copy := *l + if name == "" { + // slog says to inline empty groups + return l + } + clone := *l if l.slogSink != nil { - copy.slogSink = l.slogSink.WithGroup(name) - copy.sink = l.slogSink + clone.slogSink = l.slogSink.WithGroup(name) + clone.sink = clone.slogSink } else { - copy.groupPrefix = copy.addGroupPrefix(name) + clone.groupPrefix = addPrefix(clone.groupPrefix, name) + } + return &clone +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + prefix := groupPrefix + if attr.Key != "" { + prefix = addPrefix(groupPrefix, attr.Key) + } + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, prefix, grpKVs) + } + kvList = append(kvList, grpKVs...) + } else if attr.Key != "" { + kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any()) } - return © + + return kvList } -func (l *slogHandler) addGroupPrefix(name string) string { - if l.groupPrefix == "" { +func addPrefix(prefix, name string) string { + if prefix == "" { return name } - return l.groupPrefix + groupSeparator + name + if name == "" { + return prefix + } + return prefix + groupSeparator + name } // levelFromSlog adjusts the level by the logger's verbosity and negates it. // It ensures that the result is >= 0. This is necessary because the result is -// passed to a logr.LogSink and that API did not historically document whether +// passed to a LogSink and that API did not historically document whether // levels could be negative or what that meant. // // Some example usage: -// logrV0 := getMyLogger() -// logrV2 := logrV0.V(2) -// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) -// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) -// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) -// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) func (l *slogHandler) levelFromSlog(level slog.Level) int { result := -level - result += l.levelBias // in case the original logr.Logger had a V level + result += l.levelBias // in case the original Logger had a V level if result < 0 { - result = 0 // because logr.LogSink doesn't expect negative V levels + result = 0 // because LogSink doesn't expect negative V levels } return int(result) } diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go new file mode 100644 index 00000000000..28a83d02439 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr.go @@ -0,0 +1,100 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "log/slog" +) + +// FromSlogHandler returns a Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. +func FromSlogHandler(handler slog.Handler) Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return Discard() + } + return New(handler.sink).V(int(handler.levelBias)) + } + return New(&slogSink{handler: handler}) +} + +// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the Logger: +// +// logger := +// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func ToSlogHandler(logger Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in FromSlogHandler +// and ToSlogHandler. 
+type SlogSink interface { + LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go index eb519ae23f8..36432c56fdf 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogr.go +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -23,10 +23,11 @@ limitations under the License. // // See the README in the top-level [./logr] package for a discussion of // interoperability. +// +// Deprecated: use the main logr package instead. package slogr import ( - "context" "log/slog" "github.com/go-logr/logr" @@ -34,75 +35,27 @@ import ( // NewLogr returns a logr.Logger which writes to the slog.Handler. // -// The logr verbosity level is mapped to slog levels such that V(0) becomes -// slog.LevelInfo and V(4) becomes slog.LevelDebug. +// Deprecated: use [logr.FromSlogHandler] instead. func NewLogr(handler slog.Handler) logr.Logger { - if handler, ok := handler.(*slogHandler); ok { - if handler.sink == nil { - return logr.Discard() - } - return logr.New(handler.sink).V(int(handler.levelBias)) - } - return logr.New(&slogSink{handler: handler}) + return logr.FromSlogHandler(handler) } // NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. // -// The returned logger writes all records with level >= slog.LevelError as -// error log entries with LogSink.Error, regardless of the verbosity level of -// the logr.Logger: -// -// logger := -// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) -// -// The level of all other records gets reduced by the verbosity -// level of the logr.Logger and the result is negated. If it happens -// to be negative, then it gets replaced by zero because a LogSink -// is not expected to handled negative levels: -// -// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) -// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) -// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +// Deprecated: use [logr.ToSlogHandler] instead. func NewSlogHandler(logger logr.Logger) slog.Handler { - if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { - return sink.handler - } + return logr.ToSlogHandler(logger) +} - handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} - if slogSink, ok := handler.sink.(SlogSink); ok { - handler.slogSink = slogSink - } - return handler +// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// Deprecated: use [logr.ToSlogHandler] instead. +func ToSlogHandler(logger logr.Logger) slog.Handler { + return logr.ToSlogHandler(logger) } // SlogSink is an optional interface that a LogSink can implement to support -// logging through the slog.Logger or slog.Handler APIs better. It then should -// also support special slog values like slog.Group. When used as a -// slog.Handler, the advantages are: +// logging through the slog.Logger or slog.Handler APIs better. 
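A minimal sketch of the promoted interop API introduced above, assuming go-logr/logr v1.3+ on Go 1.21; the handler choice and log messages are illustrative only, not part of the vendored change:

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog backend behind a logr front end: per the mapping above,
	// V(0) corresponds to slog.LevelInfo and V(4) to slog.LevelDebug.
	logger := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	logger.V(4).Info("debug-level detail", "key", "value")

	// logr backend behind a slog front end: the V(2) bias is retained,
	// so an Info record reaches the underlying sink at level 2.
	slogger := slog.New(logr.ToSlogHandler(logger.V(2)))
	slogger.Info("hello again")
}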
// -// - stack unwinding gets avoided in favor of logging the pre-recorded PC, -// as intended by slog -// - proper grouping of key/value pairs via WithGroup -// - verbosity levels > slog.LevelInfo can be recorded -// - less overhead -// -// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally -// well. Developers can pick whatever API suits them better and/or mix -// packages which use either API in the same binary with a common logging -// implementation. -// -// This interface is necessary because the type implementing the LogSink -// interface cannot also implement the slog.Handler interface due to the -// different prototype of the common Enabled method. -// -// An implementation could support both interfaces in two different types, but then -// additional interfaces would be needed to convert between those types in NewLogr -// and NewSlogHandler. -type SlogSink interface { - logr.LogSink - - Handle(ctx context.Context, record slog.Record) error - WithAttrs(attrs []slog.Attr) SlogSink - WithGroup(name string) SlogSink -} +// Deprecated: use [logr.SlogSink] instead. +type SlogSink = logr.SlogSink diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go similarity index 82% rename from vendor/github.com/go-logr/logr/slogr/slogsink.go rename to vendor/github.com/go-logr/logr/slogsink.go index 6fbac561d98..4060fcbc2b0 100644 --- a/vendor/github.com/go-logr/logr/slogr/slogsink.go +++ b/vendor/github.com/go-logr/logr/slogsink.go @@ -17,24 +17,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package slogr +package logr import ( "context" "log/slog" "runtime" "time" - - "github.com/go-logr/logr" ) var ( - _ logr.LogSink = &slogSink{} - _ logr.CallDepthLogSink = &slogSink{} - _ Underlier = &slogSink{} + _ LogSink = &slogSink{} + _ CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} ) -// Underlier is implemented by the LogSink returned by NewLogr. +// Underlier is implemented by the LogSink returned by NewFromLogHandler. type Underlier interface { // GetUnderlying returns the Handler used by the LogSink. GetUnderlying() slog.Handler @@ -54,7 +52,7 @@ type slogSink struct { handler slog.Handler } -func (l *slogSink) Init(info logr.RuntimeInfo) { +func (l *slogSink) Init(info RuntimeInfo) { l.callDepth = info.CallDepth } @@ -62,7 +60,7 @@ func (l *slogSink) GetUnderlying() slog.Handler { return l.handler } -func (l *slogSink) WithCallDepth(depth int) logr.LogSink { +func (l *slogSink) WithCallDepth(depth int) LogSink { newLogger := *l newLogger.callDepth += depth return &newLogger @@ -93,18 +91,18 @@ func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interf record.AddAttrs(slog.Any(errKey, err)) } record.Add(kvList...) 
- l.handler.Handle(context.Background(), record) + _ = l.handler.Handle(context.Background(), record) } -func (l slogSink) WithName(name string) logr.LogSink { +func (l slogSink) WithName(name string) LogSink { if l.name != "" { - l.name = l.name + "/" + l.name += "/" } l.name += name return &l } -func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { +func (l slogSink) WithValues(kvList ...interface{}) LogSink { l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) return &l } diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 00000000000..22f8d21cca1 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff1c..0108f1d572d 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go 
b/vendor/github.com/go-openapi/jsonpointer/pointer.go index 7df9853def6..d975773d490 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -26,6 +26,7 @@ package jsonpointer import ( + "encoding/json" "errors" "fmt" "reflect" @@ -40,6 +41,7 @@ const ( pointerSeparator = `/` invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator + notFound = `Can't find the pointer in the document` ) var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() @@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() // JSONPointable is an interface for structs to implement when they need to customize the // json pointer process type JSONPointable interface { - JSONLookup(string) (interface{}, error) + JSONLookup(string) (any, error) } // JSONSetable is an interface for structs to implement when they need to customize the // json pointer process type JSONSetable interface { - JSONSet(string, interface{}) error + JSONSet(string, any) error } // New creates a new json pointer for the given string @@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error { err = errors.New(invalidStart) } else { referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) } } @@ -91,38 +91,58 @@ func (p *Pointer) parse(jsonPointerString string) error { } // Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { +func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, swag.DefaultJSONNameProvider) } // Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { +func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, swag.DefaultJSONNameProvider) } // GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { +func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) } // SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { +func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } -func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + +func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err 
:= node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -159,7 +179,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam } -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { +func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error { rValue := reflect.Indirect(reflect.ValueOf(node)) if ns, ok := node.(JSONSetable); ok { // pointer impl @@ -170,7 +190,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -210,7 +230,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw } -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { +func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { if nameProvider == nil { nameProvider = swag.DefaultJSONNameProvider @@ -231,8 +251,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -241,7 +260,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf return node, kind, nil } -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { +func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) e continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -363,6 +382,128 @@ func (p *Pointer) String() string { return pointerString } +func (p *Pointer) Offset(document string) (int64, error) { + dec := json.NewDecoder(strings.NewReader(document)) + var offset int64 + for _, ttk := range p.DecodedTokens() { + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + offset, err = offsetSingleObject(dec, ttk) + if err != nil { + return 0, err + } + case '[': + offset, err = offsetSingleArray(dec, ttk) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return offset, nil +} + +func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { + for dec.More() { + offset := dec.InputOffset() + tk, err := dec.Token() + if err != nil { + return 0, err + } + switch tk := tk.(type) { + case json.Delim: + switch tk { + case '{': + if err = 
drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + case string: + if tk == decodedToken { + return offset, nil + } + default: + return 0, fmt.Errorf("invalid token %#v", tk) + } + } + return 0, fmt.Errorf("token reference %q not found", decodedToken) +} + +func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { + idx, err := strconv.Atoi(decodedToken) + if err != nil { + return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err) + } + var i int + for i = 0; i < idx && dec.More(); i++ { + tk, err := dec.Token() + if err != nil { + return 0, err + } + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return 0, err + } + case '[': + if err = drainSingle(dec); err != nil { + return 0, err + } + } + } + } + + if !dec.More() { + return 0, fmt.Errorf("token reference %q not found", decodedToken) + } + return dec.InputOffset(), nil +} + +// drainSingle drains a single level of object or array. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. +func drainSingle(dec *json.Decoder) error { + for dec.More() { + tk, err := dec.Token() + if err != nil { + return err + } + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { + case '{': + if err = drainSingle(dec); err != nil { + return err + } + case '[': + if err = drainSingle(dec); err != nil { + return err + } + } + } + } + + // Consumes the ending delim + if _, err := dec.Token(); err != nil { + return err + } + return nil +} + // Specific JSON pointer encoding here // ~0 => ~ // ~1 => / @@ -377,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 013fc1943a9..22f8d21cca1 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,50 +1,61 @@ linters-settings: govet: check-shadowing: true + golint: + min-confidence: 0 gocyclo: - min-complexity: 30 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 2 - min-occurrences: 4 - paralleltest: - ignore-missing: true + min-occurrences: 3 + linters: enable-all: true disable: - maligned + - unparam - lll + - gochecknoinits - gochecknoglobals + - funlen - godox - gocognit - whitespace - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - testpackage - - gci - - gofumpt - - goerr113 + - nlreturn - gomnd - - tparallel + - exhaustivestruct + - goerr113 + - errorlint - nestif - godot - - errorlint - - varcheck - - interfacer - - 
deadcode - - golint + - gofumpt + - paralleltest + - tparallel + - thelper - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck - structcheck + - golint - nosnakecase - - varnamelen - - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index b94753aa527..c7fc2049c1d 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -1,15 +1,19 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference.svg)](https://pkg.go.dev/github.com/go-openapi/jsonreference) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) An implementation of JSON Reference - Go language ## Status Feature complete. 
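For context on the jsonpointer rework above, a small usage sketch assuming github.com/go-openapi/jsonpointer with the new any-based signatures; the document literal is illustrative:

package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]any{
		"a/b": map[string]any{"c": []any{10, 20, 30}},
	}

	// "~" and "/" inside a token must be escaped as ~0 and ~1.
	ptr, err := jsonpointer.New("/" + jsonpointer.Escape("a/b") + "/c/1")
	if err != nil {
		panic(err)
	}

	v, kind, err := ptr.Get(doc)
	fmt.Println(v, kind, err) // 20 int <nil>

	// Offset (new in this revision) reports the byte offset of the
	// pointed-to token within a raw JSON document string.
	off, err := ptr.Offset(`{"a/b":{"c":[10,20,30]}}`)
	fmt.Println(off, err)
}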
Stable API ## Dependencies -https://github.com/go-openapi/jsonpointer +* https://github.com/go-openapi/jsonpointer ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 +* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 +* http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore index d69b53accc5..c4b1b64f04e 100644 --- a/vendor/github.com/go-openapi/swag/.gitignore +++ b/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e40001..80e2be0042f 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 217f6fa5054..a7292229980 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper 
functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/initialism_index.go similarity index 98% rename from vendor/github.com/go-openapi/swag/post_go19.go rename to vendor/github.com/go-openapi/swag/initialism_index.go index 7c7da9c0880..03555184d1b 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.9 -// +build go1.9 - package swag import ( diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 00038c3773c..783442fddf6 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. +// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. 
+// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... ==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go deleted file mode 100644 index f5228b82c0f..00000000000 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.8 -// +build go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 2757d9b95f8..00000000000 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.8 -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377be..00000000000 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. 
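As a usage sketch for the reworked LoadStrategy above, assuming github.com/go-openapi/swag (the file name is illustrative): the remote loader is only selected for http(s) paths, and everything else goes through the local loader with the file:// normalization described in the comment.

package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	remote := func(string) ([]byte, error) {
		return nil, fmt.Errorf("network disabled in this sketch")
	}

	// "./spec.yaml" does not start with "http", so the local loader
	// (os.ReadFile here) is chosen and slashes are normalized for the OS.
	load := swag.LoadStrategy("./spec.yaml", os.ReadFile, remote)
	buf, err := load("./spec.yaml")
	fmt.Println(len(buf), err)
}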
-type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index f78ab684a0a..c5322d133d5 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -341,13 +341,22 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. func IsZero(data interface{}) bool { + v := reflect.ValueOf(data) + // check for nil data + switch v.Kind() { //nolint:exhaustive + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return true + } + } + // check for things that have an IsZero method instead if vv, ok := data.(zeroable); ok { return vv.IsZero() } + // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -358,14 +367,13 @@ func IsZero(data interface{}) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() case reflect.Struct, reflect.Array: return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) case reflect.Invalid: return true + default: + return false } - return false } // AddInitialisms add additional initialisms diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f3b..a8c4e359ea6 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -18,6 +18,8 @@ import ( "encoding/json" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -147,7 +149,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +247,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", 
+ }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +287,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +347,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore deleted file mode 100644 index 2de28da1663..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -.DS_Store -.DS_Store? -._* -.Spotlight-V100 -.Trashes -Icon? -ehthumbs.db -Thumbs.db -.idea diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS deleted file mode 100644 index 50afa2c8597..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ /dev/null @@ -1,117 +0,0 @@ -# This is the official list of Go-MySQL-Driver authors for copyright purposes. - -# If you are submitting a patch, please add your name or the name of the -# organization which holds the copyright to this list in alphabetical order. - -# Names should be added to this file as -# Name -# The email address is not required for organizations. -# Please keep the list sorted. - - -# Individual Persons - -Aaron Hopkins -Achille Roussel -Alex Snast -Alexey Palazhchenko -Andrew Reid -Animesh Ray -Arne Hormann -Ariel Mashraki -Asta Xie -Bulat Gaifullin -Caine Jette -Carlos Nieto -Chris Moos -Craig Wilson -Daniel Montoya -Daniel Nichter -Daniël van Eeden -Dave Protasowski -DisposaBoy -Egor Smolyakov -Erwan Martin -Evan Shaw -Frederick Mayle -Gustavo Kristic -Hajime Nakagami -Hanno Braun -Henri Yandell -Hirotaka Yamamoto -Huyiguang -ICHINOSE Shogo -Ilia Cimpoes -INADA Naoki -Jacek Szwec -James Harr -Jeff Hodges -Jeffrey Charles -Jerome Meyer -Jiajia Zhong -Jian Zhen -Joshua Prunier -Julien Lefevre -Julien Schmidt -Justin Li -Justin Nuß -Kamil Dziedzic -Kei Kamikawa -Kevin Malachowski -Kieron Woodhouse -Lennart Rudolph -Leonardo YongUk Kim -Linh Tran Tuan -Lion Yang -Luca Looz -Lucas Liu -Luke Scott -Maciej Zimnoch -Michael Woolnough -Nathanial Murphy -Nicola Peduzzi -Olivier Mengué -oscarzhao -Paul Bonser -Peter Schultz -Rebecca Chin -Reed Allman -Richard Wilkes -Robert Russell -Runrioter Wung -Sho Iizuka -Sho Ikeda -Shuode Li -Simon J Mudd -Soroush Pour -Stan Putrya -Stanley Gunawan -Steven Hartland -Tan Jinhua <312841925 at qq.com> -Thomas Wodarek -Tim Ruffles -Tom Jenkinson -Vladimir Kovpak -Vladyslav Zhelezniak -Xiangyu Hu -Xiaobing Jiang -Xiuming Chen -Xuehong Chan -Zhenye Xie -Zhixin Wen - -# Organizations - -Barracuda Networks, Inc. -Counting Ltd. -DigitalOcean Inc. -Facebook Inc. -GitHub Inc. -Google Inc. -InfoSum Ltd. -Keybase Inc. -Multiplay Ltd. -Percona LLC -Pivotal Inc. -Stripe Inc. -Zendesk Inc. 
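The util.go and yaml.go hunks above tighten nil handling: swag.IsZero now detects nil interfaces, maps, pointers, and slices before consulting a custom IsZero method, and json2yaml emits map keys in sorted order so YAML output is deterministic. A small sketch of the IsZero behaviour, assuming github.com/go-openapi/swag:

package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	var m map[string]int // nil map
	var p *int           // typed nil pointer

	fmt.Println(swag.IsZero(m))  // true: nil is caught before reflection
	fmt.Println(swag.IsZero(p))  // true
	fmt.Println(swag.IsZero("")) // true: zero-length string
	fmt.Println(swag.IsZero(42)) // false
}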
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md deleted file mode 100644 index 72a738ed502..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ /dev/null @@ -1,232 +0,0 @@ -## Version 1.6 (2021-04-01) - -Changes: - - - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190) - - `NullTime` is deprecated (#960, #1144) - - Reduce allocations when building SET command (#1111) - - Performance improvement for time formatting (#1118) - - Performance improvement for time parsing (#1098, #1113) - -New Features: - - - Implement `driver.Validator` interface (#1106, #1174) - - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143) - - Add `json.RawMessage` for converter and prepared statement (#1059) - - Interpolate `json.RawMessage` as `string` (#1058) - - Implements `CheckNamedValue` (#1090) - -Bugfixes: - - - Stop rounding times (#1121, #1172) - - Put zero filler into the SSL handshake packet (#1066) - - Fix checking cancelled connections back into the connection pool (#1095) - - Fix remove last 0 byte for mysql_old_password when password is empty (#1133) - - -## Version 1.5 (2020-01-07) - -Changes: - - - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017) - - Improve buffer handling (#890) - - Document potentially insecure TLS configs (#901) - - Use a double-buffering scheme to prevent data races (#943) - - Pass uint64 values without converting them to string (#838, #955) - - Update collations and make utf8mb4 default (#877, #1054) - - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995) - - Removed CloudSQL support (#993, #1007) - - Add Go Module support (#1003) - -New Features: - - - Implement support of optional TLS (#900) - - Check connection liveness (#934, #964, #997, #1048, #1051, #1052) - - Implement Connector Interface (#941, #958, #1020, #1035) - -Bugfixes: - - - Mark connections as bad on error during ping (#875) - - Mark connections as bad on error during dial (#867) - - Fix connection leak caused by rapid context cancellation (#1024) - - Mark connections as bad on error during Conn.Prepare (#1030) - - -## Version 1.4.1 (2018-11-14) - -Bugfixes: - - - Fix TIME format for binary columns (#818) - - Fix handling of empty auth plugin names (#835) - - Fix caching_sha2_password with empty password (#826) - - Fix canceled context broke mysqlConn (#862) - - Fix OldAuthSwitchRequest support (#870) - - Fix Auth Response packet for cleartext password (#887) - -## Version 1.4 (2018-06-03) - -Changes: - - - Documentation fixes (#530, #535, #567) - - Refactoring (#575, #579, #580, #581, #603, #615, #704) - - Cache column names (#444) - - Sort the DSN parameters in DSNs generated from a config (#637) - - Allow native password authentication by default (#644) - - Use the default port if it is missing in the DSN (#668) - - Removed the `strict` mode (#676) - - Do not query `max_allowed_packet` by default (#680) - - Dropped support Go 1.6 and lower (#696) - - Updated `ConvertValue()` to match the database/sql/driver implementation (#760) - - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783) - - Improved the compatibility of the authentication system (#807) - -New Features: - - - Multi-Results support (#537) - - `rejectReadOnly` DSN option (#604) - - `context.Context` support (#608, #612, #627, #761) - - Transaction isolation level support (#619, #744) - - Read-Only transactions support (#618, #634) - - `NewConfig` function which 
initializes a config with default values (#679) - - Implemented the `ColumnType` interfaces (#667, #724) - - Support for custom string types in `ConvertValue` (#623) - - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710) - - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802) - - Implemented `driver.SessionResetter` (#779) - - `sha256_password` authentication plugin support (#808) - -Bugfixes: - - - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718) - - Fixed LOAD LOCAL DATA INFILE for empty files (#590) - - Removed columns definition cache since it sometimes cached invalid data (#592) - - Don't mutate registered TLS configs (#600) - - Make RegisterTLSConfig concurrency-safe (#613) - - Handle missing auth data in the handshake packet correctly (#646) - - Do not retry queries when data was written to avoid data corruption (#302, #736) - - Cache the connection pointer for error handling before invalidating it (#678) - - Fixed imports for appengine/cloudsql (#700) - - Fix sending STMT_LONG_DATA for 0 byte data (#734) - - Set correct capacity for []bytes read from length-encoded strings (#766) - - Make RegisterDial concurrency-safe (#773) - - -## Version 1.3 (2016-12-01) - -Changes: - - - Go 1.1 is no longer supported - - Use decimals fields in MySQL to format time types (#249) - - Buffer optimizations (#269) - - TLS ServerName defaults to the host (#283) - - Refactoring (#400, #410, #437) - - Adjusted documentation for second generation CloudSQL (#485) - - Documented DSN system var quoting rules (#502) - - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) - -New Features: - - - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) - - Support for returning table alias on Columns() (#289, #359, #382) - - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) - - Support for uint64 parameters with high bit set (#332, #345) - - Cleartext authentication plugin support (#327) - - Exported ParseDSN function and the Config struct (#403, #419, #429) - - Read / Write timeouts (#401) - - Support for JSON field type (#414) - - Support for multi-statements and multi-results (#411, #431) - - DSN parameter to set the driver-side max_allowed_packet value manually (#489) - - Native password authentication plugin support (#494, #524) - -Bugfixes: - - - Fixed handling of queries without columns and rows (#255) - - Fixed a panic when SetKeepAlive() failed (#298) - - Handle ERR packets while reading rows (#321) - - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) - - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) - - Actually zero out bytes in handshake response (#378) - - Fixed race condition in registering LOAD DATA INFILE handler (#383) - - Fixed tests with MySQL 5.7.9+ (#380) - - QueryUnescape TLS config names (#397) - - Fixed "broken pipe" error by writing to closed socket (#390) - - Fixed LOAD LOCAL DATA INFILE buffering (#424) - - Fixed parsing of floats into float64 when placeholders are used (#434) - - Fixed DSN tests with Go 1.7+ (#459) - - Handle ERR packets while waiting for EOF (#473) - - Invalidate connection on error while discarding additional results (#513) - - Allow terminating packets of length 0 (#516) - - -## Version 1.2 (2014-06-03) - -Changes: - - - We switched back to a "rolling release". 
`go get` installs the current master branch again - - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver - - Exported errors to allow easy checking from application code - - Enabled TCP Keepalives on TCP connections - - Optimized INFILE handling (better buffer size calculation, lazy init, ...) - - The DSN parser also checks for a missing separating slash - - Faster binary date / datetime to string formatting - - Also exported the MySQLWarning type - - mysqlConn.Close returns the first error encountered instead of ignoring all errors - - writePacket() automatically writes the packet size to the header - - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets - -New Features: - - - `RegisterDial` allows the usage of a custom dial function to establish the network connection - - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter - - Logging of critical errors is configurable with `SetLogger` - - Google CloudSQL support - -Bugfixes: - - - Allow more than 32 parameters in prepared statements - - Various old_password fixes - - Fixed TestConcurrent test to pass Go's race detection - - Fixed appendLengthEncodedInteger for large numbers - - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) - - -## Version 1.1 (2013-11-02) - -Changes: - - - Go-MySQL-Driver now requires Go 1.1 - - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore - - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors - - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` - - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. - - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries - - Optimized the buffer for reading - - stmt.Query now caches column metadata - - New Logo - - Changed the copyright header to include all contributors - - Improved the LOAD INFILE documentation - - The driver struct is now exported to make the driver directly accessible - - Refactored the driver tests - - Added more benchmarks and moved all to a separate file - - Other small refactoring - -New Features: - - - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure - - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs - - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used - -Bugfixes: - - - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification - - Convert to DB timezone when inserting `time.Time` - - Splitted packets (more than 16MB) are now merged correctly - - Fixed false positive `io.EOF` errors when the data was fully read - - Avoid panics on reuse of closed connections - - Fixed empty string producing false nil values - - Fixed sign byte for positive TIME fields - - -## Version 1.0 (2013-05-14) - -Initial Release diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE deleted file mode 100644 index 14e2f777f6c..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/LICENSE +++ /dev/null @@ -1,373 +0,0 @@ -Mozilla Public License Version 2.0 -================================== - -1. Definitions --------------- - -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. - -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or - - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. - -1.6. "Executable Form" - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. - -1.8. "License" - means this document. - -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. - -1.10. "Modifications" - means any of the following: - - (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or - - (b) any new file in Source Code Form that contains any Covered - Software. - -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. - -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. - -1.13. "Source Code Form" - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. 
For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. - -2. License Grants and Conditions --------------------------------- - -2.1. Grants - -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: - -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. - -2.3. Limitations on Grant Scope - -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: - -(a) for any code that a Contributor has removed from Covered Software; - or - -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. - -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). - -2.5. Representation - -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. - -2.7. Conditions - -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. - -3. Responsibilities -------------------- - -3.1. Distribution of Source Form - -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. - -3.2. 
Distribution of Executable Form - -If You distribute Covered Software in Executable Form then: - -(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and - -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). - -3.4. Notices - -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. - -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- - -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. - -5. Termination --------------- - -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. 
However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. - -************************************************************************ -* * -* 6. Disclaimer of Warranty * -* ------------------------- * -* * -* Covered Software is provided under this License on an "as is" * -* basis, without warranty of any kind, either expressed, implied, or * -* statutory, including, without limitation, warranties that the * -* Covered Software is free of defects, merchantable, fit for a * -* particular purpose or non-infringing. The entire risk as to the * -* quality and performance of the Covered Software is with You. * -* Should any Covered Software prove defective in any respect, You * -* (not any Contributor) assume the cost of any necessary servicing, * -* repair, or correction. This disclaimer of warranty constitutes an * -* essential part of this License. No use of any Covered Software is * -* authorized under this License except under this disclaimer. * -* * -************************************************************************ - -************************************************************************ -* * -* 7. Limitation of Liability * -* -------------------------- * -* * -* Under no circumstances and under no legal theory, whether tort * -* (including negligence), contract, or otherwise, shall any * -* Contributor, or anyone who distributes Covered Software as * -* permitted above, be liable to You for any direct, indirect, * -* special, incidental, or consequential damages of any character * -* including, without limitation, damages for lost profits, loss of * -* goodwill, work stoppage, computer failure or malfunction, or any * -* and all other commercial damages or losses, even if such party * -* shall have been informed of the possibility of such damages. This * -* limitation of liability shall not apply to liability for death or * -* personal injury resulting from such party's negligence to the * -* extent applicable law prohibits such limitation. Some * -* jurisdictions do not allow the exclusion or limitation of * -* incidental or consequential damages, so this exclusion and * -* limitation may not apply to You. 
* -* * -************************************************************************ - -8. Litigation -------------- - -Any litigation relating to this License may be brought only in the -courts of a jurisdiction where the defendant maintains its principal -place of business and such litigation shall be governed by laws of that -jurisdiction, without reference to its conflict-of-law provisions. -Nothing in this Section shall prevent a party's ability to bring -cross-claims or counter-claims. - -9. Miscellaneous ----------------- - -This License represents the complete agreement concerning the subject -matter hereof. If any provision of this License is held to be -unenforceable, such provision shall be reformed only to the extent -necessary to make it enforceable. Any law or regulation which provides -that the language of a contract shall be construed against the drafter -shall not be used to construe this License against a Contributor. - -10. Versions of the License ---------------------------- - -10.1. New Versions - -Mozilla Foundation is the license steward. Except as provided in Section -10.3, no one other than the license steward has the right to modify or -publish new versions of this License. Each version will be given a -distinguishing version number. - -10.2. Effect of New Versions - -You may distribute the Covered Software under the terms of the version -of the License under which You originally received the Covered Software, -or under the terms of any subsequent version published by the license -steward. - -10.3. Modified Versions - -If you create software not governed by this License, and you want to -create a new license for such software, you may create and use a -modified version of this License if you rename the license and remove -any references to the name of the license steward (except to note that -such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary -Licenses - -If You choose to distribute Source Code Form that is Incompatible With -Secondary Licenses under the terms of this version of the License, the -notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice -------------------------------------------- - - This Source Code Form is subject to the terms of the Mozilla Public - License, v. 2.0. If a copy of the MPL was not distributed with this - file, You can obtain one at http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular -file, then You may include the notice in a location (such as a LICENSE -file in a relevant directory) where a recipient would be likely to look -for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice ---------------------------------------------------------- - - This Source Code Form is "Incompatible With Secondary Licenses", as - defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
deleted file mode 100644
index 0b13154fccf..00000000000
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ /dev/null
@@ -1,520 +0,0 @@
-# Go-MySQL-Driver
-
-A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
-
-![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
-
----------------------------------------
-  * [Features](#features)
-  * [Requirements](#requirements)
-  * [Installation](#installation)
-  * [Usage](#usage)
-    * [DSN (Data Source Name)](#dsn-data-source-name)
-      * [Password](#password)
-      * [Protocol](#protocol)
-      * [Address](#address)
-      * [Parameters](#parameters)
-      * [Examples](#examples)
-    * [Connection pool and timeouts](#connection-pool-and-timeouts)
-  * [context.Context Support](#contextcontext-support)
-  * [ColumnType Support](#columntype-support)
-  * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
-  * [time.Time support](#timetime-support)
-  * [Unicode support](#unicode-support)
-  * [Testing / Development](#testing--development)
-  * [License](#license)
-
----------------------------------------
-
-## Features
-  * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
-  * Native Go implementation. No C-bindings, just pure Go
-  * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
-  * Automatic handling of broken connections
-  * Automatic Connection Pooling *(by database/sql package)*
-  * Supports queries larger than 16MB
-  * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
-  * Intelligent `LONG DATA` handling in prepared statements
-  * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
-  * Optional `time.Time` parsing
-  * Optional placeholder interpolation
-
-## Requirements
-  * Go 1.10 or higher. We aim to support the 3 latest versions of Go.
-  * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
-
----------------------------------------
-
-## Installation
-Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
-```bash
-$ go get -u github.com/go-sql-driver/mysql
-```
-Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
-
-## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
-
-Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
-
-```go
-import (
-	"database/sql"
-	"time"
-
-	_ "github.com/go-sql-driver/mysql"
-)
-
-// ...
-
-db, err := sql.Open("mysql", "user:password@/dbname")
-if err != nil {
-	panic(err)
-}
-// See "Important settings" section.
-db.SetConnMaxLifetime(time.Minute * 3)
-db.SetMaxOpenConns(10)
-db.SetMaxIdleConns(10)
-```
-
-[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
-
-### Important settings
-
-`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before the connection is closed by the MySQL server, the OS, or other middleware. Since some middleware closes idle connections after 5 minutes, we recommend a lifetime shorter than 5 minutes. This setting also helps with load balancing and changing system variables.
-
-`db.SetMaxOpenConns()` is highly recommended to limit the number of connections used by the application. There is no universally recommended limit; it depends on the application and the MySQL server.
-
-`db.SetMaxIdleConns()` is recommended to be set to the same value as (or greater than) `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections are still closed by `db.SetConnMaxLifetime()`. If you want to close idle connections more quickly, you can use `db.SetConnMaxIdleTime()` (available since Go 1.15).
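For a quick smoke test of these settings together, a self-contained sketch (the address, credentials, and limits below are placeholders, not recommendations; `db.SetConnMaxIdleTime` needs Go 1.15+):

```go
package main

import (
	"database/sql"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// sql.Open does not establish a connection; it only validates the DSN.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Keep the lifetime below typical middleware idle timeouts (~5 minutes).
	db.SetConnMaxLifetime(3 * time.Minute)
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(10)
	// Go 1.15+: close idle connections earlier than their max lifetime.
	db.SetConnMaxIdleTime(1 * time.Minute)

	// Ping forces an actual connection, surfacing DSN or network errors early.
	if err := db.Ping(); err != nil {
		panic(err)
	}
}
```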
-
-### DSN (Data Source Name)
-
-The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without a type-prefix (optional parts marked by squared brackets):
-```
-[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
-```
-
-A DSN in its fullest form:
-```
-username:password@protocol(address)/dbname?param=value
-```
-
-Except for the database name, all values are optional. So the minimal DSN is:
-```
-/dbname
-```
-
-If you do not want to preselect a database, leave `dbname` empty:
-```
-/
-```
-This has the same effect as an empty DSN string:
-```
-
-```
-
-Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
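For non-trivial DSNs it can be easier to fill a `Config` struct and let the driver render the string. A minimal sketch (all field values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Start from the driver defaults and fill in only what differs.
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "dbname"
	cfg.ParseTime = true // scan DATE/DATETIME into time.Time
	cfg.Loc = time.Local // interpret those values in the local timezone

	// FormatDSN renders the struct back into a DSN string,
	// URL-escaping parameter values as needed.
	fmt.Println(cfg.FormatDSN())
	// e.g. user:password@tcp(127.0.0.1:3306)/dbname?loc=Local&parseTime=true
}
```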
-
-#### Password
-Passwords can consist of any character. Escaping is **not** necessary.
-
-#### Protocol
-See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
-In general you should use a Unix domain socket if available and TCP otherwise for best performance.
-
-#### Address
-For TCP and UDP networks, addresses have the form `host[:port]`.
-If `port` is omitted, the default port will be used.
-If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
-
-For Unix domain sockets the address is the absolute path to the MySQL server socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
-
-#### Parameters
-*Parameters are case-sensitive!*
-
-Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
-
-##### `allowAllFiles`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
-
-##### `allowCleartextPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
-
-##### `allowNativePasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: true
-```
-`allowNativePasswords=false` disallows the usage of the MySQL native password method.
-
-##### `allowOldPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
-
-##### `charset`
-
-```
-Type: string
-Valid Values: <name>
-Default: none
-```
-
-Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the next charset in the list is tried if setting the charset fails. This enables, for example, support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-
-Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
-Unless you need the fallback behavior, please use `collation` instead.
-
-##### `checkConnLiveness`
-
-```
-Type: bool
-Valid Values: true, false
-Default: true
-```
-
-On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
-`checkConnLiveness=false` disables this liveness check of connections.
-
-##### `collation`
-
-```
-Type: string
-Valid Values: <name>
-Default: utf8mb4_general_ci
-```
-
-Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
-
-A list of valid collations for a server is retrievable with `SHOW COLLATION`.
-
-The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
-
-Collations for the charsets "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
-
-##### `clientFoundRows`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
-
-##### `columnsWithAlias`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
-
-```
-SELECT u.id FROM users as u
-```
-
-will return `u.id` instead of just `id` if `columnsWithAlias=true`.
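A small sketch of what `columnsWithAlias` changes (connection details are placeholders; a `users` table is assumed to exist):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// With columnsWithAlias=true, Columns() reports "u.id" instead of "id".
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?columnsWithAlias=true")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	rows, err := db.Query("SELECT u.id FROM users AS u")
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	cols, err := rows.Columns()
	if err != nil {
		panic(err)
	}
	fmt.Println(cols) // [u.id]
}
```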
-
-##### `interpolateParams`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips: with `interpolateParams=false`, the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
-
-*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
-
-##### `loc`
-
-```
-Type: string
-Valid Values: <escaped name>
-Default: UTC
-```
-
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
-
-Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-
-Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
-
-##### `maxAllowedPacket`
-```
-Type: decimal number
-Default: 4194304
-```
-
-Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
-
-##### `multiStatements`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
-
-When `multiStatements` is used, `?` parameters must only be used in the first statement.
-
-##### `parseTime`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
-A date or datetime like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
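A short sketch of `parseTime` and `loc` in combination (connection details are placeholders; it assumes a reachable server):

```go
package main

import (
	"database/sql"
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true lets DATE/DATETIME values scan directly into time.Time;
	// loc=Local interprets them in the system's timezone.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true&loc=Local")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var now time.Time
	// Without parseTime=true this scan would require a []byte/string target.
	if err := db.QueryRow("SELECT NOW()").Scan(&now); err != nil {
		panic(err)
	}
	fmt.Println(now)
}
```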
-
-##### `readTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `rejectReadOnly`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`rejectReadOnly=true` causes the driver to reject read-only connections. This
-is for a possible race condition during an automatic failover, where the mysql
-client gets connected to a read-only replica after the failover.
-
-Note that this should be a fairly rare case, as an automatic failover normally
-happens when the primary is down, and the race condition shouldn't happen
-unless it comes back online as soon as the failover is kicked off. On the
-other hand, when this happens, a MySQL application can get stuck on a
-read-only connection until restarted. It is however fairly easy to reproduce,
-for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
-
-If you are not relying on read-only transactions to reject writes that aren't
-supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
-is safer for failovers.
-
-Note that ERROR 1290 can be returned for a `read-only` server and this option will
-cause a retry for that error. However, the same error number is used for some
-other cases. You should ensure your application will never cause an ERROR 1290
-except for `read-only` mode when enabling this option.
-
-##### `serverPubKey`
-
-```
-Type: string
-Valid Values: <name>
-Default: none
-```
-
-Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be referenced by the assigned name in the DSN.
-Public keys are used to transmit encrypted data, e.g. for authentication.
-If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
-
-##### `timeout`
-
-```
-Type: duration
-Default: OS default
-```
-
-Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `tls`
-
-```
-Type: bool / string
-Valid Values: true, false, skip-verify, preferred, <name>
-Default: false
-```
-
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side), or use `preferred` to use TLS only when advertised by the server. `preferred` is similar to `skip-verify`, but additionally allows a fallback to an unencrypted connection. Neither `skip-verify` nor `preferred` adds any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
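A sketch of the custom TLS route described above (the CA path and the registry name `custom` are placeholders):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"os"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Trust a specific CA instead of the system pool.
	rootCertPool := x509.NewCertPool()
	pem, err := os.ReadFile("/path/to/ca-cert.pem")
	if err != nil {
		panic(err)
	}
	if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
		panic("failed to append PEM")
	}

	// Register the config under a name, then reference it via tls=<name>.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool}); err != nil {
		panic(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname?tls=custom")
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
```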
-
-##### `writeTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### System Variables
-
-Any other parameters are interpreted as system variables:
- * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
- * `<enum_var>=<value>`: `SET <enum_var>=<value>`
- * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
-
-Rules:
-* The values for string variables must be quoted with `'`.
-* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`).
-
-Examples:
- * `autocommit=1`: `SET autocommit=1`
- * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
- * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
-
-#### Examples
-```
-user@unix(/path/to/socket)/dbname
-```
-
-```
-root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
-```
-
-```
-user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
-```
-
-Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
-```
-user:password@/dbname?sql_mode=TRADITIONAL
-```
-
-TCP via IPv6:
-```
-user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
-```
-
-TCP on a remote host, e.g. Amazon RDS:
-```
-id:password@tcp(your-amazonaws-uri.com:3306)/dbname
-```
-
-Google Cloud SQL on App Engine:
-```
-user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
-```
-
-TCP using the default port (3306) on localhost:
-```
-user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
-```
-
-Use the default protocol (tcp) and host (localhost:3306):
-```
-user:password@/dbname
-```
-
-No database preselected:
-```
-user:password@/
-```
-
-
-### Connection pool and timeouts
-The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
-
-## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
-
-## `context.Context` Support
-Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
-See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
-
-
-### `LOAD DATA LOCAL INFILE` support
-For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
-```go
-import "github.com/go-sql-driver/mysql"
-```
-
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
-
-To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers, and call `DeregisterReaderHandler` when you don't need a handler anymore.
-
-See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
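A sketch of the `io.Reader` variant (handler name, data, and table are illustrative; it assumes the server permits `LOCAL INFILE` and that the target table exists):

```go
package main

import (
	"database/sql"
	"io"
	"strings"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a named handler; each call must return a fresh reader.
	mysql.RegisterReaderHandler("data", func() io.Reader {
		return strings.NewReader("1\talice\n2\tbob\n")
	})
	defer mysql.DeregisterReaderHandler("data")

	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// The virtual filepath "Reader::data" refers to the handler above.
	if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE users"); err != nil {
		panic(err)
	}
}
```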
-
-
-### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte`, which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
-
-**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-
-
-### Unicode support
-Since version 1.5 Go-MySQL-Driver uses the collation `utf8mb4_general_ci` by default.
-
-Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
-
-Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default.
-
-See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
-
-## Testing / Development
-To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
-
-Go-MySQL-Driver is not feature-complete yet. Your help is very much appreciated.
-If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-
-See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
-
----------------------------------------
-
-## License
-Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-
-Mozilla summarizes the license scope as follows:
-> MPL: The copyleft applies to any files containing MPLed code.
-
-
-That means:
- * You can **use** the **unchanged** source code both in private and commercially.
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-
-Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
-
-![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
deleted file mode 100644
index b2f19e8f0b6..00000000000
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ /dev/null
@@ -1,425 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/sha1"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/pem"
-	"fmt"
-	"sync"
-)
-
-// server pub keys registry
-var (
-	serverPubKeyLock     sync.RWMutex
-	serverPubKeyRegistry map[string]*rsa.PublicKey
-)
-
-// RegisterServerPubKey registers a server RSA public key which can be used to
-// send data in a secure manner to the server without receiving the public key
-// in a potentially insecure way from the server first.
-// Registered keys can afterwards be used by adding serverPubKey=<name> to the DSN.
-//
-// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
-// after registering it and may not be modified.
-// -// data, err := ioutil.ReadFile("mykey.pem") -// if err != nil { -// log.Fatal(err) -// } -// -// block, _ := pem.Decode(data) -// if block == nil || block.Type != "PUBLIC KEY" { -// log.Fatal("failed to decode PEM block containing public key") -// } -// -// pub, err := x509.ParsePKIXPublicKey(block.Bytes) -// if err != nil { -// log.Fatal(err) -// } -// -// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok { -// mysql.RegisterServerPubKey("mykey", rsaPubKey) -// } else { -// log.Fatal("not a RSA public key") -// } -// -func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) { - serverPubKeyLock.Lock() - if serverPubKeyRegistry == nil { - serverPubKeyRegistry = make(map[string]*rsa.PublicKey) - } - - serverPubKeyRegistry[name] = pubKey - serverPubKeyLock.Unlock() -} - -// DeregisterServerPubKey removes the public key registered with the given name. -func DeregisterServerPubKey(name string) { - serverPubKeyLock.Lock() - if serverPubKeyRegistry != nil { - delete(serverPubKeyRegistry, name) - } - serverPubKeyLock.Unlock() -} - -func getServerPubKey(name string) (pubKey *rsa.PublicKey) { - serverPubKeyLock.RLock() - if v, ok := serverPubKeyRegistry[name]; ok { - pubKey = v - } - serverPubKeyLock.RUnlock() - return -} - -// Hash password using pre 4.1 (old password) method -// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c -type myRnd struct { - seed1, seed2 uint32 -} - -const myRndMaxVal = 0x3FFFFFFF - -// Pseudo random number generator -func newMyRnd(seed1, seed2 uint32) *myRnd { - return &myRnd{ - seed1: seed1 % myRndMaxVal, - seed2: seed2 % myRndMaxVal, - } -} - -// Tested to be equivalent to MariaDB's floating point variant -// http://play.golang.org/p/QHvhd4qved -// http://play.golang.org/p/RG0q4ElWDx -func (r *myRnd) NextByte() byte { - r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal - r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal - - return byte(uint64(r.seed1) * 31 / myRndMaxVal) -} - -// Generate binary hash from byte string using insecure pre 4.1 method -func pwHash(password []byte) (result [2]uint32) { - var add uint32 = 7 - var tmp uint32 - - result[0] = 1345345333 - result[1] = 0x12345671 - - for _, c := range password { - // skip spaces and tabs in password - if c == ' ' || c == '\t' { - continue - } - - tmp = uint32(c) - result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) - result[1] += (result[1] << 8) ^ result[0] - add += tmp - } - - // Remove sign bit (1<<31)-1) - result[0] &= 0x7FFFFFFF - result[1] &= 0x7FFFFFFF - - return -} - -// Hash password using insecure pre 4.1 method -func scrambleOldPassword(scramble []byte, password string) []byte { - scramble = scramble[:8] - - hashPw := pwHash([]byte(password)) - hashSc := pwHash(scramble) - - r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) - - var out [8]byte - for i := range out { - out[i] = r.NextByte() + 64 - } - - mask := r.NextByte() - for i := range out { - out[i] ^= mask - } - - return out[:] -} - -// Hash password using 4.1+ method (SHA1) -func scramblePassword(scramble []byte, password string) []byte { - if len(password) == 0 { - return nil - } - - // stage1Hash = SHA1(password) - crypt := sha1.New() - crypt.Write([]byte(password)) - stage1 := crypt.Sum(nil) - - // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) - // inner Hash - crypt.Reset() - crypt.Write(stage1) - hash := crypt.Sum(nil) - - // outer Hash - crypt.Reset() - crypt.Write(scramble) - crypt.Write(hash) - scramble = crypt.Sum(nil) - - // token = scrambleHash XOR stage1Hash - for i := range scramble { - 
scramble[i] ^= stage1[i] - } - return scramble -} - -// Hash password using MySQL 8+ method (SHA256) -func scrambleSHA256Password(scramble []byte, password string) []byte { - if len(password) == 0 { - return nil - } - - // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble)) - - crypt := sha256.New() - crypt.Write([]byte(password)) - message1 := crypt.Sum(nil) - - crypt.Reset() - crypt.Write(message1) - message1Hash := crypt.Sum(nil) - - crypt.Reset() - crypt.Write(message1Hash) - crypt.Write(scramble) - message2 := crypt.Sum(nil) - - for i := range message1 { - message1[i] ^= message2[i] - } - - return message1 -} - -func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) { - plain := make([]byte, len(password)+1) - copy(plain, password) - for i := range plain { - j := i % len(seed) - plain[i] ^= seed[j] - } - sha1 := sha1.New() - return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil) -} - -func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error { - enc, err := encryptPassword(mc.cfg.Passwd, seed, pub) - if err != nil { - return err - } - return mc.writeAuthSwitchPacket(enc) -} - -func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { - switch plugin { - case "caching_sha2_password": - authResp := scrambleSHA256Password(authData, mc.cfg.Passwd) - return authResp, nil - - case "mysql_old_password": - if !mc.cfg.AllowOldPasswords { - return nil, ErrOldPassword - } - if len(mc.cfg.Passwd) == 0 { - return nil, nil - } - // Note: there are edge cases where this should work but doesn't; - // this is currently "wontfix": - // https://github.com/go-sql-driver/mysql/issues/184 - authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0) - return authResp, nil - - case "mysql_clear_password": - if !mc.cfg.AllowCleartextPasswords { - return nil, ErrCleartextPassword - } - // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html - // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html - return append([]byte(mc.cfg.Passwd), 0), nil - - case "mysql_native_password": - if !mc.cfg.AllowNativePasswords { - return nil, ErrNativePassword - } - // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html - // Native password authentication only need and will need 20-byte challenge. - authResp := scramblePassword(authData[:20], mc.cfg.Passwd) - return authResp, nil - - case "sha256_password": - if len(mc.cfg.Passwd) == 0 { - return []byte{0}, nil - } - if mc.cfg.tls != nil || mc.cfg.Net == "unix" { - // write cleartext auth packet - return append([]byte(mc.cfg.Passwd), 0), nil - } - - pubKey := mc.cfg.pubKey - if pubKey == nil { - // request public key from server - return []byte{1}, nil - } - - // encrypted password - enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) - return enc, err - - default: - errLog.Print("unknown auth plugin:", plugin) - return nil, ErrUnknownPlugin - } -} - -func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { - // Read Result Packet - authData, newPlugin, err := mc.readAuthResult() - if err != nil { - return err - } - - // handle auth plugin switch, if requested - if newPlugin != "" { - // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is - // sent and we have to keep using the cipher sent in the init packet. 
- if authData == nil { - authData = oldAuthData - } else { - // copy data from read buffer to owned slice - copy(oldAuthData, authData) - } - - plugin = newPlugin - - authResp, err := mc.auth(authData, plugin) - if err != nil { - return err - } - if err = mc.writeAuthSwitchPacket(authResp); err != nil { - return err - } - - // Read Result Packet - authData, newPlugin, err = mc.readAuthResult() - if err != nil { - return err - } - - // Do not allow to change the auth plugin more than once - if newPlugin != "" { - return ErrMalformPkt - } - } - - switch plugin { - - // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/ - case "caching_sha2_password": - switch len(authData) { - case 0: - return nil // auth successful - case 1: - switch authData[0] { - case cachingSha2PasswordFastAuthSuccess: - if err = mc.readResultOK(); err == nil { - return nil // auth successful - } - - case cachingSha2PasswordPerformFullAuthentication: - if mc.cfg.tls != nil || mc.cfg.Net == "unix" { - // write cleartext auth packet - err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0)) - if err != nil { - return err - } - } else { - pubKey := mc.cfg.pubKey - if pubKey == nil { - // request public key from server - data, err := mc.buf.takeSmallBuffer(4 + 1) - if err != nil { - return err - } - data[4] = cachingSha2PasswordRequestPublicKey - mc.writePacket(data) - - // parse public key - if data, err = mc.readPacket(); err != nil { - return err - } - - block, rest := pem.Decode(data[1:]) - if block == nil { - return fmt.Errorf("No Pem data found, data: %s", rest) - } - pkix, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return err - } - pubKey = pkix.(*rsa.PublicKey) - } - - // send encrypted password - err = mc.sendEncryptedPassword(oldAuthData, pubKey) - if err != nil { - return err - } - } - return mc.readResultOK() - - default: - return ErrMalformPkt - } - default: - return ErrMalformPkt - } - - case "sha256_password": - switch len(authData) { - case 0: - return nil // auth successful - default: - block, _ := pem.Decode(authData) - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return err - } - - // send encrypted password - err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey)) - if err != nil { - return err - } - return mc.readResultOK() - } - - default: - return nil // auth successful - } - - return err -} diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go deleted file mode 100644 index 0774c5c8c24..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ /dev/null @@ -1,182 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "io" - "net" - "time" -) - -const defaultBufSize = 4096 -const maxCachedBufSize = 256 * 1024 - -// A buffer which is used for both reading and writing. -// This is possible since communication on each connection is synchronous. -// In other words, we can't write and read simultaneously on the same connection. -// The buffer is similar to bufio.Reader / Writer but zero-copy-ish -// Also highly optimized for this particular use case. 
-// This buffer is backed by two byte slices in a double-buffering scheme.
-type buffer struct {
-	buf     []byte // buf is a byte buffer whose length and capacity are equal.
-	nc      net.Conn
-	idx     int
-	length  int
-	timeout time.Duration
-	dbuf    [2][]byte // dbuf is an array with the two byte slices that back this buffer
-	flipcnt uint      // flipcnt is the current buffer counter for double-buffering
-}
-
-// newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
-	fg := make([]byte, defaultBufSize)
-	return buffer{
-		buf:  fg,
-		nc:   nc,
-		dbuf: [2][]byte{fg, nil},
-	}
-}
-
-// flip replaces the active buffer with the background buffer.
-// This is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`.
-func (b *buffer) flip() {
-	b.flipcnt += 1
-}
-
-// fill reads into the buffer until at least _need_ bytes are in it.
-func (b *buffer) fill(need int) error {
-	n := b.length
-	// fill data into its double-buffering target: if we've called
-	// flip on this buffer, we'll be copying to the background buffer,
-	// and then filling it with network data; otherwise we'll just move
-	// the contents of the current buffer to the front before filling it
-	dest := b.dbuf[b.flipcnt&1]
-
-	// grow buffer if necessary to fit the whole packet.
-	if need > len(dest) {
-		// Round up to the next multiple of the default size
-		dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
-
-		// if the allocated buffer is not too large, move it to backing storage
-		// to prevent extra allocations on applications that perform large reads
-		if len(dest) <= maxCachedBufSize {
-			b.dbuf[b.flipcnt&1] = dest
-		}
-	}
-
-	// if we're filling the fg buffer, move the existing data to the start of it.
-	// if we're filling the bg buffer, copy over the data
-	if n > 0 {
-		copy(dest[:n], b.buf[b.idx:])
-	}
-
-	b.buf = dest
-	b.idx = 0
-
-	for {
-		if b.timeout > 0 {
-			if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
-				return err
-			}
-		}
-
-		nn, err := b.nc.Read(b.buf[n:])
-		n += nn
-
-		switch err {
-		case nil:
-			if n < need {
-				continue
-			}
-			b.length = n
-			return nil
-
-		case io.EOF:
-			if n >= need {
-				b.length = n
-				return nil
-			}
-			return io.ErrUnexpectedEOF
-
-		default:
-			return err
-		}
-	}
-}
-
-// readNext returns the next N bytes from the buffer.
-// The returned slice is only guaranteed to be valid until the next read.
-func (b *buffer) readNext(need int) ([]byte, error) {
-	if b.length < need {
-		// refill
-		if err := b.fill(need); err != nil {
-			return nil, err
-		}
-	}
-
-	offset := b.idx
-	b.idx += need
-	b.length -= need
-	return b.buf[offset:b.idx], nil
-}
-
-// takeBuffer returns a buffer with the requested size.
-// If possible, a slice from the existing buffer is returned.
-// Otherwise a bigger buffer is made.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeBuffer(length int) ([]byte, error) {
-	if b.length > 0 {
-		return nil, ErrBusyBuffer
-	}
-
-	// test (cheap) general case first
-	if length <= cap(b.buf) {
-		return b.buf[:length], nil
-	}
-
-	if length < maxPacketSize {
-		b.buf = make([]byte, length)
-		return b.buf, nil
-	}
-
-	// buffer is larger than we want to store.
-	return make([]byte, length), nil
-}
-
-// takeSmallBuffer is a shortcut which can be used if length is
-// known to be smaller than defaultBufSize.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
-	if b.length > 0 {
-		return nil, ErrBusyBuffer
-	}
-	return b.buf[:length], nil
-}
-
-// takeCompleteBuffer returns the complete existing buffer.
-// This can be used if the necessary buffer size is unknown.
-// cap and len of the returned buffer will be equal.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeCompleteBuffer() ([]byte, error) {
-	if b.length > 0 {
-		return nil, ErrBusyBuffer
-	}
-	return b.buf, nil
-}
-
-// store stores buf, an updated buffer, if it's suitable to do so.
-func (b *buffer) store(buf []byte) error {
-	if b.length > 0 {
-		return ErrBusyBuffer
-	} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
-		b.buf = buf[:cap(buf)]
-	}
-	return nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
deleted file mode 100644
index 326a9f7fa8a..00000000000
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-const defaultCollation = "utf8mb4_general_ci"
-const binaryCollation = "binary"
-
-// A list of available collations mapped to the internal ID.
-// To update this map use the following MySQL query:
-//     SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
-//
-// The handshake packet has only 1 byte for the collation_id, so we can't use collations with ID > 255.
-//
-// ucs2, utf16, and utf32 can't be used for the connection charset.
-// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
-// They are commented out to reduce the size of this map.
-var collations = map[string]byte{ - "big5_chinese_ci": 1, - "latin2_czech_cs": 2, - "dec8_swedish_ci": 3, - "cp850_general_ci": 4, - "latin1_german1_ci": 5, - "hp8_english_ci": 6, - "koi8r_general_ci": 7, - "latin1_swedish_ci": 8, - "latin2_general_ci": 9, - "swe7_swedish_ci": 10, - "ascii_general_ci": 11, - "ujis_japanese_ci": 12, - "sjis_japanese_ci": 13, - "cp1251_bulgarian_ci": 14, - "latin1_danish_ci": 15, - "hebrew_general_ci": 16, - "tis620_thai_ci": 18, - "euckr_korean_ci": 19, - "latin7_estonian_cs": 20, - "latin2_hungarian_ci": 21, - "koi8u_general_ci": 22, - "cp1251_ukrainian_ci": 23, - "gb2312_chinese_ci": 24, - "greek_general_ci": 25, - "cp1250_general_ci": 26, - "latin2_croatian_ci": 27, - "gbk_chinese_ci": 28, - "cp1257_lithuanian_ci": 29, - "latin5_turkish_ci": 30, - "latin1_german2_ci": 31, - "armscii8_general_ci": 32, - "utf8_general_ci": 33, - "cp1250_czech_cs": 34, - //"ucs2_general_ci": 35, - "cp866_general_ci": 36, - "keybcs2_general_ci": 37, - "macce_general_ci": 38, - "macroman_general_ci": 39, - "cp852_general_ci": 40, - "latin7_general_ci": 41, - "latin7_general_cs": 42, - "macce_bin": 43, - "cp1250_croatian_ci": 44, - "utf8mb4_general_ci": 45, - "utf8mb4_bin": 46, - "latin1_bin": 47, - "latin1_general_ci": 48, - "latin1_general_cs": 49, - "cp1251_bin": 50, - "cp1251_general_ci": 51, - "cp1251_general_cs": 52, - "macroman_bin": 53, - //"utf16_general_ci": 54, - //"utf16_bin": 55, - //"utf16le_general_ci": 56, - "cp1256_general_ci": 57, - "cp1257_bin": 58, - "cp1257_general_ci": 59, - //"utf32_general_ci": 60, - //"utf32_bin": 61, - //"utf16le_bin": 62, - "binary": 63, - "armscii8_bin": 64, - "ascii_bin": 65, - "cp1250_bin": 66, - "cp1256_bin": 67, - "cp866_bin": 68, - "dec8_bin": 69, - "greek_bin": 70, - "hebrew_bin": 71, - "hp8_bin": 72, - "keybcs2_bin": 73, - "koi8r_bin": 74, - "koi8u_bin": 75, - "utf8_tolower_ci": 76, - "latin2_bin": 77, - "latin5_bin": 78, - "latin7_bin": 79, - "cp850_bin": 80, - "cp852_bin": 81, - "swe7_bin": 82, - "utf8_bin": 83, - "big5_bin": 84, - "euckr_bin": 85, - "gb2312_bin": 86, - "gbk_bin": 87, - "sjis_bin": 88, - "tis620_bin": 89, - //"ucs2_bin": 90, - "ujis_bin": 91, - "geostd8_general_ci": 92, - "geostd8_bin": 93, - "latin1_spanish_ci": 94, - "cp932_japanese_ci": 95, - "cp932_bin": 96, - "eucjpms_japanese_ci": 97, - "eucjpms_bin": 98, - "cp1250_polish_ci": 99, - //"utf16_unicode_ci": 101, - //"utf16_icelandic_ci": 102, - //"utf16_latvian_ci": 103, - //"utf16_romanian_ci": 104, - //"utf16_slovenian_ci": 105, - //"utf16_polish_ci": 106, - //"utf16_estonian_ci": 107, - //"utf16_spanish_ci": 108, - //"utf16_swedish_ci": 109, - //"utf16_turkish_ci": 110, - //"utf16_czech_ci": 111, - //"utf16_danish_ci": 112, - //"utf16_lithuanian_ci": 113, - //"utf16_slovak_ci": 114, - //"utf16_spanish2_ci": 115, - //"utf16_roman_ci": 116, - //"utf16_persian_ci": 117, - //"utf16_esperanto_ci": 118, - //"utf16_hungarian_ci": 119, - //"utf16_sinhala_ci": 120, - //"utf16_german2_ci": 121, - //"utf16_croatian_ci": 122, - //"utf16_unicode_520_ci": 123, - //"utf16_vietnamese_ci": 124, - //"ucs2_unicode_ci": 128, - //"ucs2_icelandic_ci": 129, - //"ucs2_latvian_ci": 130, - //"ucs2_romanian_ci": 131, - //"ucs2_slovenian_ci": 132, - //"ucs2_polish_ci": 133, - //"ucs2_estonian_ci": 134, - //"ucs2_spanish_ci": 135, - //"ucs2_swedish_ci": 136, - //"ucs2_turkish_ci": 137, - //"ucs2_czech_ci": 138, - //"ucs2_danish_ci": 139, - //"ucs2_lithuanian_ci": 140, - //"ucs2_slovak_ci": 141, - //"ucs2_spanish2_ci": 142, - //"ucs2_roman_ci": 143, - //"ucs2_persian_ci": 144, - 
//"ucs2_esperanto_ci": 145, - //"ucs2_hungarian_ci": 146, - //"ucs2_sinhala_ci": 147, - //"ucs2_german2_ci": 148, - //"ucs2_croatian_ci": 149, - //"ucs2_unicode_520_ci": 150, - //"ucs2_vietnamese_ci": 151, - //"ucs2_general_mysql500_ci": 159, - //"utf32_unicode_ci": 160, - //"utf32_icelandic_ci": 161, - //"utf32_latvian_ci": 162, - //"utf32_romanian_ci": 163, - //"utf32_slovenian_ci": 164, - //"utf32_polish_ci": 165, - //"utf32_estonian_ci": 166, - //"utf32_spanish_ci": 167, - //"utf32_swedish_ci": 168, - //"utf32_turkish_ci": 169, - //"utf32_czech_ci": 170, - //"utf32_danish_ci": 171, - //"utf32_lithuanian_ci": 172, - //"utf32_slovak_ci": 173, - //"utf32_spanish2_ci": 174, - //"utf32_roman_ci": 175, - //"utf32_persian_ci": 176, - //"utf32_esperanto_ci": 177, - //"utf32_hungarian_ci": 178, - //"utf32_sinhala_ci": 179, - //"utf32_german2_ci": 180, - //"utf32_croatian_ci": 181, - //"utf32_unicode_520_ci": 182, - //"utf32_vietnamese_ci": 183, - "utf8_unicode_ci": 192, - "utf8_icelandic_ci": 193, - "utf8_latvian_ci": 194, - "utf8_romanian_ci": 195, - "utf8_slovenian_ci": 196, - "utf8_polish_ci": 197, - "utf8_estonian_ci": 198, - "utf8_spanish_ci": 199, - "utf8_swedish_ci": 200, - "utf8_turkish_ci": 201, - "utf8_czech_ci": 202, - "utf8_danish_ci": 203, - "utf8_lithuanian_ci": 204, - "utf8_slovak_ci": 205, - "utf8_spanish2_ci": 206, - "utf8_roman_ci": 207, - "utf8_persian_ci": 208, - "utf8_esperanto_ci": 209, - "utf8_hungarian_ci": 210, - "utf8_sinhala_ci": 211, - "utf8_german2_ci": 212, - "utf8_croatian_ci": 213, - "utf8_unicode_520_ci": 214, - "utf8_vietnamese_ci": 215, - "utf8_general_mysql500_ci": 223, - "utf8mb4_unicode_ci": 224, - "utf8mb4_icelandic_ci": 225, - "utf8mb4_latvian_ci": 226, - "utf8mb4_romanian_ci": 227, - "utf8mb4_slovenian_ci": 228, - "utf8mb4_polish_ci": 229, - "utf8mb4_estonian_ci": 230, - "utf8mb4_spanish_ci": 231, - "utf8mb4_swedish_ci": 232, - "utf8mb4_turkish_ci": 233, - "utf8mb4_czech_ci": 234, - "utf8mb4_danish_ci": 235, - "utf8mb4_lithuanian_ci": 236, - "utf8mb4_slovak_ci": 237, - "utf8mb4_spanish2_ci": 238, - "utf8mb4_roman_ci": 239, - "utf8mb4_persian_ci": 240, - "utf8mb4_esperanto_ci": 241, - "utf8mb4_hungarian_ci": 242, - "utf8mb4_sinhala_ci": 243, - "utf8mb4_german2_ci": 244, - "utf8mb4_croatian_ci": 245, - "utf8mb4_unicode_520_ci": 246, - "utf8mb4_vietnamese_ci": 247, - "gb18030_chinese_ci": 248, - "gb18030_bin": 249, - "gb18030_unicode_520_ci": 250, - "utf8mb4_0900_ai_ci": 255, -} - -// A denylist of collations which is unsafe to interpolate parameters. -// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. -var unsafeCollations = map[string]bool{ - "big5_chinese_ci": true, - "sjis_japanese_ci": true, - "gbk_chinese_ci": true, - "big5_bin": true, - "gb2312_bin": true, - "gbk_bin": true, - "sjis_bin": true, - "cp932_japanese_ci": true, - "cp932_bin": true, - "gb18030_chinese_ci": true, - "gb18030_bin": true, - "gb18030_unicode_520_ci": true, -} diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go deleted file mode 100644 index 024eb28589e..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/conncheck.go +++ /dev/null @@ -1,54 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos - -package mysql - -import ( - "errors" - "io" - "net" - "syscall" -) - -var errUnexpectedRead = errors.New("unexpected read from socket") - -func connCheck(conn net.Conn) error { - var sysErr error - - sysConn, ok := conn.(syscall.Conn) - if !ok { - return nil - } - rawConn, err := sysConn.SyscallConn() - if err != nil { - return err - } - - err = rawConn.Read(func(fd uintptr) bool { - var buf [1]byte - n, err := syscall.Read(int(fd), buf[:]) - switch { - case n == 0 && err == nil: - sysErr = io.EOF - case n > 0: - sysErr = errUnexpectedRead - case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: - sysErr = nil - default: - sysErr = err - } - return true - }) - if err != nil { - return err - } - - return sysErr -} diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go deleted file mode 100644 index ea7fb607ac4..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go +++ /dev/null @@ -1,17 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos - -package mysql - -import "net" - -func connCheck(conn net.Conn) error { - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go deleted file mode 100644 index 835f89729ad..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ /dev/null @@ -1,650 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "context" - "database/sql" - "database/sql/driver" - "encoding/json" - "io" - "net" - "strconv" - "strings" - "time" -) - -type mysqlConn struct { - buf buffer - netConn net.Conn - rawConn net.Conn // underlying connection when netConn is TLS connection. 
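The connCheck function above probes liveness with a non-blocking one-byte read on the raw descriptor: EOF means the peer already closed, EAGAIN means the socket is alive but idle. A hedged, unix-only standalone demo of the same technique (setup and error handling are illustrative, not the driver's code):

```go
// Hedged demo of a connCheck-style liveness probe. Unix-only (raw syscall.Read).
package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"syscall"
	"time"
)

func probe(conn net.Conn) error {
	sc, ok := conn.(syscall.Conn)
	if !ok {
		return nil // cannot probe this conn type; assume healthy
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		return err
	}
	var probeErr error
	if rerr := raw.Read(func(fd uintptr) bool {
		var buf [1]byte
		n, rerr := syscall.Read(int(fd), buf[:])
		switch {
		case n == 0 && rerr == nil:
			probeErr = io.EOF // peer sent FIN: connection is dead
		case n > 0:
			probeErr = errors.New("unexpected read") // data we did not ask for
		case rerr == syscall.EAGAIN || rerr == syscall.EWOULDBLOCK:
			probeErr = nil // nothing pending: connection looks alive
		default:
			probeErr = rerr
		}
		return true // one attempt only; never block waiting for readability
	}); rerr != nil {
		return rerr
	}
	return probeErr
}

func main() {
	ln, _ := net.Listen("tcp", "127.0.0.1:0") // demo: errors elided
	go func() { c, _ := ln.Accept(); c.Close() }() // server hangs up at once
	conn, _ := net.Dial("tcp", ln.Addr().String())
	time.Sleep(50 * time.Millisecond) // let the FIN arrive
	fmt.Println("probe:", probe(conn)) // probe: EOF
}
```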
- affectedRows uint64 - insertId uint64 - cfg *Config - maxAllowedPacket int - maxWriteSize int - writeTimeout time.Duration - flags clientFlag - status statusFlag - sequence uint8 - parseTime bool - reset bool // set when the Go SQL package calls ResetSession - - // for context support (Go 1.8+) - watching bool - watcher chan<- context.Context - closech chan struct{} - finished chan<- struct{} - canceled atomicError // set non-nil if conn is canceled - closed atomicBool // set when conn is closed, before closech is closed -} - -// Handles parameters set in DSN after the connection is established -func (mc *mysqlConn) handleParams() (err error) { - var cmdSet strings.Builder - for param, val := range mc.cfg.Params { - switch param { - // Charset: character_set_connection, character_set_client, character_set_results - case "charset": - charsets := strings.Split(val, ",") - for i := range charsets { - // ignore errors here - a charset may not exist - err = mc.exec("SET NAMES " + charsets[i]) - if err == nil { - break - } - } - if err != nil { - return - } - - // Other system vars accumulated in a single SET command - default: - if cmdSet.Len() == 0 { - // Heuristic: 29 chars for each other key=value to reduce reallocations - cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1)) - cmdSet.WriteString("SET ") - } else { - cmdSet.WriteByte(',') - } - cmdSet.WriteString(param) - cmdSet.WriteByte('=') - cmdSet.WriteString(val) - } - } - - if cmdSet.Len() > 0 { - err = mc.exec(cmdSet.String()) - if err != nil { - return - } - } - - return -} - -func (mc *mysqlConn) markBadConn(err error) error { - if mc == nil { - return err - } - if err != errBadConnNoWrite { - return err - } - return driver.ErrBadConn -} - -func (mc *mysqlConn) Begin() (driver.Tx, error) { - return mc.begin(false) -} - -func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - var q string - if readOnly { - q = "START TRANSACTION READ ONLY" - } else { - q = "START TRANSACTION" - } - err := mc.exec(q) - if err == nil { - return &mysqlTx{mc}, err - } - return nil, mc.markBadConn(err) -} - -func (mc *mysqlConn) Close() (err error) { - // Makes Close idempotent - if !mc.closed.IsSet() { - err = mc.writeCommandPacket(comQuit) - } - - mc.cleanup() - - return -} - -// Closes the network connection and unsets internal variables. Do not call this -// function after successfully authentication, call Close instead. This function -// is called before auth or on auth failure because MySQL will have already -// closed the network connection. -func (mc *mysqlConn) cleanup() { - if !mc.closed.TrySet(true) { - return - } - - // Makes cleanup idempotent - close(mc.closech) - if mc.netConn == nil { - return - } - if err := mc.netConn.Close(); err != nil { - errLog.Print(err) - } -} - -func (mc *mysqlConn) error() error { - if mc.closed.IsSet() { - if err := mc.canceled.Value(); err != nil { - return err - } - return ErrInvalidConn - } - return nil -} - -func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := mc.writeCommandPacketStr(comStmtPrepare, query) - if err != nil { - // STMT_PREPARE is safe to retry. So we can return ErrBadConn here. 
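The interpolateParams path that follows in this diff avoids prepare/close round-trips by splicing escaped literals directly into the query text. A deliberately simplified, hedged sketch of that splicing (the real driver also honors NO_BACKSLASH_ESCAPES, max_allowed_packet, and many more argument types; names here are illustrative):

```go
// Simplified illustration only: quote and backslash-escape each argument and
// substitute it for the matching '?'. Not the driver's actual implementation.
package main

import (
	"fmt"
	"strings"
)

func escape(s string) string {
	var b strings.Builder
	for _, r := range s {
		if r == '\'' || r == '\\' {
			b.WriteByte('\\')
		}
		b.WriteRune(r)
	}
	return b.String()
}

func interpolate(query string, args ...string) (string, error) {
	if strings.Count(query, "?") != len(args) {
		return "", fmt.Errorf("placeholder/argument count mismatch")
	}
	var b strings.Builder
	for _, arg := range args {
		i := strings.IndexByte(query, '?')
		b.WriteString(query[:i])
		b.WriteString("'" + escape(arg) + "'")
		query = query[i+1:]
	}
	b.WriteString(query)
	return b.String(), nil
}

func main() {
	q, _ := interpolate("SELECT * FROM t WHERE name = ?", "O'Brien")
	fmt.Println(q) // SELECT * FROM t WHERE name = 'O\'Brien'
}
```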
- errLog.Print(err) - return nil, driver.ErrBadConn - } - - stmt := &mysqlStmt{ - mc: mc, - } - - // Read Result - columnCount, err := stmt.readPrepareResultPacket() - if err == nil { - if stmt.paramCount > 0 { - if err = mc.readUntilEOF(); err != nil { - return nil, err - } - } - - if columnCount > 0 { - err = mc.readUntilEOF() - } - } - - return stmt, err -} - -func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { - // Number of ? should be same to len(args) - if strings.Count(query, "?") != len(args) { - return "", driver.ErrSkip - } - - buf, err := mc.buf.takeCompleteBuffer() - if err != nil { - // can not take the buffer. Something must be wrong with the connection - errLog.Print(err) - return "", ErrInvalidConn - } - buf = buf[:0] - argPos := 0 - - for i := 0; i < len(query); i++ { - q := strings.IndexByte(query[i:], '?') - if q == -1 { - buf = append(buf, query[i:]...) - break - } - buf = append(buf, query[i:i+q]...) - i += q - - arg := args[argPos] - argPos++ - - if arg == nil { - buf = append(buf, "NULL"...) - continue - } - - switch v := arg.(type) { - case int64: - buf = strconv.AppendInt(buf, v, 10) - case uint64: - // Handle uint64 explicitly because our custom ConvertValue emits unsigned values - buf = strconv.AppendUint(buf, v, 10) - case float64: - buf = strconv.AppendFloat(buf, v, 'g', -1, 64) - case bool: - if v { - buf = append(buf, '1') - } else { - buf = append(buf, '0') - } - case time.Time: - if v.IsZero() { - buf = append(buf, "'0000-00-00'"...) - } else { - buf = append(buf, '\'') - buf, err = appendDateTime(buf, v.In(mc.cfg.Loc)) - if err != nil { - return "", err - } - buf = append(buf, '\'') - } - case json.RawMessage: - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeBytesBackslash(buf, v) - } else { - buf = escapeBytesQuotes(buf, v) - } - buf = append(buf, '\'') - case []byte: - if v == nil { - buf = append(buf, "NULL"...) - } else { - buf = append(buf, "_binary'"...) 
- if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeBytesBackslash(buf, v) - } else { - buf = escapeBytesQuotes(buf, v) - } - buf = append(buf, '\'') - } - case string: - buf = append(buf, '\'') - if mc.status&statusNoBackslashEscapes == 0 { - buf = escapeStringBackslash(buf, v) - } else { - buf = escapeStringQuotes(buf, v) - } - buf = append(buf, '\'') - default: - return "", driver.ErrSkip - } - - if len(buf)+4 > mc.maxAllowedPacket { - return "", driver.ErrSkip - } - } - if argPos != len(args) { - return "", driver.ErrSkip - } - return string(buf), nil -} - -func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.InterpolateParams { - return nil, driver.ErrSkip - } - // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - } - mc.affectedRows = 0 - mc.insertId = 0 - - err := mc.exec(query) - if err == nil { - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, err - } - return nil, mc.markBadConn(err) -} - -// Internal function to execute commands -func (mc *mysqlConn) exec(query string) error { - // Send command - if err := mc.writeCommandPacketStr(comQuery, query); err != nil { - return mc.markBadConn(err) - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return err - } - - if resLen > 0 { - // columns - if err := mc.readUntilEOF(); err != nil { - return err - } - - // rows - if err := mc.readUntilEOF(); err != nil { - return err - } - } - - return mc.discardResults() -} - -func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { - return mc.query(query, args) -} - -func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - if len(args) != 0 { - if !mc.cfg.InterpolateParams { - return nil, driver.ErrSkip - } - // try client-side prepare to reduce roundtrip - prepared, err := mc.interpolateParams(query, args) - if err != nil { - return nil, err - } - query = prepared - } - // Send command - err := mc.writeCommandPacketStr(comQuery, query) - if err == nil { - // Read Result - var resLen int - resLen, err = mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - - if resLen == 0 { - rows.rs.done = true - - switch err := rows.NextResultSet(); err { - case nil, io.EOF: - return rows, nil - default: - return nil, err - } - } - - // Columns - rows.rs.columns, err = mc.readColumns(resLen) - return rows, err - } - } - return nil, mc.markBadConn(err) -} - -// Gets the value of the given MySQL System Variable -// The returned byte slice is only valid until the next read -func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { - // Send command - if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { - return nil, err - } - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err == nil { - rows := new(textRows) - rows.mc = mc - rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}} - - if resLen > 0 { - // Columns - if err := mc.readUntilEOF(); err != nil { - return nil, err - } - } - - dest := make([]driver.Value, resLen) - if err = rows.readRow(dest); err == 
nil { - return dest[0].([]byte), mc.readUntilEOF() - } - } - return nil, err -} - -// finish is called when the query has canceled. -func (mc *mysqlConn) cancel(err error) { - mc.canceled.Set(err) - mc.cleanup() -} - -// finish is called when the query has succeeded. -func (mc *mysqlConn) finish() { - if !mc.watching || mc.finished == nil { - return - } - select { - case mc.finished <- struct{}{}: - mc.watching = false - case <-mc.closech: - } -} - -// Ping implements driver.Pinger interface -func (mc *mysqlConn) Ping(ctx context.Context) (err error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - if err = mc.watchCancel(ctx); err != nil { - return - } - defer mc.finish() - - if err = mc.writeCommandPacket(comPing); err != nil { - return mc.markBadConn(err) - } - - return mc.readResultOK() -} - -// BeginTx implements driver.ConnBeginTx interface -func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - if mc.closed.IsSet() { - return nil, driver.ErrBadConn - } - - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - defer mc.finish() - - if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { - level, err := mapIsolationLevel(opts.Isolation) - if err != nil { - return nil, err - } - err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) - if err != nil { - return nil, err - } - } - - return mc.begin(opts.ReadOnly) -} - -func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - - rows, err := mc.query(query, dargs) - if err != nil { - mc.finish() - return nil, err - } - rows.finish = mc.finish - return rows, err -} - -func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - defer mc.finish() - - return mc.Exec(query, dargs) -} - -func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - - stmt, err := mc.Prepare(query) - mc.finish() - if err != nil { - return nil, err - } - - select { - default: - case <-ctx.Done(): - stmt.Close() - return nil, ctx.Err() - } - return stmt, nil -} - -func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := stmt.mc.watchCancel(ctx); err != nil { - return nil, err - } - - rows, err := stmt.query(dargs) - if err != nil { - stmt.mc.finish() - return nil, err - } - rows.finish = stmt.mc.finish - return rows, err -} - -func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := stmt.mc.watchCancel(ctx); err != nil { - return nil, err - } - defer stmt.mc.finish() - - return stmt.Exec(dargs) -} - -func (mc *mysqlConn) watchCancel(ctx context.Context) error { - if mc.watching { - // Reach here if canceled, - // so the connection is already invalid - mc.cleanup() - return nil - } - // When ctx is already cancelled, don't watch it. 
- if err := ctx.Err(); err != nil { - return err - } - // When ctx is not cancellable, don't watch it. - if ctx.Done() == nil { - return nil - } - // When watcher is not alive, can't watch it. - if mc.watcher == nil { - return nil - } - - mc.watching = true - mc.watcher <- ctx - return nil -} - -func (mc *mysqlConn) startWatcher() { - watcher := make(chan context.Context, 1) - mc.watcher = watcher - finished := make(chan struct{}) - mc.finished = finished - go func() { - for { - var ctx context.Context - select { - case ctx = <-watcher: - case <-mc.closech: - return - } - - select { - case <-ctx.Done(): - mc.cancel(ctx.Err()) - case <-finished: - case <-mc.closech: - return - } - } - }() -} - -func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { - nv.Value, err = converter{}.ConvertValue(nv.Value) - return -} - -// ResetSession implements driver.SessionResetter. -// (From Go 1.10) -func (mc *mysqlConn) ResetSession(ctx context.Context) error { - if mc.closed.IsSet() { - return driver.ErrBadConn - } - mc.reset = true - return nil -} - -// IsValid implements driver.Validator interface -// (From Go 1.15) -func (mc *mysqlConn) IsValid() bool { - return !mc.closed.IsSet() -} diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go deleted file mode 100644 index d567b4e4fc0..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/connector.go +++ /dev/null @@ -1,146 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "context" - "database/sql/driver" - "net" -) - -type connector struct { - cfg *Config // immutable private copy. -} - -// Connect implements driver.Connector interface. -// Connect returns a connection to the database. -func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { - var err error - - // New mysqlConn - mc := &mysqlConn{ - maxAllowedPacket: maxPacketSize, - maxWriteSize: maxPacketSize - 1, - closech: make(chan struct{}), - cfg: c.cfg, - } - mc.parseTime = mc.cfg.ParseTime - - // Connect to Server - dialsLock.RLock() - dial, ok := dials[mc.cfg.Net] - dialsLock.RUnlock() - if ok { - dctx := ctx - if mc.cfg.Timeout > 0 { - var cancel context.CancelFunc - dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) - defer cancel() - } - mc.netConn, err = dial(dctx, mc.cfg.Addr) - } else { - nd := net.Dialer{Timeout: mc.cfg.Timeout} - mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) - } - - if err != nil { - return nil, err - } - - // Enable TCP Keepalives on TCP connections - if tc, ok := mc.netConn.(*net.TCPConn); ok { - if err := tc.SetKeepAlive(true); err != nil { - // Don't send COM_QUIT before handshake. 
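The startWatcher/watchCancel pair above dedicates one long-lived goroutine per connection that races each query's context against query completion. A stripped-down, hedged sketch of that pattern (no driver types; the real driver guards the finish/cancel race with extra state, and the demo timings here keep the outcome unambiguous):

```go
// Hedged sketch of the per-connection watcher-goroutine pattern.
package main

import (
	"context"
	"fmt"
	"time"
)

type conn struct {
	watcher  chan context.Context
	finished chan struct{}
	closech  chan struct{}
}

func newConn() *conn {
	c := &conn{
		watcher:  make(chan context.Context, 1),
		finished: make(chan struct{}),
		closech:  make(chan struct{}),
	}
	go func() {
		for {
			var ctx context.Context
			select {
			case ctx = <-c.watcher: // a query handed us its context
			case <-c.closech:
				return
			}
			select {
			case <-ctx.Done():
				fmt.Println("watcher: killing conn:", ctx.Err())
			case <-c.finished: // query beat the context
			case <-c.closech:
				return
			}
		}
	}()
	return c
}

func (c *conn) query(ctx context.Context, work time.Duration) {
	c.watcher <- ctx // start watching before doing the work
	select {
	case <-time.After(work):
		c.finished <- struct{}{} // tell the watcher we won the race
	case <-ctx.Done():
	}
}

func main() {
	c := newConn()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	c.query(ctx, time.Second) // times out; watcher fires
	time.Sleep(20 * time.Millisecond)
	close(c.closech)
}
```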
- mc.netConn.Close() - mc.netConn = nil - return nil, err - } - } - - // Call startWatcher for context support (From Go 1.8) - mc.startWatcher() - if err := mc.watchCancel(ctx); err != nil { - mc.cleanup() - return nil, err - } - defer mc.finish() - - mc.buf = newBuffer(mc.netConn) - - // Set I/O timeouts - mc.buf.timeout = mc.cfg.ReadTimeout - mc.writeTimeout = mc.cfg.WriteTimeout - - // Reading Handshake Initialization Packet - authData, plugin, err := mc.readHandshakePacket() - if err != nil { - mc.cleanup() - return nil, err - } - - if plugin == "" { - plugin = defaultAuthPlugin - } - - // Send Client Authentication Packet - authResp, err := mc.auth(authData, plugin) - if err != nil { - // try the default auth plugin, if using the requested plugin failed - errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) - plugin = defaultAuthPlugin - authResp, err = mc.auth(authData, plugin) - if err != nil { - mc.cleanup() - return nil, err - } - } - if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil { - mc.cleanup() - return nil, err - } - - // Handle response to auth packet, switch methods if possible - if err = mc.handleAuthResult(authData, plugin); err != nil { - // Authentication failed and MySQL has already closed the connection - // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). - // Do not send COM_QUIT, just cleanup and return the error. - mc.cleanup() - return nil, err - } - - if mc.cfg.MaxAllowedPacket > 0 { - mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket - } else { - // Get max allowed packet size - maxap, err := mc.getSystemVar("max_allowed_packet") - if err != nil { - mc.Close() - return nil, err - } - mc.maxAllowedPacket = stringToInt(maxap) - 1 - } - if mc.maxAllowedPacket < maxPacketSize { - mc.maxWriteSize = mc.maxAllowedPacket - } - - // Handle DSN Params - err = mc.handleParams() - if err != nil { - mc.Close() - return nil, err - } - - return mc, nil -} - -// Driver implements driver.Connector interface. -// Driver returns &MySQLDriver{}. -func (c *connector) Driver() driver.Driver { - return &MySQLDriver{} -} diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go deleted file mode 100644 index b1e6b85efca..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/const.go +++ /dev/null @@ -1,174 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -const ( - defaultAuthPlugin = "mysql_native_password" - defaultMaxAllowedPacket = 4 << 20 // 4 MiB - minProtocolVersion = 10 - maxPacketSize = 1<<24 - 1 - timeFormat = "2006-01-02 15:04:05.999999" -) - -// MySQL constants documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -const ( - iOK byte = 0x00 - iAuthMoreData byte = 0x01 - iLocalInFile byte = 0xfb - iEOF byte = 0xfe - iERR byte = 0xff -) - -// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags -type clientFlag uint32 - -const ( - clientLongPassword clientFlag = 1 << iota - clientFoundRows - clientLongFlag - clientConnectWithDB - clientNoSchema - clientCompress - clientODBC - clientLocalFiles - clientIgnoreSpace - clientProtocol41 - clientInteractive - clientSSL - clientIgnoreSIGPIPE - clientTransactions - clientReserved - clientSecureConn - clientMultiStatements - clientMultiResults - clientPSMultiResults - clientPluginAuth - clientConnectAttrs - clientPluginAuthLenEncClientData - clientCanHandleExpiredPasswords - clientSessionTrack - clientDeprecateEOF -) - -const ( - comQuit byte = iota + 1 - comInitDB - comQuery - comFieldList - comCreateDB - comDropDB - comRefresh - comShutdown - comStatistics - comProcessInfo - comConnect - comProcessKill - comDebug - comPing - comTime - comDelayedInsert - comChangeUser - comBinlogDump - comTableDump - comConnectOut - comRegisterSlave - comStmtPrepare - comStmtExecute - comStmtSendLongData - comStmtClose - comStmtReset - comSetOption - comStmtFetch -) - -// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType -type fieldType byte - -const ( - fieldTypeDecimal fieldType = iota - fieldTypeTiny - fieldTypeShort - fieldTypeLong - fieldTypeFloat - fieldTypeDouble - fieldTypeNULL - fieldTypeTimestamp - fieldTypeLongLong - fieldTypeInt24 - fieldTypeDate - fieldTypeTime - fieldTypeDateTime - fieldTypeYear - fieldTypeNewDate - fieldTypeVarChar - fieldTypeBit -) -const ( - fieldTypeJSON fieldType = iota + 0xf5 - fieldTypeNewDecimal - fieldTypeEnum - fieldTypeSet - fieldTypeTinyBLOB - fieldTypeMediumBLOB - fieldTypeLongBLOB - fieldTypeBLOB - fieldTypeVarString - fieldTypeString - fieldTypeGeometry -) - -type fieldFlag uint16 - -const ( - flagNotNULL fieldFlag = 1 << iota - flagPriKey - flagUniqueKey - flagMultipleKey - flagBLOB - flagUnsigned - flagZeroFill - flagBinary - flagEnum - flagAutoIncrement - flagTimestamp - flagSet - flagUnknown1 - flagUnknown2 - flagUnknown3 - flagUnknown4 -) - -// http://dev.mysql.com/doc/internals/en/status-flags.html -type statusFlag uint16 - -const ( - statusInTrans statusFlag = 1 << iota - statusInAutocommit - statusReserved // Not in documentation - statusMoreResultsExists - statusNoGoodIndexUsed - statusNoIndexUsed - statusCursorExists - statusLastRowSent - statusDbDropped - statusNoBackslashEscapes - statusMetadataChanged - statusQueryWasSlow - statusPsOutParams - statusInTransReadonly - statusSessionStateChanged -) - -const ( - cachingSha2PasswordRequestPublicKey = 2 - cachingSha2PasswordFastAuthSuccess = 3 - cachingSha2PasswordPerformFullAuthentication = 4 -) diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go deleted file mode 100644 index c1bdf1199b6..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
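const.go above expresses every capability, field, and status code as `1 << iota` bit flags or sequential command bytes. A hedged mini-example of how such capability bits are combined and tested (constants copied from the block above; only the first four are reproduced):

```go
// Hedged illustration of the 1<<iota flag style used by clientFlag above.
package main

import "fmt"

type clientFlag uint32

const (
	clientLongPassword  clientFlag = 1 << iota // 1
	clientFoundRows                            // 2
	clientLongFlag                             // 4
	clientConnectWithDB                        // 8
)

func main() {
	caps := clientLongPassword | clientConnectWithDB // OR bits in
	fmt.Println(caps&clientFoundRows != 0)           // false
	fmt.Println(caps&clientConnectWithDB != 0)       // true
}
```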
-// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// Package mysql provides a MySQL driver for Go's database/sql package. -// -// The driver should be used via the database/sql package: -// -// import "database/sql" -// import _ "github.com/go-sql-driver/mysql" -// -// db, err := sql.Open("mysql", "user:password@/dbname") -// -// See https://github.com/go-sql-driver/mysql#usage for details -package mysql - -import ( - "context" - "database/sql" - "database/sql/driver" - "net" - "sync" -) - -// MySQLDriver is exported to make the driver directly accessible. -// In general the driver is used via the database/sql package. -type MySQLDriver struct{} - -// DialFunc is a function which can be used to establish the network connection. -// Custom dial functions must be registered with RegisterDial -// -// Deprecated: users should register a DialContextFunc instead -type DialFunc func(addr string) (net.Conn, error) - -// DialContextFunc is a function which can be used to establish the network connection. -// Custom dial functions must be registered with RegisterDialContext -type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error) - -var ( - dialsLock sync.RWMutex - dials map[string]DialContextFunc -) - -// RegisterDialContext registers a custom dial function. It can then be used by the -// network address mynet(addr), where mynet is the registered new network. -// The current context for the connection and its address is passed to the dial function. -func RegisterDialContext(net string, dial DialContextFunc) { - dialsLock.Lock() - defer dialsLock.Unlock() - if dials == nil { - dials = make(map[string]DialContextFunc) - } - dials[net] = dial -} - -// RegisterDial registers a custom dial function. It can then be used by the -// network address mynet(addr), where mynet is the registered new network. -// addr is passed as a parameter to the dial function. -// -// Deprecated: users should call RegisterDialContext instead -func RegisterDial(network string, dial DialFunc) { - RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) { - return dial(addr) - }) -} - -// Open new Connection. -// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formatted -func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { - cfg, err := ParseDSN(dsn) - if err != nil { - return nil, err - } - c := &connector{ - cfg: cfg, - } - return c.Connect(context.Background()) -} - -func init() { - sql.Register("mysql", &MySQLDriver{}) -} - -// NewConnector returns new driver.Connector. -func NewConnector(cfg *Config) (driver.Connector, error) { - cfg = cfg.Clone() - // normalize the contents of cfg so calls to NewConnector have the same - // behavior as MySQLDriver.OpenConnector - if err := cfg.normalize(); err != nil { - return nil, err - } - return &connector{cfg: cfg}, nil -} - -// OpenConnector implements driver.DriverContext. 
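NewConnector above pairs naturally with database/sql's OpenDB, skipping DSN string formatting entirely. A hedged usage sketch (address, credentials, and database name are illustrative; the Ping only succeeds against a reachable MySQL server):

```go
// Hedged sketch: build a *mysql.Config, hand the connector to database/sql.
package main

import (
	"database/sql"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig() // sets the driver's defaults
	cfg.User = "gopher"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "test"

	connector, err := mysql.NewConnector(cfg)
	if err != nil {
		log.Fatal(err)
	}
	db := sql.OpenDB(connector) // no DSN round-trip needed
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Println("ping (needs a running server):", err)
	}
}
```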
-func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) { - cfg, err := ParseDSN(dsn) - if err != nil { - return nil, err - } - return &connector{ - cfg: cfg, - }, nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go deleted file mode 100644 index 93f3548cb8c..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ /dev/null @@ -1,560 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/rsa" - "crypto/tls" - "errors" - "fmt" - "math/big" - "net" - "net/url" - "sort" - "strconv" - "strings" - "time" -) - -var ( - errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") - errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") - errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") - errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") -) - -// Config is a configuration parsed from a DSN string. -// If a new Config is created instead of being parsed from a DSN string, -// the NewConfig function should be used, which sets default values. -type Config struct { - User string // Username - Passwd string // Password (requires User) - Net string // Network type - Addr string // Network address (requires Net) - DBName string // Database name - Params map[string]string // Connection parameters - Collation string // Connection collation - Loc *time.Location // Location for time.Time values - MaxAllowedPacket int // Max packet size allowed - ServerPubKey string // Server public key name - pubKey *rsa.PublicKey // Server public key - TLSConfig string // TLS configuration name - tls *tls.Config // TLS configuration - Timeout time.Duration // Dial timeout - ReadTimeout time.Duration // I/O read timeout - WriteTimeout time.Duration // I/O write timeout - - AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE - AllowCleartextPasswords bool // Allows the cleartext client side plugin - AllowNativePasswords bool // Allows the native password authentication method - AllowOldPasswords bool // Allows the old insecure password method - CheckConnLiveness bool // Check connections for liveness before using them - ClientFoundRows bool // Return number of matching rows instead of rows changed - ColumnsWithAlias bool // Prepend table alias to column names - InterpolateParams bool // Interpolate placeholders into query string - MultiStatements bool // Allow multiple statements in one query - ParseTime bool // Parse time values to time.Time - RejectReadOnly bool // Reject read-only connections -} - -// NewConfig creates a new Config and sets default values. 
-func NewConfig() *Config { - return &Config{ - Collation: defaultCollation, - Loc: time.UTC, - MaxAllowedPacket: defaultMaxAllowedPacket, - AllowNativePasswords: true, - CheckConnLiveness: true, - } -} - -func (cfg *Config) Clone() *Config { - cp := *cfg - if cp.tls != nil { - cp.tls = cfg.tls.Clone() - } - if len(cp.Params) > 0 { - cp.Params = make(map[string]string, len(cfg.Params)) - for k, v := range cfg.Params { - cp.Params[k] = v - } - } - if cfg.pubKey != nil { - cp.pubKey = &rsa.PublicKey{ - N: new(big.Int).Set(cfg.pubKey.N), - E: cfg.pubKey.E, - } - } - return &cp -} - -func (cfg *Config) normalize() error { - if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { - return errInvalidDSNUnsafeCollation - } - - // Set default network if empty - if cfg.Net == "" { - cfg.Net = "tcp" - } - - // Set default address if empty - if cfg.Addr == "" { - switch cfg.Net { - case "tcp": - cfg.Addr = "127.0.0.1:3306" - case "unix": - cfg.Addr = "/tmp/mysql.sock" - default: - return errors.New("default addr for network '" + cfg.Net + "' unknown") - } - } else if cfg.Net == "tcp" { - cfg.Addr = ensureHavePort(cfg.Addr) - } - - switch cfg.TLSConfig { - case "false", "": - // don't set anything - case "true": - cfg.tls = &tls.Config{} - case "skip-verify", "preferred": - cfg.tls = &tls.Config{InsecureSkipVerify: true} - default: - cfg.tls = getTLSConfigClone(cfg.TLSConfig) - if cfg.tls == nil { - return errors.New("invalid value / unknown config name: " + cfg.TLSConfig) - } - } - - if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { - host, _, err := net.SplitHostPort(cfg.Addr) - if err == nil { - cfg.tls.ServerName = host - } - } - - if cfg.ServerPubKey != "" { - cfg.pubKey = getServerPubKey(cfg.ServerPubKey) - if cfg.pubKey == nil { - return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey) - } - } - - return nil -} - -func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) { - buf.Grow(1 + len(name) + 1 + len(value)) - if !*hasParam { - *hasParam = true - buf.WriteByte('?') - } else { - buf.WriteByte('&') - } - buf.WriteString(name) - buf.WriteByte('=') - buf.WriteString(value) -} - -// FormatDSN formats the given Config into a DSN string which can be passed to -// the driver. 
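Config, NewConfig, and normalize above, together with FormatDSN (whose body follows) and ParseDSN, round-trip a configuration through the DSN string form. A hedged example (credentials and address illustrative; the printed DSN follows the parameter order FormatDSN emits):

```go
// Hedged round-trip through the exported DSN entry points defined in dsn.go.
package main

import (
	"fmt"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	cfg := mysql.NewConfig()
	cfg.User = "gopher"
	cfg.Passwd = "secret"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "test"
	cfg.ParseTime = true
	cfg.Timeout = 5 * time.Second

	dsn := cfg.FormatDSN()
	fmt.Println(dsn) // gopher:secret@tcp(127.0.0.1:3306)/test?parseTime=true&timeout=5s

	back, err := mysql.ParseDSN(dsn)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.Addr, back.ParseTime) // 127.0.0.1:3306 true
}
```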
-func (cfg *Config) FormatDSN() string { - var buf bytes.Buffer - - // [username[:password]@] - if len(cfg.User) > 0 { - buf.WriteString(cfg.User) - if len(cfg.Passwd) > 0 { - buf.WriteByte(':') - buf.WriteString(cfg.Passwd) - } - buf.WriteByte('@') - } - - // [protocol[(address)]] - if len(cfg.Net) > 0 { - buf.WriteString(cfg.Net) - if len(cfg.Addr) > 0 { - buf.WriteByte('(') - buf.WriteString(cfg.Addr) - buf.WriteByte(')') - } - } - - // /dbname - buf.WriteByte('/') - buf.WriteString(cfg.DBName) - - // [?param1=value1&...¶mN=valueN] - hasParam := false - - if cfg.AllowAllFiles { - hasParam = true - buf.WriteString("?allowAllFiles=true") - } - - if cfg.AllowCleartextPasswords { - writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true") - } - - if !cfg.AllowNativePasswords { - writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false") - } - - if cfg.AllowOldPasswords { - writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true") - } - - if !cfg.CheckConnLiveness { - writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false") - } - - if cfg.ClientFoundRows { - writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") - } - - if col := cfg.Collation; col != defaultCollation && len(col) > 0 { - writeDSNParam(&buf, &hasParam, "collation", col) - } - - if cfg.ColumnsWithAlias { - writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true") - } - - if cfg.InterpolateParams { - writeDSNParam(&buf, &hasParam, "interpolateParams", "true") - } - - if cfg.Loc != time.UTC && cfg.Loc != nil { - writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String())) - } - - if cfg.MultiStatements { - writeDSNParam(&buf, &hasParam, "multiStatements", "true") - } - - if cfg.ParseTime { - writeDSNParam(&buf, &hasParam, "parseTime", "true") - } - - if cfg.ReadTimeout > 0 { - writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String()) - } - - if cfg.RejectReadOnly { - writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true") - } - - if len(cfg.ServerPubKey) > 0 { - writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey)) - } - - if cfg.Timeout > 0 { - writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String()) - } - - if len(cfg.TLSConfig) > 0 { - writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig)) - } - - if cfg.WriteTimeout > 0 { - writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String()) - } - - if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { - writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket)) - } - - // other params - if cfg.Params != nil { - var params []string - for param := range cfg.Params { - params = append(params, param) - } - sort.Strings(params) - for _, param := range params { - writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param])) - } - } - - return buf.String() -} - -// ParseDSN parses the DSN string to a Config -func ParseDSN(dsn string) (cfg *Config, err error) { - // New config with some default values - cfg = NewConfig() - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - foundSlash := false - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - foundSlash = true - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; 
k++ { - if dsn[k] == ':' { - cfg.Passwd = dsn[k+1 : j] - break - } - } - cfg.User = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return nil, errInvalidDSNUnescaped - } - return nil, errInvalidDSNAddr - } - cfg.Addr = dsn[k+1 : i-1] - break - } - } - cfg.Net = dsn[j+1 : k] - } - - // dbname[?param1=value1&...¶mN=valueN] - // Find the first '?' in dsn[i+1:] - for j = i + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - cfg.DBName = dsn[i+1 : j] - - break - } - } - - if !foundSlash && len(dsn) > 0 { - return nil, errInvalidDSNNoSlash - } - - if err = cfg.normalize(); err != nil { - return nil, err - } - return -} - -// parseDSNParams parses the DSN "query string" -// Values must be url.QueryEscape'ed -func parseDSNParams(cfg *Config, params string) (err error) { - for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { - continue - } - - // cfg params - switch value := param[1]; param[0] { - // Disable INFILE allowlist / enable all files - case "allowAllFiles": - var isBool bool - cfg.AllowAllFiles, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Use cleartext authentication mode (MySQL 5.5.10+) - case "allowCleartextPasswords": - var isBool bool - cfg.AllowCleartextPasswords, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Use native password authentication - case "allowNativePasswords": - var isBool bool - cfg.AllowNativePasswords, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Use old authentication mode (pre MySQL 4.1) - case "allowOldPasswords": - var isBool bool - cfg.AllowOldPasswords, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Check connections for Liveness before using them - case "checkConnLiveness": - var isBool bool - cfg.CheckConnLiveness, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Switch "rowsAffected" mode - case "clientFoundRows": - var isBool bool - cfg.ClientFoundRows, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Collation - case "collation": - cfg.Collation = value - break - - case "columnsWithAlias": - var isBool bool - cfg.ColumnsWithAlias, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Compression - case "compress": - return errors.New("compression not implemented yet") - - // Enable client side placeholder substitution - case "interpolateParams": - var isBool bool - cfg.InterpolateParams, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Time Location - case "loc": - if value, err = url.QueryUnescape(value); err != nil { - return - } - cfg.Loc, err = time.LoadLocation(value) - if err != nil { - return - } - - // multiple statements in one query - case "multiStatements": - var isBool bool - cfg.MultiStatements, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // time.Time parsing - case "parseTime": - var isBool bool - 
cfg.ParseTime, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // I/O read Timeout - case "readTimeout": - cfg.ReadTimeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // Reject read-only connections - case "rejectReadOnly": - var isBool bool - cfg.RejectReadOnly, isBool = readBool(value) - if !isBool { - return errors.New("invalid bool value: " + value) - } - - // Server public key - case "serverPubKey": - name, err := url.QueryUnescape(value) - if err != nil { - return fmt.Errorf("invalid value for server pub key name: %v", err) - } - cfg.ServerPubKey = name - - // Strict mode - case "strict": - panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode") - - // Dial Timeout - case "timeout": - cfg.Timeout, err = time.ParseDuration(value) - if err != nil { - return - } - - // TLS-Encryption - case "tls": - boolValue, isBool := readBool(value) - if isBool { - if boolValue { - cfg.TLSConfig = "true" - } else { - cfg.TLSConfig = "false" - } - } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" { - cfg.TLSConfig = vl - } else { - name, err := url.QueryUnescape(value) - if err != nil { - return fmt.Errorf("invalid value for TLS config name: %v", err) - } - cfg.TLSConfig = name - } - - // I/O write Timeout - case "writeTimeout": - cfg.WriteTimeout, err = time.ParseDuration(value) - if err != nil { - return - } - case "maxAllowedPacket": - cfg.MaxAllowedPacket, err = strconv.Atoi(value) - if err != nil { - return - } - default: - // lazy init - if cfg.Params == nil { - cfg.Params = make(map[string]string) - } - - if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { - return - } - } - } - - return -} - -func ensureHavePort(addr string) string { - if _, _, err := net.SplitHostPort(addr); err != nil { - return net.JoinHostPort(addr, "3306") - } - return addr -} diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go deleted file mode 100644 index 760782ff2fb..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/errors.go +++ /dev/null @@ -1,65 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "errors" - "fmt" - "log" - "os" -) - -// Various errors the driver might return. Can change between driver versions. -var ( - ErrInvalidConn = errors.New("invalid connection") - ErrMalformPkt = errors.New("malformed packet") - ErrNoTLS = errors.New("TLS requested but server does not support TLS") - ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") - ErrNativePassword = errors.New("this user requires mysql native password authentication.") - ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. 
See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") - ErrUnknownPlugin = errors.New("this authentication plugin is not supported") - ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") - ErrPktSync = errors.New("commands out of sync. You can't run this command now") - ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") - ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") - ErrBusyBuffer = errors.New("busy buffer") - - // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. - // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn - // to trigger a resend. - // See https://github.com/go-sql-driver/mysql/pull/302 - errBadConnNoWrite = errors.New("bad connection") -) - -var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) - -// Logger is used to log critical error messages. -type Logger interface { - Print(v ...interface{}) -} - -// SetLogger is used to set the logger for critical errors. -// The initial logger is os.Stderr. -func SetLogger(logger Logger) error { - if logger == nil { - return errors.New("logger is nil") - } - errLog = logger - return nil -} - -// MySQLError is an error type which represents a single MySQL error -type MySQLError struct { - Number uint16 - Message string -} - -func (me *MySQLError) Error() string { - return fmt.Sprintf("Error %d: %s", me.Number, me.Message) -} diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go deleted file mode 100644 index ed6c7a37d83..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/fields.go +++ /dev/null @@ -1,194 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
- -package mysql - -import ( - "database/sql" - "reflect" -) - -func (mf *mysqlField) typeDatabaseName() string { - switch mf.fieldType { - case fieldTypeBit: - return "BIT" - case fieldTypeBLOB: - if mf.charSet != collations[binaryCollation] { - return "TEXT" - } - return "BLOB" - case fieldTypeDate: - return "DATE" - case fieldTypeDateTime: - return "DATETIME" - case fieldTypeDecimal: - return "DECIMAL" - case fieldTypeDouble: - return "DOUBLE" - case fieldTypeEnum: - return "ENUM" - case fieldTypeFloat: - return "FLOAT" - case fieldTypeGeometry: - return "GEOMETRY" - case fieldTypeInt24: - return "MEDIUMINT" - case fieldTypeJSON: - return "JSON" - case fieldTypeLong: - return "INT" - case fieldTypeLongBLOB: - if mf.charSet != collations[binaryCollation] { - return "LONGTEXT" - } - return "LONGBLOB" - case fieldTypeLongLong: - return "BIGINT" - case fieldTypeMediumBLOB: - if mf.charSet != collations[binaryCollation] { - return "MEDIUMTEXT" - } - return "MEDIUMBLOB" - case fieldTypeNewDate: - return "DATE" - case fieldTypeNewDecimal: - return "DECIMAL" - case fieldTypeNULL: - return "NULL" - case fieldTypeSet: - return "SET" - case fieldTypeShort: - return "SMALLINT" - case fieldTypeString: - if mf.charSet == collations[binaryCollation] { - return "BINARY" - } - return "CHAR" - case fieldTypeTime: - return "TIME" - case fieldTypeTimestamp: - return "TIMESTAMP" - case fieldTypeTiny: - return "TINYINT" - case fieldTypeTinyBLOB: - if mf.charSet != collations[binaryCollation] { - return "TINYTEXT" - } - return "TINYBLOB" - case fieldTypeVarChar: - if mf.charSet == collations[binaryCollation] { - return "VARBINARY" - } - return "VARCHAR" - case fieldTypeVarString: - if mf.charSet == collations[binaryCollation] { - return "VARBINARY" - } - return "VARCHAR" - case fieldTypeYear: - return "YEAR" - default: - return "" - } -} - -var ( - scanTypeFloat32 = reflect.TypeOf(float32(0)) - scanTypeFloat64 = reflect.TypeOf(float64(0)) - scanTypeInt8 = reflect.TypeOf(int8(0)) - scanTypeInt16 = reflect.TypeOf(int16(0)) - scanTypeInt32 = reflect.TypeOf(int32(0)) - scanTypeInt64 = reflect.TypeOf(int64(0)) - scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) - scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) - scanTypeNullTime = reflect.TypeOf(nullTime{}) - scanTypeUint8 = reflect.TypeOf(uint8(0)) - scanTypeUint16 = reflect.TypeOf(uint16(0)) - scanTypeUint32 = reflect.TypeOf(uint32(0)) - scanTypeUint64 = reflect.TypeOf(uint64(0)) - scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) - scanTypeUnknown = reflect.TypeOf(new(interface{})) -) - -type mysqlField struct { - tableName string - name string - length uint32 - flags fieldFlag - fieldType fieldType - decimals byte - charSet uint8 -} - -func (mf *mysqlField) scanType() reflect.Type { - switch mf.fieldType { - case fieldTypeTiny: - if mf.flags&flagNotNULL != 0 { - if mf.flags&flagUnsigned != 0 { - return scanTypeUint8 - } - return scanTypeInt8 - } - return scanTypeNullInt - - case fieldTypeShort, fieldTypeYear: - if mf.flags&flagNotNULL != 0 { - if mf.flags&flagUnsigned != 0 { - return scanTypeUint16 - } - return scanTypeInt16 - } - return scanTypeNullInt - - case fieldTypeInt24, fieldTypeLong: - if mf.flags&flagNotNULL != 0 { - if mf.flags&flagUnsigned != 0 { - return scanTypeUint32 - } - return scanTypeInt32 - } - return scanTypeNullInt - - case fieldTypeLongLong: - if mf.flags&flagNotNULL != 0 { - if mf.flags&flagUnsigned != 0 { - return scanTypeUint64 - } - return scanTypeInt64 - } - return scanTypeNullInt - - case fieldTypeFloat: - if 
mf.flags&flagNotNULL != 0 { - return scanTypeFloat32 - } - return scanTypeNullFloat - - case fieldTypeDouble: - if mf.flags&flagNotNULL != 0 { - return scanTypeFloat64 - } - return scanTypeNullFloat - - case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON, - fieldTypeTime: - return scanTypeRawBytes - - case fieldTypeDate, fieldTypeNewDate, - fieldTypeTimestamp, fieldTypeDateTime: - // NullTime is always returned for more consistent behavior as it can - // handle both cases of parseTime regardless if the field is nullable. - return scanTypeNullTime - - default: - return scanTypeUnknown - } -} diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go deleted file mode 100644 index fa75adf6a09..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/fuzz.go +++ /dev/null @@ -1,24 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. -// -// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build gofuzz - -package mysql - -import ( - "database/sql" -) - -func Fuzz(data []byte) int { - db, err := sql.Open("mysql", string(data)) - if err != nil { - return 0 - } - db.Close() - return 1 -} diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go deleted file mode 100644 index 60effdfc225..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/infile.go +++ /dev/null @@ -1,182 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "fmt" - "io" - "os" - "strings" - "sync" -) - -var ( - fileRegister map[string]bool - fileRegisterLock sync.RWMutex - readerRegister map[string]func() io.Reader - readerRegisterLock sync.RWMutex -) - -// RegisterLocalFile adds the given file to the file allowlist, -// so that it can be used by "LOAD DATA LOCAL INFILE ". -// Alternatively you can allow the use of all local files with -// the DSN parameter 'allowAllFiles=true' -// -// filePath := "/home/gopher/data.csv" -// mysql.RegisterLocalFile(filePath) -// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterLocalFile(filePath string) { - fileRegisterLock.Lock() - // lazy map init - if fileRegister == nil { - fileRegister = make(map[string]bool) - } - - fileRegister[strings.Trim(filePath, `"`)] = true - fileRegisterLock.Unlock() -} - -// DeregisterLocalFile removes the given filepath from the allowlist. -func DeregisterLocalFile(filePath string) { - fileRegisterLock.Lock() - delete(fileRegister, strings.Trim(filePath, `"`)) - fileRegisterLock.Unlock() -} - -// RegisterReaderHandler registers a handler function which is used -// to receive a io.Reader. -// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". 
-// If the handler returns a io.ReadCloser Close() is called when the -// request is finished. -// -// mysql.RegisterReaderHandler("data", func() io.Reader { -// var csvReader io.Reader // Some Reader that returns CSV data -// ... // Open Reader here -// return csvReader -// }) -// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") -// if err != nil { -// ... -// -func RegisterReaderHandler(name string, handler func() io.Reader) { - readerRegisterLock.Lock() - // lazy map init - if readerRegister == nil { - readerRegister = make(map[string]func() io.Reader) - } - - readerRegister[name] = handler - readerRegisterLock.Unlock() -} - -// DeregisterReaderHandler removes the ReaderHandler function with -// the given name from the registry. -func DeregisterReaderHandler(name string) { - readerRegisterLock.Lock() - delete(readerRegister, name) - readerRegisterLock.Unlock() -} - -func deferredClose(err *error, closer io.Closer) { - closeErr := closer.Close() - if *err == nil { - *err = closeErr - } -} - -func (mc *mysqlConn) handleInFileRequest(name string) (err error) { - var rdr io.Reader - var data []byte - packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP - if mc.maxWriteSize < packetSize { - packetSize = mc.maxWriteSize - } - - if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader - // The server might return an an absolute path. See issue #355. - name = name[idx+8:] - - readerRegisterLock.RLock() - handler, inMap := readerRegister[name] - readerRegisterLock.RUnlock() - - if inMap { - rdr = handler() - if rdr != nil { - if cl, ok := rdr.(io.Closer); ok { - defer deferredClose(&err, cl) - } - } else { - err = fmt.Errorf("Reader '%s' is ", name) - } - } else { - err = fmt.Errorf("Reader '%s' is not registered", name) - } - } else { // File - name = strings.Trim(name, `"`) - fileRegisterLock.RLock() - fr := fileRegister[name] - fileRegisterLock.RUnlock() - if mc.cfg.AllowAllFiles || fr { - var file *os.File - var fi os.FileInfo - - if file, err = os.Open(name); err == nil { - defer deferredClose(&err, file) - - // get file size - if fi, err = file.Stat(); err == nil { - rdr = file - if fileSize := int(fi.Size()); fileSize < packetSize { - packetSize = fileSize - } - } - } - } else { - err = fmt.Errorf("local file '%s' is not registered", name) - } - } - - // send content packets - // if packetSize == 0, the Reader contains no data - if err == nil && packetSize > 0 { - data := make([]byte, 4+packetSize) - var n int - for err == nil { - n, err = rdr.Read(data[4:]) - if n > 0 { - if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { - return ioErr - } - } - } - if err == io.EOF { - err = nil - } - } - - // send empty packet (termination) - if data == nil { - data = make([]byte, 4) - } - if ioErr := mc.writePacket(data[:4]); ioErr != nil { - return ioErr - } - - // read OK packet - if err == nil { - return mc.readResultOK() - } - - mc.readPacket() - return err -} diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go deleted file mode 100644 index 651723a9618..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/nulltime.go +++ /dev/null @@ -1,50 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "database/sql/driver" - "fmt" - "time" -) - -// Scan implements the Scanner interface. -// The value type must be time.Time or string / []byte (formatted time-string), -// otherwise Scan fails. -func (nt *NullTime) Scan(value interface{}) (err error) { - if value == nil { - nt.Time, nt.Valid = time.Time{}, false - return - } - - switch v := value.(type) { - case time.Time: - nt.Time, nt.Valid = v, true - return - case []byte: - nt.Time, err = parseDateTime(v, time.UTC) - nt.Valid = (err == nil) - return - case string: - nt.Time, err = parseDateTime([]byte(v), time.UTC) - nt.Valid = (err == nil) - return - } - - nt.Valid = false - return fmt.Errorf("Can't convert %T to time.Time", value) -} - -// Value implements the driver Valuer interface. -func (nt NullTime) Value() (driver.Value, error) { - if !nt.Valid { - return nil, nil - } - return nt.Time, nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go deleted file mode 100644 index 453b4b3944e..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go +++ /dev/null @@ -1,40 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build go1.13 - -package mysql - -import ( - "database/sql" -) - -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... -// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not driver-specific -// -// Deprecated: NullTime doesn't honor the loc DSN parameter. -// NullTime.Scan interprets a time as UTC, not the loc DSN parameter. -// Use sql.NullTime instead. -type NullTime sql.NullTime - -// for internal use. -// the mysql package uses sql.NullTime if it is available. -// if not, the package uses mysql.NullTime. -type nullTime = sql.NullTime // sql.NullTime is available diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go deleted file mode 100644 index 9f7ae27a8eb..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go +++ /dev/null @@ -1,39 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -// +build !go1.13 - -package mysql - -import ( - "time" -) - -// NullTime represents a time.Time that may be NULL. -// NullTime implements the Scanner interface so -// it can be used as a scan destination: -// -// var nt NullTime -// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) -// ... 
-// if nt.Valid { -// // use nt.Time -// } else { -// // NULL value -// } -// -// This NullTime implementation is not driver-specific -type NullTime struct { - Time time.Time - Valid bool // Valid is true if Time is not NULL -} - -// for internal use. -// the mysql package uses sql.NullTime if it is available. -// if not, the package uses mysql.NullTime. -type nullTime = NullTime // sql.NullTime is not available diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go deleted file mode 100644 index 6664e5ae5d3..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ /dev/null @@ -1,1349 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "bytes" - "crypto/tls" - "database/sql/driver" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "time" -) - -// Packets documentation: -// http://dev.mysql.com/doc/internals/en/client-server-protocol.html - -// Read packet to buffer 'data' -func (mc *mysqlConn) readPacket() ([]byte, error) { - var prevData []byte - for { - // read packet header - data, err := mc.buf.readNext(4) - if err != nil { - if cerr := mc.canceled.Value(); cerr != nil { - return nil, cerr - } - errLog.Print(err) - mc.Close() - return nil, ErrInvalidConn - } - - // packet length [24 bit] - pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) - - // check packet sync [8 bit] - if data[3] != mc.sequence { - if data[3] > mc.sequence { - return nil, ErrPktSyncMul - } - return nil, ErrPktSync - } - mc.sequence++ - - // packets with length 0 terminate a previous packet which is a - // multiple of (2^24)-1 bytes long - if pktLen == 0 { - // there was no previous packet - if prevData == nil { - errLog.Print(ErrMalformPkt) - mc.Close() - return nil, ErrInvalidConn - } - - return prevData, nil - } - - // read packet body [pktLen bytes] - data, err = mc.buf.readNext(pktLen) - if err != nil { - if cerr := mc.canceled.Value(); cerr != nil { - return nil, cerr - } - errLog.Print(err) - mc.Close() - return nil, ErrInvalidConn - } - - // return data if this was the last packet - if pktLen < maxPacketSize { - // zero allocations for non-split packets - if prevData == nil { - return data, nil - } - - return append(prevData, data...), nil - } - - prevData = append(prevData, data...) - } -} - -// Write packet buffer 'data' -func (mc *mysqlConn) writePacket(data []byte) error { - pktLen := len(data) - 4 - - if pktLen > mc.maxAllowedPacket { - return ErrPktTooLarge - } - - // Perform a stale connection check. We only perform this check for - // the first query on a connection that has been checked out of the - // connection pool: a fresh connection from the pool is more likely - // to be stale, and it has not performed any previous writes that - // could cause data corruption, so it's safe to return ErrBadConn - // if the check fails. 
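For orientation, the 4-byte framing that readPacket and writePacket implement (a 24-bit little-endian payload length followed by a sequence byte) can be reproduced standalone; the helper names below are ours, not the driver's:

package main

import "fmt"

// encodeHeader builds the 4-byte MySQL packet header: a 24-bit
// little-endian payload length followed by a sequence number.
func encodeHeader(pktLen int, seq byte) [4]byte {
	return [4]byte{byte(pktLen), byte(pktLen >> 8), byte(pktLen >> 16), seq}
}

// decodeHeader is the inverse, mirroring the first lines of readPacket.
func decodeHeader(h [4]byte) (pktLen int, seq byte) {
	return int(uint32(h[0]) | uint32(h[1])<<8 | uint32(h[2])<<16), h[3]
}

func main() {
	h := encodeHeader(0xabcde, 3)
	pktLen, seq := decodeHeader(h)
	fmt.Printf("len=%#x seq=%d\n", pktLen, seq) // len=0xabcde seq=3
}
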
- if mc.reset { - mc.reset = false - conn := mc.netConn - if mc.rawConn != nil { - conn = mc.rawConn - } - var err error - // If this connection has a ReadTimeout which we've been setting on - // reads, reset it to its default value before we attempt a non-blocking - // read, otherwise the scheduler will just time us out before we can read - if mc.cfg.ReadTimeout != 0 { - err = conn.SetReadDeadline(time.Time{}) - } - if err == nil && mc.cfg.CheckConnLiveness { - err = connCheck(conn) - } - if err != nil { - errLog.Print("closing bad idle connection: ", err) - mc.Close() - return driver.ErrBadConn - } - } - - for { - var size int - if pktLen >= maxPacketSize { - data[0] = 0xff - data[1] = 0xff - data[2] = 0xff - size = maxPacketSize - } else { - data[0] = byte(pktLen) - data[1] = byte(pktLen >> 8) - data[2] = byte(pktLen >> 16) - size = pktLen - } - data[3] = mc.sequence - - // Write packet - if mc.writeTimeout > 0 { - if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { - return err - } - } - - n, err := mc.netConn.Write(data[:4+size]) - if err == nil && n == 4+size { - mc.sequence++ - if size != maxPacketSize { - return nil - } - pktLen -= size - data = data[size:] - continue - } - - // Handle error - if err == nil { // n != len(data) - mc.cleanup() - errLog.Print(ErrMalformPkt) - } else { - if cerr := mc.canceled.Value(); cerr != nil { - return cerr - } - if n == 0 && pktLen == len(data)-4 { - // only for the first loop iteration when nothing was written yet - return errBadConnNoWrite - } - mc.cleanup() - errLog.Print(err) - } - return ErrInvalidConn - } -} - -/****************************************************************************** -* Initialization Process * -******************************************************************************/ - -// Handshake Initialization Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake -func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) { - data, err = mc.readPacket() - if err != nil { - // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since - // in connection initialization we don't risk retrying non-idempotent actions. - if err == ErrInvalidConn { - return nil, "", driver.ErrBadConn - } - return - } - - if data[0] == iERR { - return nil, "", mc.handleErrorPacket(data) - } - - // protocol version [1 byte] - if data[0] < minProtocolVersion { - return nil, "", fmt.Errorf( - "unsupported protocol version %d. 
Version %d or higher is required", - data[0], - minProtocolVersion, - ) - } - - // server version [null terminated string] - // connection id [4 bytes] - pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 - - // first part of the password cipher [8 bytes] - authData := data[pos : pos+8] - - // (filler) always 0x00 [1 byte] - pos += 8 + 1 - - // capability flags (lower 2 bytes) [2 bytes] - mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - if mc.flags&clientProtocol41 == 0 { - return nil, "", ErrOldProtocol - } - if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - if mc.cfg.TLSConfig == "preferred" { - mc.cfg.tls = nil - } else { - return nil, "", ErrNoTLS - } - } - pos += 2 - - if len(data) > pos { - // character set [1 byte] - // status flags [2 bytes] - // capability flags (upper 2 bytes) [2 bytes] - // length of auth-plugin-data [1 byte] - // reserved (all [00]) [10 bytes] - pos += 1 + 2 + 2 + 1 + 10 - - // second part of the password cipher [mininum 13 bytes], - // where len=MAX(13, length of auth-plugin-data - 8) - // - // The web documentation is ambiguous about the length. However, - // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, - // the 13th byte is "\0 byte, terminating the second part of - // a scramble". So the second part of the password cipher is - // a NULL terminated string that's at least 13 bytes with the - // last byte being NULL. - // - // The official Python library uses the fixed length 12 - // which seems to work but technically could have a hidden bug. - authData = append(authData, data[pos:pos+12]...) - pos += 13 - - // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) - // \NUL otherwise - if end := bytes.IndexByte(data[pos:], 0x00); end != -1 { - plugin = string(data[pos : pos+end]) - } else { - plugin = string(data[pos:]) - } - - // make a memory safe copy of the cipher slice - var b [20]byte - copy(b[:], authData) - return b[:], plugin, nil - } - - // make a memory safe copy of the cipher slice - var b [8]byte - copy(b[:], authData) - return b[:], plugin, nil -} - -// Client Authentication Packet -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse -func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error { - // Adjust client flags based on server support - clientFlags := clientProtocol41 | - clientSecureConn | - clientLongPassword | - clientTransactions | - clientLocalFiles | - clientPluginAuth | - clientMultiResults | - mc.flags&clientLongFlag - - if mc.cfg.ClientFoundRows { - clientFlags |= clientFoundRows - } - - // To enable TLS / SSL - if mc.cfg.tls != nil { - clientFlags |= clientSSL - } - - if mc.cfg.MultiStatements { - clientFlags |= clientMultiStatements - } - - // encode length of the auth plugin data - var authRespLEIBuf [9]byte - authRespLen := len(authResp) - authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen)) - if len(authRespLEI) > 1 { - // if the length can not be written in 1 byte, it must be written as a - // length encoded integer - clientFlags |= clientPluginAuthLenEncClientData - } - - pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 - - // To specify a db name - if n := len(mc.cfg.DBName); n > 0 { - clientFlags |= clientConnectWithDB - pktLen += n + 1 - } - - // Calculate packet length and get buffer with that size - data, err := mc.buf.takeSmallBuffer(pktLen + 4) - if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // ClientFlags [32 bit] - data[4] = byte(clientFlags) - data[5] = byte(clientFlags >> 8) - data[6] = byte(clientFlags >> 16) - data[7] = byte(clientFlags >> 24) - - // MaxPacketSize [32 bit] (none) - data[8] = 0x00 - data[9] = 0x00 - data[10] = 0x00 - data[11] = 0x00 - - // Charset [1 byte] - var found bool - data[12], found = collations[mc.cfg.Collation] - if !found { - // Note possibility for false negatives: - // could be triggered although the collation is valid if the - // collations map does not contain entries the server supports. - return errors.New("unknown collation") - } - - // Filler [23 bytes] (all 0x00) - pos := 13 - for ; pos < 13+23; pos++ { - data[pos] = 0 - } - - // SSL Connection Request Packet - // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest - if mc.cfg.tls != nil { - // Send TLS / SSL request packet - if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { - return err - } - - // Switch to TLS - tlsConn := tls.Client(mc.netConn, mc.cfg.tls) - if err := tlsConn.Handshake(); err != nil { - return err - } - mc.rawConn = mc.netConn - mc.netConn = tlsConn - mc.buf.nc = tlsConn - } - - // User [null terminated string] - if len(mc.cfg.User) > 0 { - pos += copy(data[pos:], mc.cfg.User) - } - data[pos] = 0x00 - pos++ - - // Auth Data [length encoded integer] - pos += copy(data[pos:], authRespLEI) - pos += copy(data[pos:], authResp) - - // Databasename [null terminated string] - if len(mc.cfg.DBName) > 0 { - pos += copy(data[pos:], mc.cfg.DBName) - data[pos] = 0x00 - pos++ - } - - pos += copy(data[pos:], plugin) - data[pos] = 0x00 - pos++ - - // Send Auth packet - return mc.writePacket(data[:pos]) -} - -// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse -func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { - pktLen := 4 + len(authData) - data, err := mc.buf.takeSmallBuffer(pktLen) - if err != nil { - // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // Add the auth data [EOF] - copy(data[4:], authData) - return mc.writePacket(data) -} - -/****************************************************************************** -* Command Packets * -******************************************************************************/ - -func (mc *mysqlConn) writeCommandPacket(command byte) error { - // Reset Packet Sequence - mc.sequence = 0 - - data, err := mc.buf.takeSmallBuffer(4 + 1) - if err != nil { - // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // Add command byte - data[4] = command - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { - // Reset Packet Sequence - mc.sequence = 0 - - pktLen := 1 + len(arg) - data, err := mc.buf.takeBuffer(pktLen + 4) - if err != nil { - // cannot take the buffer. 
Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // Add command byte - data[4] = command - - // Add arg - copy(data[5:], arg) - - // Send CMD packet - return mc.writePacket(data) -} - -func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { - // Reset Packet Sequence - mc.sequence = 0 - - data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) - if err != nil { - // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // Add command byte - data[4] = command - - // Add arg [32 bit] - data[5] = byte(arg) - data[6] = byte(arg >> 8) - data[7] = byte(arg >> 16) - data[8] = byte(arg >> 24) - - // Send CMD packet - return mc.writePacket(data) -} - -/****************************************************************************** -* Result Packets * -******************************************************************************/ - -func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { - data, err := mc.readPacket() - if err != nil { - return nil, "", err - } - - // packet indicator - switch data[0] { - - case iOK: - return nil, "", mc.handleOkPacket(data) - - case iAuthMoreData: - return data[1:], "", err - - case iEOF: - if len(data) == 1 { - // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest - return nil, "mysql_old_password", nil - } - pluginEndIndex := bytes.IndexByte(data, 0x00) - if pluginEndIndex < 0 { - return nil, "", ErrMalformPkt - } - plugin := string(data[1:pluginEndIndex]) - authData := data[pluginEndIndex+1:] - return authData, plugin, nil - - default: // Error otherwise - return nil, "", mc.handleErrorPacket(data) - } -} - -// Returns error if Packet is not an 'Result OK'-Packet -func (mc *mysqlConn) readResultOK() error { - data, err := mc.readPacket() - if err != nil { - return err - } - - if data[0] == iOK { - return mc.handleOkPacket(data) - } - return mc.handleErrorPacket(data) -} - -// Result Set Header Packet -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset -func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { - data, err := mc.readPacket() - if err == nil { - switch data[0] { - - case iOK: - return 0, mc.handleOkPacket(data) - - case iERR: - return 0, mc.handleErrorPacket(data) - - case iLocalInFile: - return 0, mc.handleInFileRequest(string(data[1:])) - } - - // column count - num, _, n := readLengthEncodedInteger(data) - if n-len(data) == 0 { - return int(num), nil - } - - return 0, ErrMalformPkt - } - return 0, err -} - -// Error Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet -func (mc *mysqlConn) handleErrorPacket(data []byte) error { - if data[0] != iERR { - return ErrMalformPkt - } - - // 0xff [1 byte] - - // Error Number [16 bit uint] - errno := binary.LittleEndian.Uint16(data[1:3]) - - // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION - // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) - if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { - // Oops; we are connected to a read-only connection, and won't be able - // to issue any write statements. Since RejectReadOnly is configured, - // we throw away this connection hoping this one would have write - // permission. This is specifically for a possible race condition - // during failover (e.g. on AWS Aurora). See README.md for more. 
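The RejectReadOnly behavior this comment describes is switched on through the DSN; a minimal sketch with placeholder credentials and host:

package main

import (
	"fmt"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// rejectReadOnly=true makes the driver return driver.ErrBadConn
	// on errno 1792/1290 so database/sql retries the statement on a
	// fresh connection after an Aurora-style failover.
	cfg, err := mysql.ParseDSN("user:pass@tcp(cluster-endpoint:3306)/app?rejectReadOnly=true")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.RejectReadOnly) // true
}
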
- // - // We explicitly close the connection before returning - // driver.ErrBadConn to ensure that `database/sql` purges this - // connection and initiates a new one for next statement next time. - mc.Close() - return driver.ErrBadConn - } - - pos := 3 - - // SQL State [optional: # + 5bytes string] - if data[3] == 0x23 { - //sqlstate := string(data[4 : 4+5]) - pos = 9 - } - - // Error Message [string] - return &MySQLError{ - Number: errno, - Message: string(data[pos:]), - } -} - -func readStatus(b []byte) statusFlag { - return statusFlag(b[0]) | statusFlag(b[1])<<8 -} - -// Ok Packet -// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet -func (mc *mysqlConn) handleOkPacket(data []byte) error { - var n, m int - - // 0x00 [1 byte] - - // Affected rows [Length Coded Binary] - mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) - - // Insert id [Length Coded Binary] - mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) - - // server_status [2 bytes] - mc.status = readStatus(data[1+n+m : 1+n+m+2]) - if mc.status&statusMoreResultsExists != 0 { - return nil - } - - // warning count [2 bytes] - - return nil -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 -func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { - columns := make([]mysqlField, count) - - for i := 0; ; i++ { - data, err := mc.readPacket() - if err != nil { - return nil, err - } - - // EOF Packet - if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { - if i == count { - return columns, nil - } - return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) - } - - // Catalog - pos, err := skipLengthEncodedString(data) - if err != nil { - return nil, err - } - - // Database [len coded string] - n, err := skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Table [len coded string] - if mc.cfg.ColumnsWithAlias { - tableName, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - columns[i].tableName = string(tableName) - } else { - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - } - - // Original table [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Name [len coded string] - name, _, n, err := readLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - columns[i].name = string(name) - pos += n - - // Original name [len coded string] - n, err = skipLengthEncodedString(data[pos:]) - if err != nil { - return nil, err - } - pos += n - - // Filler [uint8] - pos++ - - // Charset [charset, collation uint8] - columns[i].charSet = data[pos] - pos += 2 - - // Length [uint32] - columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) - pos += 4 - - // Field type [uint8] - columns[i].fieldType = fieldType(data[pos]) - pos++ - - // Flags [uint16] - columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) - pos += 2 - - // Decimals [uint8] - columns[i].decimals = data[pos] - //pos++ - - // Default value [len coded binary] - //if pos < len(data) { - // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) - //} - } -} - -// Read Packets as Field Packets until EOF-Packet or an Error appears -// 
http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow -func (rows *textRows) readRow(dest []driver.Value) error { - mc := rows.mc - - if rows.rs.done { - return io.EOF - } - - data, err := mc.readPacket() - if err != nil { - return err - } - - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - // server_status [2 bytes] - rows.mc.status = readStatus(data[3:]) - rows.rs.done = true - if !rows.HasNextResultSet() { - rows.mc = nil - } - return io.EOF - } - if data[0] == iERR { - rows.mc = nil - return mc.handleErrorPacket(data) - } - - // RowSet Packet - var n int - var isNull bool - pos := 0 - - for i := range dest { - // Read bytes and convert to string - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - if !mc.parseTime { - continue - } else { - switch rows.rs.columns[i].fieldType { - case fieldTypeTimestamp, fieldTypeDateTime, - fieldTypeDate, fieldTypeNewDate: - dest[i], err = parseDateTime( - dest[i].([]byte), - mc.cfg.Loc, - ) - if err == nil { - continue - } - default: - continue - } - } - - } else { - dest[i] = nil - continue - } - } - return err // err != nil - } - - return nil -} - -// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read -func (mc *mysqlConn) readUntilEOF() error { - for { - data, err := mc.readPacket() - if err != nil { - return err - } - - switch data[0] { - case iERR: - return mc.handleErrorPacket(data) - case iEOF: - if len(data) == 5 { - mc.status = readStatus(data[3:]) - } - return nil - } - } -} - -/****************************************************************************** -* Prepared Statements * -******************************************************************************/ - -// Prepare Result Packets -// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html -func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { - data, err := stmt.mc.readPacket() - if err == nil { - // packet indicator [1 byte] - if data[0] != iOK { - return 0, stmt.mc.handleErrorPacket(data) - } - - // statement id [4 bytes] - stmt.id = binary.LittleEndian.Uint32(data[1:5]) - - // Column count [16 bit uint] - columnCount := binary.LittleEndian.Uint16(data[5:7]) - - // Param count [16 bit uint] - stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) - - // Reserved [8 bit] - - // Warning count [16 bit uint] - - return columnCount, nil - } - return 0, err -} - -// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html -func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { - maxLen := stmt.mc.maxAllowedPacket - 1 - pktLen := maxLen - - // After the header (bytes 0-3) follows before the data: - // 1 byte command - // 4 bytes stmtID - // 2 bytes paramID - const dataOffset = 1 + 4 + 2 - - // Cannot use the write buffer since - // a) the buffer is too small - // b) it is in use - data := make([]byte, 4+1+4+2+len(arg)) - - copy(data[4+dataOffset:], arg) - - for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { - if dataOffset+argLen < maxLen { - pktLen = dataOffset + argLen - } - - stmt.mc.sequence = 0 - // Add command byte [1 byte] - data[4] = comStmtSendLongData - - // Add stmtID [32 bit] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // Add paramID [16 bit] - data[9] = byte(paramID) - data[10] = byte(paramID >> 8) - - // Send CMD packet - err := stmt.mc.writePacket(data[:4+pktLen]) - if err == 
nil { - data = data[pktLen-dataOffset:] - continue - } - return err - - } - - // Reset Packet Sequence - stmt.mc.sequence = 0 - return nil -} - -// Execute Prepared Statement -// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html -func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { - if len(args) != stmt.paramCount { - return fmt.Errorf( - "argument count mismatch (got: %d; has: %d)", - len(args), - stmt.paramCount, - ) - } - - const minPktLen = 4 + 1 + 4 + 1 + 4 - mc := stmt.mc - - // Determine threshold dynamically to avoid packet size shortage. - longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) - if longDataSize < 64 { - longDataSize = 64 - } - - // Reset packet-sequence - mc.sequence = 0 - - var data []byte - var err error - - if len(args) == 0 { - data, err = mc.buf.takeBuffer(minPktLen) - } else { - data, err = mc.buf.takeCompleteBuffer() - // In this case the len(data) == cap(data) which is used to optimise the flow below. - } - if err != nil { - // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) - return errBadConnNoWrite - } - - // command [1 byte] - data[4] = comStmtExecute - - // statement_id [4 bytes] - data[5] = byte(stmt.id) - data[6] = byte(stmt.id >> 8) - data[7] = byte(stmt.id >> 16) - data[8] = byte(stmt.id >> 24) - - // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] - data[9] = 0x00 - - // iteration_count (uint32(1)) [4 bytes] - data[10] = 0x01 - data[11] = 0x00 - data[12] = 0x00 - data[13] = 0x00 - - if len(args) > 0 { - pos := minPktLen - - var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) { - // buffer has to be extended but we don't know by how much so - // we depend on append after all data with known sizes fit. - // We stop at that because we deal with a lot of columns here - // which makes the required allocation size hard to guess. - tmp := make([]byte, pos+maskLen+typesLen) - copy(tmp[:pos], data[:pos]) - data = tmp - nullMask = data[pos : pos+maskLen] - // No need to clean nullMask as make ensures that. 
- pos += maskLen - } else { - nullMask = data[pos : pos+maskLen] - for i := range nullMask { - nullMask[i] = 0 - } - pos += maskLen - } - - // newParameterBoundFlag 1 [1 byte] - data[pos] = 0x01 - pos++ - - // type of each parameter [len(args)*2 bytes] - paramTypes := data[pos:] - pos += len(args) * 2 - - // value of each parameter [n bytes] - paramValues := data[pos:pos] - valuesCap := cap(paramValues) - - for i, arg := range args { - // build NULL-bitmap - if arg == nil { - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = byte(fieldTypeNULL) - paramTypes[i+i+1] = 0x00 - continue - } - - if v, ok := arg.(json.RawMessage); ok { - arg = []byte(v) - } - // cache types and values - switch v := arg.(type) { - case int64: - paramTypes[i+i] = byte(fieldTypeLongLong) - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } - - case uint64: - paramTypes[i+i] = byte(fieldTypeLongLong) - paramTypes[i+i+1] = 0x80 // type is unsigned - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - uint64(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(uint64(v))..., - ) - } - - case float64: - paramTypes[i+i] = byte(fieldTypeDouble) - paramTypes[i+i+1] = 0x00 - - if cap(paramValues)-len(paramValues)-8 >= 0 { - paramValues = paramValues[:len(paramValues)+8] - binary.LittleEndian.PutUint64( - paramValues[len(paramValues)-8:], - math.Float64bits(v), - ) - } else { - paramValues = append(paramValues, - uint64ToBytes(math.Float64bits(v))..., - ) - } - - case bool: - paramTypes[i+i] = byte(fieldTypeTiny) - paramTypes[i+i+1] = 0x00 - - if v { - paramValues = append(paramValues, 0x01) - } else { - paramValues = append(paramValues, 0x00) - } - - case []byte: - // Common case (non-nil value) first - if v != nil { - paramTypes[i+i] = byte(fieldTypeString) - paramTypes[i+i+1] = 0x00 - - if len(v) < longDataSize { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) - } else { - if err := stmt.writeCommandLongData(i, v); err != nil { - return err - } - } - continue - } - - // Handle []byte(nil) as a NULL value - nullMask[i/8] |= 1 << (uint(i) & 7) - paramTypes[i+i] = byte(fieldTypeNULL) - paramTypes[i+i+1] = 0x00 - - case string: - paramTypes[i+i] = byte(fieldTypeString) - paramTypes[i+i+1] = 0x00 - - if len(v) < longDataSize { - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(v)), - ) - paramValues = append(paramValues, v...) - } else { - if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { - return err - } - } - - case time.Time: - paramTypes[i+i] = byte(fieldTypeString) - paramTypes[i+i+1] = 0x00 - - var a [64]byte - var b = a[:0] - - if v.IsZero() { - b = append(b, "0000-00-00"...) - } else { - b, err = appendDateTime(b, v.In(mc.cfg.Loc)) - if err != nil { - return err - } - } - - paramValues = appendLengthEncodedInteger(paramValues, - uint64(len(b)), - ) - paramValues = append(paramValues, b...) 
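Aside: the NULL-bitmap this switch populates flags parameter i by setting bit i%8 of byte i/8; a standalone sketch (function name is ours):

package main

import "fmt"

// buildNullMask computes the COM_STMT_EXECUTE NULL-bitmap for a
// parameter list: one bit per argument, set when the argument is nil.
func buildNullMask(args []interface{}) []byte {
	mask := make([]byte, (len(args)+7)/8)
	for i, a := range args {
		if a == nil {
			mask[i/8] |= 1 << (uint(i) & 7)
		}
	}
	return mask
}

func main() {
	mask := buildNullMask([]interface{}{nil, 1, nil})
	fmt.Printf("%08b\n", mask[0]) // 00000101: parameters 0 and 2 are NULL
}
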
- - default: - return fmt.Errorf("cannot convert type: %T", arg) - } - } - - // Check if param values exceeded the available buffer - // In that case we must build the data packet with the new values buffer - if valuesCap != cap(paramValues) { - data = append(data[:pos], paramValues...) - if err = mc.buf.store(data); err != nil { - errLog.Print(err) - return errBadConnNoWrite - } - } - - pos += len(paramValues) - data = data[:pos] - } - - return mc.writePacket(data) -} - -func (mc *mysqlConn) discardResults() error { - for mc.status&statusMoreResultsExists != 0 { - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return err - } - if resLen > 0 { - // columns - if err := mc.readUntilEOF(); err != nil { - return err - } - // rows - if err := mc.readUntilEOF(); err != nil { - return err - } - } - } - return nil -} - -// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html -func (rows *binaryRows) readRow(dest []driver.Value) error { - data, err := rows.mc.readPacket() - if err != nil { - return err - } - - // packet indicator [1 byte] - if data[0] != iOK { - // EOF Packet - if data[0] == iEOF && len(data) == 5 { - rows.mc.status = readStatus(data[3:]) - rows.rs.done = true - if !rows.HasNextResultSet() { - rows.mc = nil - } - return io.EOF - } - mc := rows.mc - rows.mc = nil - - // Error otherwise - return mc.handleErrorPacket(data) - } - - // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] - pos := 1 + (len(dest)+7+2)>>3 - nullMask := data[1:pos] - - for i := range dest { - // Field is NULL - // (byte >> bit-pos) % 2 == 1 - if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { - dest[i] = nil - continue - } - - // Convert to byte-coded string - switch rows.rs.columns[i].fieldType { - case fieldTypeNULL: - dest[i] = nil - continue - - // Numeric Types - case fieldTypeTiny: - if rows.rs.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(data[pos]) - } else { - dest[i] = int64(int8(data[pos])) - } - pos++ - continue - - case fieldTypeShort, fieldTypeYear: - if rows.rs.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) - } else { - dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) - } - pos += 2 - continue - - case fieldTypeInt24, fieldTypeLong: - if rows.rs.columns[i].flags&flagUnsigned != 0 { - dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) - } else { - dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) - } - pos += 4 - continue - - case fieldTypeLongLong: - if rows.rs.columns[i].flags&flagUnsigned != 0 { - val := binary.LittleEndian.Uint64(data[pos : pos+8]) - if val > math.MaxInt64 { - dest[i] = uint64ToString(val) - } else { - dest[i] = int64(val) - } - } else { - dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) - } - pos += 8 - continue - - case fieldTypeFloat: - dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])) - pos += 4 - continue - - case fieldTypeDouble: - dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) - pos += 8 - continue - - // Length coded Binary Strings - case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, - fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, - fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, - fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: - var isNull bool - var n int - dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) - pos += n - if err == nil { - if !isNull { - continue - } else { 
- dest[i] = nil - continue - } - } - return err - - case - fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD - fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] - fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] - - num, isNull, n := readLengthEncodedInteger(data[pos:]) - pos += n - - switch { - case isNull: - dest[i] = nil - continue - case rows.rs.columns[i].fieldType == fieldTypeTime: - // database/sql does not support an equivalent to TIME, return a string - var dstlen uint8 - switch decimals := rows.rs.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 8 - case 1, 2, 3, 4, 5, 6: - dstlen = 8 + 1 + decimals - default: - return fmt.Errorf( - "protocol error, illegal decimals value %d", - rows.rs.columns[i].decimals, - ) - } - dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen) - case rows.mc.parseTime: - dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) - default: - var dstlen uint8 - if rows.rs.columns[i].fieldType == fieldTypeDate { - dstlen = 10 - } else { - switch decimals := rows.rs.columns[i].decimals; decimals { - case 0x00, 0x1f: - dstlen = 19 - case 1, 2, 3, 4, 5, 6: - dstlen = 19 + 1 + decimals - default: - return fmt.Errorf( - "protocol error, illegal decimals value %d", - rows.rs.columns[i].decimals, - ) - } - } - dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen) - } - - if err == nil { - pos += int(num) - continue - } else { - return err - } - - // Please report if this happens! - default: - return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType) - } - } - - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go deleted file mode 100644 index c6438d0347d..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/result.go +++ /dev/null @@ -1,22 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -type mysqlResult struct { - affectedRows int64 - insertId int64 -} - -func (res *mysqlResult) LastInsertId() (int64, error) { - return res.insertId, nil -} - -func (res *mysqlResult) RowsAffected() (int64, error) { - return res.affectedRows, nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go deleted file mode 100644 index 888bdb5f0ad..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/rows.go +++ /dev/null @@ -1,223 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
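The ColumnType* methods in the rows.go file deleted below surface through database/sql; a small sketch of reading them, with a placeholder query:

package main

import (
	"database/sql"
	"fmt"
)

// describeColumns prints the driver-reported column metadata that
// database/sql obtains via the ColumnType* methods in rows.go.
func describeColumns(db *sql.DB) error {
	rows, err := db.Query("SELECT id, created_at FROM foo LIMIT 1")
	if err != nil {
		return err
	}
	defer rows.Close()

	types, err := rows.ColumnTypes()
	if err != nil {
		return err
	}
	for _, t := range types {
		nullable, ok := t.Nullable()
		fmt.Println(t.Name(), t.DatabaseTypeName(), t.ScanType(), nullable, ok)
	}
	return rows.Err()
}
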
- -package mysql - -import ( - "database/sql/driver" - "io" - "math" - "reflect" -) - -type resultSet struct { - columns []mysqlField - columnNames []string - done bool -} - -type mysqlRows struct { - mc *mysqlConn - rs resultSet - finish func() -} - -type binaryRows struct { - mysqlRows -} - -type textRows struct { - mysqlRows -} - -func (rows *mysqlRows) Columns() []string { - if rows.rs.columnNames != nil { - return rows.rs.columnNames - } - - columns := make([]string, len(rows.rs.columns)) - if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { - for i := range columns { - if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { - columns[i] = tableName + "." + rows.rs.columns[i].name - } else { - columns[i] = rows.rs.columns[i].name - } - } - } else { - for i := range columns { - columns[i] = rows.rs.columns[i].name - } - } - - rows.rs.columnNames = columns - return columns -} - -func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { - return rows.rs.columns[i].typeDatabaseName() -} - -// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { -// return int64(rows.rs.columns[i].length), true -// } - -func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { - return rows.rs.columns[i].flags&flagNotNULL == 0, true -} - -func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { - column := rows.rs.columns[i] - decimals := int64(column.decimals) - - switch column.fieldType { - case fieldTypeDecimal, fieldTypeNewDecimal: - if decimals > 0 { - return int64(column.length) - 2, decimals, true - } - return int64(column.length) - 1, decimals, true - case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: - return decimals, decimals, true - case fieldTypeFloat, fieldTypeDouble: - if decimals == 0x1f { - return math.MaxInt64, math.MaxInt64, true - } - return math.MaxInt64, decimals, true - } - - return 0, 0, false -} - -func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { - return rows.rs.columns[i].scanType() -} - -func (rows *mysqlRows) Close() (err error) { - if f := rows.finish; f != nil { - f() - rows.finish = nil - } - - mc := rows.mc - if mc == nil { - return nil - } - if err := mc.error(); err != nil { - return err - } - - // flip the buffer for this connection if we need to drain it. - // note that for a successful query (i.e. 
one where rows.next() - // has been called until it returns false), `rows.mc` will be nil - // by the time the user calls `(*Rows).Close`, so we won't reach this - // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 - mc.buf.flip() - - // Remove unread packets from stream - if !rows.rs.done { - err = mc.readUntilEOF() - } - if err == nil { - if err = mc.discardResults(); err != nil { - return err - } - } - - rows.mc = nil - return err -} - -func (rows *mysqlRows) HasNextResultSet() (b bool) { - if rows.mc == nil { - return false - } - return rows.mc.status&statusMoreResultsExists != 0 -} - -func (rows *mysqlRows) nextResultSet() (int, error) { - if rows.mc == nil { - return 0, io.EOF - } - if err := rows.mc.error(); err != nil { - return 0, err - } - - // Remove unread packets from stream - if !rows.rs.done { - if err := rows.mc.readUntilEOF(); err != nil { - return 0, err - } - rows.rs.done = true - } - - if !rows.HasNextResultSet() { - rows.mc = nil - return 0, io.EOF - } - rows.rs = resultSet{} - return rows.mc.readResultSetHeaderPacket() -} - -func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { - for { - resLen, err := rows.nextResultSet() - if err != nil { - return 0, err - } - - if resLen > 0 { - return resLen, nil - } - - rows.rs.done = true - } -} - -func (rows *binaryRows) NextResultSet() error { - resLen, err := rows.nextNotEmptyResultSet() - if err != nil { - return err - } - - rows.rs.columns, err = rows.mc.readColumns(resLen) - return err -} - -func (rows *binaryRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if err := mc.error(); err != nil { - return err - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} - -func (rows *textRows) NextResultSet() (err error) { - resLen, err := rows.nextNotEmptyResultSet() - if err != nil { - return err - } - - rows.rs.columns, err = rows.mc.readColumns(resLen) - return err -} - -func (rows *textRows) Next(dest []driver.Value) error { - if mc := rows.mc; mc != nil { - if err := mc.error(); err != nil { - return err - } - - // Fetch next row from stream - return rows.readRow(dest) - } - return io.EOF -} diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go deleted file mode 100644 index 18a3ae49892..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/statement.go +++ /dev/null @@ -1,220 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -import ( - "database/sql/driver" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type mysqlStmt struct { - mc *mysqlConn - id uint32 - paramCount int -} - -func (stmt *mysqlStmt) Close() error { - if stmt.mc == nil || stmt.mc.closed.IsSet() { - // driver.Stmt.Close can be called more than once, thus this function - // has to be idempotent. - // See also Issue #450 and golang/go#16019. 
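The NextResultSet plumbing above is what database/sql drives when draining multiple result sets; a sketch assuming multiStatements=true in the DSN and placeholder queries:

package main

import (
	"database/sql"
	"fmt"
)

// printBoth drains two result sets produced by one round trip.
func printBoth(db *sql.DB) error {
	rows, err := db.Query("SELECT 1; SELECT 2")
	if err != nil {
		return err
	}
	defer rows.Close()

	for {
		for rows.Next() {
			var n int
			if err := rows.Scan(&n); err != nil {
				return err
			}
			fmt.Println(n)
		}
		if !rows.NextResultSet() {
			break
		}
	}
	return rows.Err()
}
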
- //errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) - stmt.mc = nil - return err -} - -func (stmt *mysqlStmt) NumInput() int { - return stmt.paramCount -} - -func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { - return converter{} -} - -func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) { - nv.Value, err = converter{}.ConvertValue(nv.Value) - return -} - -func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { - if stmt.mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, stmt.mc.markBadConn(err) - } - - mc := stmt.mc - - mc.affectedRows = 0 - mc.insertId = 0 - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return nil, err - } - - if resLen > 0 { - // Columns - if err = mc.readUntilEOF(); err != nil { - return nil, err - } - - // Rows - if err := mc.readUntilEOF(); err != nil { - return nil, err - } - } - - if err := mc.discardResults(); err != nil { - return nil, err - } - - return &mysqlResult{ - affectedRows: int64(mc.affectedRows), - insertId: int64(mc.insertId), - }, nil -} - -func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { - return stmt.query(args) -} - -func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { - if stmt.mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return nil, driver.ErrBadConn - } - // Send command - err := stmt.writeExecutePacket(args) - if err != nil { - return nil, stmt.mc.markBadConn(err) - } - - mc := stmt.mc - - // Read Result - resLen, err := mc.readResultSetHeaderPacket() - if err != nil { - return nil, err - } - - rows := new(binaryRows) - - if resLen > 0 { - rows.mc = mc - rows.rs.columns, err = mc.readColumns(resLen) - } else { - rows.rs.done = true - - switch err := rows.NextResultSet(); err { - case nil, io.EOF: - return rows, nil - default: - return nil, err - } - } - - return rows, err -} - -var jsonType = reflect.TypeOf(json.RawMessage{}) - -type converter struct{} - -// ConvertValue mirrors the reference/default converter in database/sql/driver -// with _one_ exception. We support uint64 with their high bit and the default -// implementation does not. This function should be kept in sync with -// database/sql/driver defaultConverter.ConvertValue() except for that -// deliberate difference. -func (c converter) ConvertValue(v interface{}) (driver.Value, error) { - if driver.IsValue(v) { - return v, nil - } - - if vr, ok := v.(driver.Valuer); ok { - sv, err := callValuerValue(vr) - if err != nil { - return nil, err - } - if driver.IsValue(sv) { - return sv, nil - } - // A value returend from the Valuer interface can be "a type handled by - // a database driver's NamedValueChecker interface" so we should accept - // uint64 here as well. 
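The Exec path above pairs with database/sql's prepared statements; a minimal round trip, with placeholder table and column names:

package main

import "database/sql"

// insertName exercises the prepared-statement path implemented above
// (COM_STMT_PREPARE followed by COM_STMT_EXECUTE).
func insertName(db *sql.DB, name string) (int64, error) {
	stmt, err := db.Prepare("INSERT INTO foo (name) VALUES (?)")
	if err != nil {
		return 0, err
	}
	defer stmt.Close()

	res, err := stmt.Exec(name)
	if err != nil {
		return 0, err
	}
	// LastInsertId comes from the OK packet's insert id field.
	return res.LastInsertId()
}
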
- if u, ok := sv.(uint64); ok { - return u, nil - } - return nil, fmt.Errorf("non-Value type %T returned from Value", sv) - } - rv := reflect.ValueOf(v) - switch rv.Kind() { - case reflect.Ptr: - // indirect pointers - if rv.IsNil() { - return nil, nil - } else { - return c.ConvertValue(rv.Elem().Interface()) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return rv.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return rv.Uint(), nil - case reflect.Float32, reflect.Float64: - return rv.Float(), nil - case reflect.Bool: - return rv.Bool(), nil - case reflect.Slice: - switch t := rv.Type(); { - case t == jsonType: - return v, nil - case t.Elem().Kind() == reflect.Uint8: - return rv.Bytes(), nil - default: - return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind()) - } - case reflect.String: - return rv.String(), nil - } - return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) -} - -var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() - -// callValuerValue returns vr.Value(), with one exception: -// If vr.Value is an auto-generated method on a pointer type and the -// pointer is nil, it would panic at runtime in the panicwrap -// method. Treat it like nil instead. -// -// This is so people can implement driver.Value on value types and -// still use nil pointers to those types to mean nil/NULL, just like -// string/*string. -// -// This is an exact copy of the same-named unexported function from the -// database/sql package. -func callValuerValue(vr driver.Valuer) (v driver.Value, err error) { - if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr && - rv.IsNil() && - rv.Type().Elem().Implements(valuerReflectType) { - return nil, nil - } - return vr.Value() -} diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go deleted file mode 100644 index 417d72793b1..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/transaction.go +++ /dev/null @@ -1,31 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. - -package mysql - -type mysqlTx struct { - mc *mysqlConn -} - -func (tx *mysqlTx) Commit() (err error) { - if tx.mc == nil || tx.mc.closed.IsSet() { - return ErrInvalidConn - } - err = tx.mc.exec("COMMIT") - tx.mc = nil - return -} - -func (tx *mysqlTx) Rollback() (err error) { - if tx.mc == nil || tx.mc.closed.IsSet() { - return ErrInvalidConn - } - err = tx.mc.exec("ROLLBACK") - tx.mc = nil - return -} diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go deleted file mode 100644 index d6545f5be88..00000000000 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ /dev/null @@ -1,868 +0,0 @@ -// Go MySQL Driver - A MySQL-Driver for Go's database/sql package -// -// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. -// -// This Source Code Form is subject to the terms of the Mozilla Public -// License, v. 2.0. If a copy of the MPL was not distributed with this file, -// You can obtain one at http://mozilla.org/MPL/2.0/. 
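The Commit and Rollback methods in the transaction.go file deleted below are exercised through database/sql's Tx; a sketch with placeholder table and column names:

package main

import (
	"context"
	"database/sql"
)

// transfer wraps two statements in one transaction; COMMIT and
// ROLLBACK travel through the exec path shown in transaction.go.
func transfer(ctx context.Context, db *sql.DB, from, to int64, amt int) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback after a successful Commit returns sql.ErrTxDone,
	// which is safe to ignore here.
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx, "UPDATE acct SET bal = bal - ? WHERE id = ?", amt, from); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx, "UPDATE acct SET bal = bal + ? WHERE id = ?", amt, to); err != nil {
		return err
	}
	return tx.Commit()
}
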
- -package mysql - -import ( - "crypto/tls" - "database/sql" - "database/sql/driver" - "encoding/binary" - "errors" - "fmt" - "io" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Registry for custom tls.Configs -var ( - tlsConfigLock sync.RWMutex - tlsConfigRegistry map[string]*tls.Config -) - -// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. -// Use the key as a value in the DSN where tls=value. -// -// Note: The provided tls.Config is exclusively owned by the driver after -// registering it. -// -// rootCertPool := x509.NewCertPool() -// pem, err := ioutil.ReadFile("/path/ca-cert.pem") -// if err != nil { -// log.Fatal(err) -// } -// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { -// log.Fatal("Failed to append PEM.") -// } -// clientCert := make([]tls.Certificate, 0, 1) -// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") -// if err != nil { -// log.Fatal(err) -// } -// clientCert = append(clientCert, certs) -// mysql.RegisterTLSConfig("custom", &tls.Config{ -// RootCAs: rootCertPool, -// Certificates: clientCert, -// }) -// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") -// -func RegisterTLSConfig(key string, config *tls.Config) error { - if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" { - return fmt.Errorf("key '%s' is reserved", key) - } - - tlsConfigLock.Lock() - if tlsConfigRegistry == nil { - tlsConfigRegistry = make(map[string]*tls.Config) - } - - tlsConfigRegistry[key] = config - tlsConfigLock.Unlock() - return nil -} - -// DeregisterTLSConfig removes the tls.Config associated with key. -func DeregisterTLSConfig(key string) { - tlsConfigLock.Lock() - if tlsConfigRegistry != nil { - delete(tlsConfigRegistry, key) - } - tlsConfigLock.Unlock() -} - -func getTLSConfigClone(key string) (config *tls.Config) { - tlsConfigLock.RLock() - if v, ok := tlsConfigRegistry[key]; ok { - config = v.Clone() - } - tlsConfigLock.RUnlock() - return -} - -// Returns the bool value of the input. 
-// The 2nd return value indicates if the input was a valid bool value
-func readBool(input string) (value bool, valid bool) {
-	switch input {
-	case "1", "true", "TRUE", "True":
-		return true, true
-	case "0", "false", "FALSE", "False":
-		return false, true
-	}
-
-	// Not a valid bool value
-	return
-}
-
-/******************************************************************************
-*                             Time related utils                             *
-******************************************************************************/
-
-func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
-	const base = "0000-00-00 00:00:00.000000"
-	switch len(b) {
-	case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
-		if string(b) == base[:len(b)] {
-			return time.Time{}, nil
-		}
-
-		year, err := parseByteYear(b)
-		if err != nil {
-			return time.Time{}, err
-		}
-		if year <= 0 {
-			year = 1
-		}
-
-		if b[4] != '-' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
-		}
-
-		m, err := parseByte2Digits(b[5], b[6])
-		if err != nil {
-			return time.Time{}, err
-		}
-		if m <= 0 {
-			m = 1
-		}
-		month := time.Month(m)
-
-		if b[7] != '-' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
-		}
-
-		day, err := parseByte2Digits(b[8], b[9])
-		if err != nil {
-			return time.Time{}, err
-		}
-		if day <= 0 {
-			day = 1
-		}
-		if len(b) == 10 {
-			return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
-		}
-
-		if b[10] != ' ' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
-		}
-
-		hour, err := parseByte2Digits(b[11], b[12])
-		if err != nil {
-			return time.Time{}, err
-		}
-		if b[13] != ':' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
-		}
-
-		min, err := parseByte2Digits(b[14], b[15])
-		if err != nil {
-			return time.Time{}, err
-		}
-		if b[16] != ':' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
-		}
-
-		sec, err := parseByte2Digits(b[17], b[18])
-		if err != nil {
-			return time.Time{}, err
-		}
-		if len(b) == 19 {
-			return time.Date(year, month, day, hour, min, sec, 0, loc), nil
-		}
-
-		if b[19] != '.' {
-			return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
-		}
-		nsec, err := parseByteNanoSec(b[20:])
-		if err != nil {
-			return time.Time{}, err
-		}
-		return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
-	default:
-		return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
-	}
-}
-
-func parseByteYear(b []byte) (int, error) {
-	year, n := 0, 1000
-	for i := 0; i < 4; i++ {
-		v, err := bToi(b[i])
-		if err != nil {
-			return 0, err
-		}
-		year += v * n
-		n = n / 10
-	}
-	return year, nil
-}
-
-func parseByte2Digits(b1, b2 byte) (int, error) {
-	d1, err := bToi(b1)
-	if err != nil {
-		return 0, err
-	}
-	d2, err := bToi(b2)
-	if err != nil {
-		return 0, err
-	}
-	return d1*10 + d2, nil
-}
-
-func parseByteNanoSec(b []byte) (int, error) {
-	ns, digit := 0, 100000 // max is 6-digits
-	for i := 0; i < len(b); i++ {
-		v, err := bToi(b[i])
-		if err != nil {
-			return 0, err
-		}
-		ns += v * digit
-		digit /= 10
-	}
-	// a second has 1e9 nanoseconds but at most 6 fractional digits
-	// are parsed above, so scale by 10^(9-6), i.e. multiply by 1000.
- return ns * 1000, nil -} - -func bToi(b byte) (int, error) { - if b < '0' || b > '9' { - return 0, errors.New("not [0-9]") - } - return int(b - '0'), nil -} - -func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { - switch num { - case 0: - return time.Time{}, nil - case 4: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - 0, 0, 0, 0, - loc, - ), nil - case 7: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - 0, - loc, - ), nil - case 11: - return time.Date( - int(binary.LittleEndian.Uint16(data[:2])), // year - time.Month(data[2]), // month - int(data[3]), // day - int(data[4]), // hour - int(data[5]), // minutes - int(data[6]), // seconds - int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds - loc, - ), nil - } - return nil, fmt.Errorf("invalid DATETIME packet length %d", num) -} - -func appendDateTime(buf []byte, t time.Time) ([]byte, error) { - year, month, day := t.Date() - hour, min, sec := t.Clock() - nsec := t.Nanosecond() - - if year < 1 || year > 9999 { - return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap - } - year100 := year / 100 - year1 := year % 100 - - var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape - localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1] - localBuf[4] = '-' - localBuf[5], localBuf[6] = digits10[month], digits01[month] - localBuf[7] = '-' - localBuf[8], localBuf[9] = digits10[day], digits01[day] - - if hour == 0 && min == 0 && sec == 0 && nsec == 0 { - return append(buf, localBuf[:10]...), nil - } - - localBuf[10] = ' ' - localBuf[11], localBuf[12] = digits10[hour], digits01[hour] - localBuf[13] = ':' - localBuf[14], localBuf[15] = digits10[min], digits01[min] - localBuf[16] = ':' - localBuf[17], localBuf[18] = digits10[sec], digits01[sec] - - if nsec == 0 { - return append(buf, localBuf[:19]...), nil - } - nsec100000000 := nsec / 100000000 - nsec1000000 := (nsec / 1000000) % 100 - nsec10000 := (nsec / 10000) % 100 - nsec100 := (nsec / 100) % 100 - nsec1 := nsec % 100 - localBuf[19] = '.' - - // milli second - localBuf[20], localBuf[21], localBuf[22] = - digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000] - // micro second - localBuf[23], localBuf[24], localBuf[25] = - digits10[nsec10000], digits01[nsec10000], digits10[nsec100] - // nano second - localBuf[26], localBuf[27], localBuf[28] = - digits01[nsec100], digits10[nsec1], digits01[nsec1] - - // trim trailing zeros - n := len(localBuf) - for n > 0 && localBuf[n-1] == '0' { - n-- - } - - return append(buf, localBuf[:n]...), nil -} - -// zeroDateTime is used in formatBinaryDateTime to avoid an allocation -// if the DATE or DATETIME has the zero value. -// It must never be changed. -// The current behavior depends on database/sql copying the result. 
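The fractional-second scaling parseByteNanoSec performs above can be checked with a tiny standalone version (function name is ours):

package main

import "fmt"

// fracToNanos converts up to six fractional-second digits into
// nanoseconds, mirroring parseByteNanoSec's accumulate-then-scale step.
func fracToNanos(frac string) int {
	ns, digit := 0, 100000
	for i := 0; i < len(frac); i++ {
		ns += int(frac[i]-'0') * digit
		digit /= 10
	}
	return ns * 1000
}

func main() {
	fmt.Println(fracToNanos("123")) // 123000000, i.e. 0.123s
}
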
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000") - -const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" -const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" - -func appendMicrosecs(dst, src []byte, decimals int) []byte { - if decimals <= 0 { - return dst - } - if len(src) == 0 { - return append(dst, ".000000"[:decimals+1]...) - } - - microsecs := binary.LittleEndian.Uint32(src[:4]) - p1 := byte(microsecs / 10000) - microsecs -= 10000 * uint32(p1) - p2 := byte(microsecs / 100) - microsecs -= 100 * uint32(p2) - p3 := byte(microsecs) - - switch decimals { - default: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], digits01[p3], - ) - case 1: - return append(dst, '.', - digits10[p1], - ) - case 2: - return append(dst, '.', - digits10[p1], digits01[p1], - ) - case 3: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], - ) - case 4: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - ) - case 5: - return append(dst, '.', - digits10[p1], digits01[p1], - digits10[p2], digits01[p2], - digits10[p3], - ) - } -} - -func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) { - // length expects the deterministic length of the zero value, - // negative time and 100+ hours are automatically added if needed - if len(src) == 0 { - return zeroDateTime[:length], nil - } - var dst []byte // return value - var p1, p2, p3 byte // current digit pair - - switch length { - case 10, 19, 21, 22, 23, 24, 25, 26: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s length %d", t, length) - } - switch len(src) { - case 4, 7, 11: - default: - t := "DATE" - if length > 10 { - t += "TIME" - } - return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) - } - dst = make([]byte, 0, length) - // start with the date - year := binary.LittleEndian.Uint16(src[:2]) - pt := year / 100 - p1 = byte(year - 100*uint16(pt)) - p2, p3 = src[2], src[3] - dst = append(dst, - digits10[pt], digits01[pt], - digits10[p1], digits01[p1], '-', - digits10[p2], digits01[p2], '-', - digits10[p3], digits01[p3], - ) - if length == 10 { - return dst, nil - } - if len(src) == 4 { - return append(dst, zeroDateTime[10:length]...), nil - } - dst = append(dst, ' ') - p1 = src[4] // hour - src = src[5:] - - // p1 is 2-digit hour, src is after hour - p2, p3 = src[0], src[1] - dst = append(dst, - digits10[p1], digits01[p1], ':', - digits10[p2], digits01[p2], ':', - digits10[p3], digits01[p3], - ) - return appendMicrosecs(dst, src[2:], int(length)-20), nil -} - -func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { - // length expects the deterministic length of the zero value, - // negative time and 100+ hours are automatically added if needed - if len(src) == 0 { - return zeroDateTime[11 : 11+length], nil - } - var dst []byte // return value - - switch length { - case - 8, // time (can be up to 10 when negative and 100+ hours) - 10, 11, 12, 13, 14, 15: // time with fractional seconds - default: - return nil, fmt.Errorf("illegal TIME length %d", length) - } - switch len(src) { - case 8, 12: - default: - return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) - } - // +2 to enable negative time and 100+ hours - dst = make([]byte, 0, length+2) - if src[0] == 1 { - dst = append(dst, '-') - } - days := 
binary.LittleEndian.Uint32(src[1:5]) - hours := int64(days)*24 + int64(src[5]) - - if hours >= 100 { - dst = strconv.AppendInt(dst, hours, 10) - } else { - dst = append(dst, digits10[hours], digits01[hours]) - } - - min, sec := src[6], src[7] - dst = append(dst, ':', - digits10[min], digits01[min], ':', - digits10[sec], digits01[sec], - ) - return appendMicrosecs(dst, src[8:], int(length)-9), nil -} - -/****************************************************************************** -* Convert from and to bytes * -******************************************************************************/ - -func uint64ToBytes(n uint64) []byte { - return []byte{ - byte(n), - byte(n >> 8), - byte(n >> 16), - byte(n >> 24), - byte(n >> 32), - byte(n >> 40), - byte(n >> 48), - byte(n >> 56), - } -} - -func uint64ToString(n uint64) []byte { - var a [20]byte - i := 20 - - // U+0030 = 0 - // ... - // U+0039 = 9 - - var q uint64 - for n >= 10 { - i-- - q = n / 10 - a[i] = uint8(n-q*10) + 0x30 - n = q - } - - i-- - a[i] = uint8(n) + 0x30 - - return a[i:] -} - -// treats string value as unsigned integer representation -func stringToInt(b []byte) int { - val := 0 - for i := range b { - val *= 10 - val += int(b[i] - 0x30) - } - return val -} - -// returns the string read as a bytes slice, wheter the value is NULL, -// the number of bytes read and an error, in case the string is longer than -// the input slice -func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { - // Get length - num, isNull, n := readLengthEncodedInteger(b) - if num < 1 { - return b[n:n], isNull, n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return b[n-int(num) : n : n], false, n, nil - } - return nil, false, n, io.EOF -} - -// returns the number of bytes skipped and an error, in case the string is -// longer than the input slice -func skipLengthEncodedString(b []byte) (int, error) { - // Get length - num, _, n := readLengthEncodedInteger(b) - if num < 1 { - return n, nil - } - - n += int(num) - - // Check data length - if len(b) >= n { - return n, nil - } - return n, io.EOF -} - -// returns the number read, whether the value is NULL and the number of bytes read -func readLengthEncodedInteger(b []byte) (uint64, bool, int) { - // See issue #349 - if len(b) == 0 { - return 0, true, 1 - } - - switch b[0] { - // 251: NULL - case 0xfb: - return 0, true, 1 - - // 252: value of following 2 - case 0xfc: - return uint64(b[1]) | uint64(b[2])<<8, false, 3 - - // 253: value of following 3 - case 0xfd: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 - - // 254: value of following 8 - case 0xfe: - return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | - uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | - uint64(b[7])<<48 | uint64(b[8])<<56, - false, 9 - } - - // 0-250: value of first byte - return uint64(b[0]), false, 1 -} - -// encodes a uint64 value and appends it to the given bytes slice -func appendLengthEncodedInteger(b []byte, n uint64) []byte { - switch { - case n <= 250: - return append(b, byte(n)) - - case n <= 0xffff: - return append(b, 0xfc, byte(n), byte(n>>8)) - - case n <= 0xffffff: - return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) - } - return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), - byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) -} - -// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. -// If cap(buf) is not enough, reallocate new buffer. 
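// The readLengthEncodedInteger/appendLengthEncodedInteger pair above
// implements the MySQL protocol's length-encoded integer: values 0-250 fit
// in a single byte, prefixes 0xfc, 0xfd and 0xfe introduce 2-, 3- and 8-byte
// little-endian payloads, and 0xfb marks a NULL column value. A self-contained
// round-trip sketch of the same scheme (an illustration, independent of the
// driver's internal helpers):
package main

import "fmt"

func encodeLenEnc(b []byte, n uint64) []byte {
	switch {
	case n <= 250: // the byte is the value itself
		return append(b, byte(n))
	case n <= 0xffff:
		return append(b, 0xfc, byte(n), byte(n>>8))
	case n <= 0xffffff:
		return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
	}
	return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
		byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
}

func decodeLenEnc(b []byte) (value uint64, isNull bool, size int) {
	switch b[0] {
	case 0xfb: // NULL column value
		return 0, true, 1
	case 0xfc, 0xfd, 0xfe:
		width := map[byte]int{0xfc: 2, 0xfd: 3, 0xfe: 8}[b[0]]
		var n uint64
		for i := width; i >= 1; i-- { // payload is little-endian
			n = n<<8 | uint64(b[i])
		}
		return n, false, width + 1
	}
	return uint64(b[0]), false, 1 // 0-250: value of the first byte
}

func main() {
	for _, n := range []uint64{10, 251, 70000, 1 << 40} {
		buf := encodeLenEnc(nil, n)
		v, _, size := decodeLenEnc(buf)
		fmt.Printf("%d -> % x -> %d (%d bytes)\n", n, buf, v, size)
	}
}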
-func reserveBuffer(buf []byte, appendSize int) []byte { - newSize := len(buf) + appendSize - if cap(buf) < newSize { - // Grow buffer exponentially - newBuf := make([]byte, len(buf)*2+appendSize) - copy(newBuf, buf) - buf = newBuf - } - return buf[:newSize] -} - -// escapeBytesBackslash escapes []byte with backslashes (\) -// This escapes the contents of a string (provided as []byte) by adding backslashes before special -// characters, and turning others into specific escape sequences, such as -// turning newlines into \n and null bytes into \0. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 -func escapeBytesBackslash(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringBackslash is similar to escapeBytesBackslash but for string. -func escapeStringBackslash(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - switch c { - case '\x00': - buf[pos] = '\\' - buf[pos+1] = '0' - pos += 2 - case '\n': - buf[pos] = '\\' - buf[pos+1] = 'n' - pos += 2 - case '\r': - buf[pos] = '\\' - buf[pos+1] = 'r' - pos += 2 - case '\x1a': - buf[pos] = '\\' - buf[pos+1] = 'Z' - pos += 2 - case '\'': - buf[pos] = '\\' - buf[pos+1] = '\'' - pos += 2 - case '"': - buf[pos] = '\\' - buf[pos+1] = '"' - pos += 2 - case '\\': - buf[pos] = '\\' - buf[pos+1] = '\\' - pos += 2 - default: - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. -// This escapes the contents of a string by doubling up any apostrophes that -// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in -// effect on the server. -// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 -func escapeBytesQuotes(buf, v []byte) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for _, c := range v { - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -// escapeStringQuotes is similar to escapeBytesQuotes but for string. -func escapeStringQuotes(buf []byte, v string) []byte { - pos := len(buf) - buf = reserveBuffer(buf, len(v)*2) - - for i := 0; i < len(v); i++ { - c := v[i] - if c == '\'' { - buf[pos] = '\'' - buf[pos+1] = '\'' - pos += 2 - } else { - buf[pos] = c - pos++ - } - } - - return buf[:pos] -} - -/****************************************************************************** -* Sync utils * -******************************************************************************/ - -// noCopy may be embedded into structs which must not be copied -// after the first use. -// -// See https://github.com/golang/go/issues/8005#issuecomment-190753527 -// for details. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. 
-func (*noCopy) Lock() {} - -// atomicBool is a wrapper around uint32 for usage as a boolean value with -// atomic access. -type atomicBool struct { - _noCopy noCopy - value uint32 -} - -// IsSet returns whether the current boolean value is true -func (ab *atomicBool) IsSet() bool { - return atomic.LoadUint32(&ab.value) > 0 -} - -// Set sets the value of the bool regardless of the previous value -func (ab *atomicBool) Set(value bool) { - if value { - atomic.StoreUint32(&ab.value, 1) - } else { - atomic.StoreUint32(&ab.value, 0) - } -} - -// TrySet sets the value of the bool and returns whether the value changed -func (ab *atomicBool) TrySet(value bool) bool { - if value { - return atomic.SwapUint32(&ab.value, 1) == 0 - } - return atomic.SwapUint32(&ab.value, 0) > 0 -} - -// atomicError is a wrapper for atomically accessed error values -type atomicError struct { - _noCopy noCopy - value atomic.Value -} - -// Set sets the error value regardless of the previous value. -// The value must not be nil -func (ae *atomicError) Set(value error) { - ae.value.Store(value) -} - -// Value returns the current error value -func (ae *atomicError) Value() error { - if v := ae.value.Load(); v != nil { - // this will panic if the value doesn't implement the error interface - return v.(error) - } - return nil -} - -func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { - dargs := make([]driver.Value, len(named)) - for n, param := range named { - if len(param.Name) > 0 { - // TODO: support the use of Named Parameters #561 - return nil, errors.New("mysql: driver does not support the use of Named Parameters") - } - dargs[n] = param.Value - } - return dargs, nil -} - -func mapIsolationLevel(level driver.IsolationLevel) (string, error) { - switch sql.IsolationLevel(level) { - case sql.LevelRepeatableRead: - return "REPEATABLE READ", nil - case sql.LevelReadCommitted: - return "READ COMMITTED", nil - case sql.LevelReadUncommitted: - return "READ UNCOMMITTED", nil - case sql.LevelSerializable: - return "SERIALIZABLE", nil - default: - return "", fmt.Errorf("mysql: unsupported isolation level: %v", level) - } -} diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index 2bd78667afb..7ec5ac7ea90 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,36 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method 
([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + ## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 5566888726d..a502fdc515a 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -11,7 +11,7 @@ please explain why in the pull request description. ### Releasing -Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits that would precipitate a SemVer change, as described in the Conventional Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) to create a release candidate pull request. Once submitted, `release-please` will create a release. diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec27..dc60082d3b3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc87..c351129279f 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a56138cc4bd..5232b486780 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. 
Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -182,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -294,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 00000000000..339a959a7a2 --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. 
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewV6 returns Nil and an error. +func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 00000000000..3167b643d45 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// It also has improved entropy characteristics over versions 1 and 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID versions 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch). +// It uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error. +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch). +// It uses NewRandomFromReader to fill the random bits. +// On error, NewV7FromReader returns Nil and an error.
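// A short usage sketch of the v1.6.0 surface this vendor update pulls in
// (NewV7 as defined above, plus Validate and UUIDs.Strings from uuid.go);
// illustrative only:
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// V7 IDs carry a 48-bit Unix-millisecond prefix, so IDs minted later in
	// the same process sort after earlier ones (getV7Time guarantees a
	// strictly increasing time/sequence pair).
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	fmt.Println(a.String() < b.String()) // true

	// Validate checks the textual forms accepted by Parse without
	// constructing a UUID.
	fmt.Println(uuid.Validate(a.String())) // <nil>

	// UUIDs.Strings renders a whole slice in one call.
	fmt.Println(uuid.UUIDs{a, b}.Strings())
}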
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml deleted file mode 100644 index 769849071ed..00000000000 --- a/vendor/github.com/hashicorp/go-uuid/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - - 1.5 - - 1.6 - - tip - -script: - - go test -bench . -benchmem -v ./... diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE deleted file mode 100644 index e87a115e462..00000000000 --- a/vendor/github.com/hashicorp/go-uuid/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md deleted file mode 100644 index fbde8b9aef6..00000000000 --- a/vendor/github.com/hashicorp/go-uuid/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) - -Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid). diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go deleted file mode 100644 index 0c10c4e9f5f..00000000000 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ /dev/null @@ -1,83 +0,0 @@ -package uuid - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "io" -) - -// GenerateRandomBytes is used to generate random bytes of given size. -func GenerateRandomBytes(size int) ([]byte, error) { - return GenerateRandomBytesWithReader(size, rand.Reader) -} - -// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader. 
-func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) { - if reader == nil { - return nil, fmt.Errorf("provided reader is nil") - } - buf := make([]byte, size) - if _, err := io.ReadFull(reader, buf); err != nil { - return nil, fmt.Errorf("failed to read random bytes: %v", err) - } - return buf, nil -} - - -const uuidLen = 16 - -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - return GenerateUUIDWithReader(rand.Reader) -} - -// GenerateUUIDWithReader is used to generate a random UUID with a given Reader -func GenerateUUIDWithReader(reader io.Reader) (string, error) { - if reader == nil { - return "", fmt.Errorf("provided reader is nil") - } - buf, err := GenerateRandomBytesWithReader(uuidLen, reader) - if err != nil { - return "", err - } - return FormatUUID(buf) -} - -func FormatUUID(buf []byte) (string, error) { - if buflen := len(buf); buflen != uuidLen { - return "", fmt.Errorf("wrong length byte slice (%d)", buflen) - } - - return fmt.Sprintf("%x-%x-%x-%x-%x", - buf[0:4], - buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]), nil -} - -func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 2 * uuidLen + 4 { - return nil, fmt.Errorf("uuid string is wrong length") - } - - if uuid[8] != '-' || - uuid[13] != '-' || - uuid[18] != '-' || - uuid[23] != '-' { - return nil, fmt.Errorf("uuid is improperly formatted") - } - - hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36] - - ret, err := hex.DecodeString(hexStr) - if err != nil { - return nil, err - } - if len(ret) != uuidLen { - return nil, fmt.Errorf("decoded hex is the wrong length") - } - - return ret, nil -} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/vendor/github.com/huandu/xstrings/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md deleted file mode 100644 index d7b4b8d584b..00000000000 --- a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md +++ /dev/null @@ -1,23 +0,0 @@ -# Contributing # - -Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. - -## New API or feature ## - -I want to speak more about how to add new functions to this package. - -Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. - -* Rule 1: Only string algorithm, which takes string as input, can be included. -* Rule 2: If a function has been implemented in package `string`, it must not be included. -* Rule 3: If a function is not language neutral, it must not be included. -* Rule 4: If a function is a part of standard library in other languages, it can be included. -* Rule 5: If a function is quite useful in some famous framework or library, it can be included. 
- -New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. - -## Pull request ## - -Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. - -If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE deleted file mode 100644 index 27017725936..00000000000 --- a/vendor/github.com/huandu/xstrings/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Huan Du - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md deleted file mode 100644 index 750c3c7eb69..00000000000 --- a/vendor/github.com/huandu/xstrings/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# xstrings - -[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions) -[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings) -[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) -[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) - -Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). - -All functions are well tested and carefully tuned for performance. - -## Propose a new function - -Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. - -## Install - -Use `go get` to install this library. - - go get github.com/huandu/xstrings - -## API document - -See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. - -## Function list - -Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. 
- -Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. - -### Package `xstrings` functions - -_Keep this table sorted by Function in ascending order._ - -| Function | Friends | # | -| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- | -| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | -| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | -| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | -| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | -| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | -| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | -| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | -| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | -| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | -| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | -| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | -| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | -| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | -| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | -| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | -| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | -| 
[Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | -| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | -| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | -| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | -| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | -| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | -| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | -| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | -| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | - -### Package `strings` functions - -_Keep this table sorted by Function in ascending order._ - -| Function | Friends | -| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | -| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | -| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | -| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | -| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | -| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | -| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | -| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | -| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | -| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | -| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | -| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | -| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | -| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | -| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | -| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | -| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | -| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | -| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby 
| -| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | -| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | -| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | -| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | -| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | -| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | -| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | -| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | -| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | -| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | -| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | -| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | -| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | -| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | -| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | -| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | -| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | -| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | -| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | -| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | - -## License - -This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go deleted file mode 100644 index f427cc84e2e..00000000000 --- a/vendor/github.com/huandu/xstrings/common.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -const bufferMaxInitGrowSize = 2048 - -// Lazy initialize a buffer. -func allocBuffer(orig, cur string) *stringBuilder { - output := &stringBuilder{} - maxSize := len(orig) * 4 - - // Avoid to reserve too much memory at once. - if maxSize > bufferMaxInitGrowSize { - maxSize = bufferMaxInitGrowSize - } - - output.Grow(maxSize) - output.WriteString(orig[:len(orig)-len(cur)]) - return output -} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go deleted file mode 100644 index cba0d072520..00000000000 --- a/vendor/github.com/huandu/xstrings/convert.go +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "math/rand" - "unicode" - "unicode/utf8" -) - -// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. -// -// Some samples. 
-// -// "some_words" => "SomeWords" -// "http_server" => "HttpServer" -// "no_https" => "NoHttps" -// "_complex__case_" => "_Complex_Case_" -// "some words" => "SomeWords" -func ToCamelCase(str string) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - var r0, r1 rune - var size int - - // leading connector will appear in output. - for len(str) > 0 { - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if !isConnector(r0) { - r0 = unicode.ToUpper(r0) - break - } - - buf.WriteRune(r0) - } - - if len(str) == 0 { - // A special case for a string contains only 1 rune. - if size != 0 { - buf.WriteRune(r0) - } - - return buf.String() - } - - for len(str) > 0 { - r1 = r0 - r0, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r0) && isConnector(r1) { - buf.WriteRune(r1) - continue - } - - if isConnector(r1) { - r0 = unicode.ToUpper(r0) - } else { - buf.WriteRune(r1) - } - } - - buf.WriteRune(r0) - return buf.String() -} - -// ToSnakeCase can convert all upper case characters in a string to -// snake case format. -// -// Some samples. -// -// "FirstName" => "first_name" -// "HTTPServer" => "http_server" -// "NoHTTPS" => "no_https" -// "GO_PATH" => "go_path" -// "GO PATH" => "go_path" // space is converted to underscore. -// "GO-PATH" => "go_path" // hyphen is converted to underscore. -// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http_20x_ok" -// "Duration2m3s" => "duration_2m3s" -// "Bld4Floor3rd" => "bld4_floor_3rd" -func ToSnakeCase(str string) string { - return camelCaseToLowerCase(str, '_') -} - -// ToKebabCase can convert all upper case characters in a string to -// kebab case format. -// -// Some samples. -// -// "FirstName" => "first-name" -// "HTTPServer" => "http-server" -// "NoHTTPS" => "no-https" -// "GO_PATH" => "go-path" -// "GO PATH" => "go-path" // space is converted to '-'. -// "GO-PATH" => "go-path" // hyphen is converted to '-'. -// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. -// "HTTP20xOK" => "http-20x-ok" -// "Duration2m3s" => "duration-2m3s" -// "Bld4Floor3rd" => "bld4-floor-3rd" -func ToKebabCase(str string) string { - return camelCaseToLowerCase(str, '-') -} - -func camelCaseToLowerCase(str string, connector rune) string { - if len(str) == 0 { - return "" - } - - buf := &stringBuilder{} - wt, word, remaining := nextWord(str) - - for len(remaining) > 0 { - if wt != connectorWord { - toLower(buf, wt, word, connector) - } - - prev := wt - last := word - wt, word, remaining = nextWord(remaining) - - switch prev { - case numberWord: - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != punctWord && wt != connectorWord { - buf.WriteRune(connector) - } - - case connectorWord: - toLower(buf, prev, last, connector) - - case punctWord: - // nothing. - - default: - if wt != numberWord { - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - if len(remaining) == 0 { - break - } - - last := word - wt, word, remaining = nextWord(remaining) - - // consider number as a part of previous word. - // e.g. 
"Bld4Floor" => "bld4_floor" - if wt != alphabetWord { - toLower(buf, numberWord, last, connector) - - if wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - - break - } - - // if there are some lower case letters following a number, - // add connector before the number. - // e.g. "HTTP2xx" => "http_2xx" - buf.WriteRune(connector) - toLower(buf, numberWord, last, connector) - - for wt == alphabetWord || wt == numberWord { - toLower(buf, wt, word, connector) - wt, word, remaining = nextWord(remaining) - } - - if wt != invalidWord && wt != connectorWord && wt != punctWord { - buf.WriteRune(connector) - } - } - } - - toLower(buf, wt, word, connector) - return buf.String() -} - -func isConnector(r rune) bool { - return r == '-' || r == '_' || unicode.IsSpace(r) -} - -type wordType int - -const ( - invalidWord wordType = iota - numberWord - upperCaseWord - alphabetWord - connectorWord - punctWord - otherWord -) - -func nextWord(str string) (wt wordType, word, remaining string) { - if len(str) == 0 { - return - } - - var offset int - remaining = str - r, size := nextValidRune(remaining, utf8.RuneError) - offset += size - - if r == utf8.RuneError { - wt = invalidWord - word = str[:offset] - remaining = str[offset:] - return - } - - switch { - case isConnector(r): - wt = connectorWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isConnector(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsPunct(r): - wt = punctWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsUpper(r): - wt = upperCaseWord - remaining = remaining[size:] - - if len(remaining) == 0 { - break - } - - r, size = nextValidRune(remaining, r) - - switch { - case unicode.IsUpper(r): - prevSize := size - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsUpper(r) { - break - } - - prevSize = size - offset += size - remaining = remaining[size:] - } - - // it's a bit complex when dealing with a case like "HTTPStatus". - // it's expected to be splitted into "HTTP" and "Status". - // Therefore "S" should be in remaining instead of word. 
- if len(remaining) > 0 && isAlphabet(r) { - offset -= prevSize - remaining = str[offset:] - } - - case isAlphabet(r): - offset += size - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - case isAlphabet(r): - wt = alphabetWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !isAlphabet(r) || unicode.IsUpper(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - case unicode.IsNumber(r): - wt = numberWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if !unicode.IsNumber(r) { - break - } - - offset += size - remaining = remaining[size:] - } - - default: - wt = otherWord - remaining = remaining[size:] - - for len(remaining) > 0 { - r, size = nextValidRune(remaining, r) - - if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { - break - } - - offset += size - remaining = remaining[size:] - } - } - - word = str[:offset] - return -} - -func nextValidRune(str string, prev rune) (r rune, size int) { - var sz int - - for len(str) > 0 { - r, sz = utf8.DecodeRuneInString(str) - size += sz - - if r != utf8.RuneError { - return - } - - str = str[sz:] - } - - r = prev - return -} - -func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { - buf.Grow(buf.Len() + len(str)) - - if wt != upperCaseWord && wt != connectorWord { - buf.WriteString(str) - return - } - - for len(str) > 0 { - r, size := utf8.DecodeRuneInString(str) - str = str[size:] - - if isConnector(r) { - buf.WriteRune(connector) - } else if unicode.IsUpper(r) { - buf.WriteRune(unicode.ToLower(r)) - } else { - buf.WriteRune(r) - } - } -} - -// SwapCase will swap characters case from upper to lower or lower to upper. -func SwapCase(str string) string { - var r rune - var size int - - buf := &stringBuilder{} - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case unicode.IsUpper(r): - buf.WriteRune(unicode.ToLower(r)) - - case unicode.IsLower(r): - buf.WriteRune(unicode.ToUpper(r)) - - default: - buf.WriteRune(r) - } - - str = str[size:] - } - - return buf.String() -} - -// FirstRuneToUpper converts first rune to upper case if necessary. -func FirstRuneToUpper(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsLower(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToUpper(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// FirstRuneToLower converts first rune to lower case if necessary. -func FirstRuneToLower(str string) string { - if str == "" { - return str - } - - r, size := utf8.DecodeRuneInString(str) - - if !unicode.IsUpper(r) { - return str - } - - buf := &stringBuilder{} - buf.WriteRune(unicode.ToLower(r)) - buf.WriteString(str[size:]) - return buf.String() -} - -// Shuffle randomizes runes in a string and returns the result. -// It uses default random source in `math/rand`. -func Shuffle(str string) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - - for i := len(runes) - 1; i > 0; i-- { - index = rand.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// ShuffleSource randomizes runes in a string with given random source. 
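// For example (an illustrative note): ShuffleSource(str, rand.NewSource(42))
// produces the same permutation on every run, unlike Shuffle above, which
// draws from the shared default source in math/rand.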
-func ShuffleSource(str string, src rand.Source) string { - if str == "" { - return str - } - - runes := []rune(str) - index := 0 - r := rand.New(src) - - for i := len(runes) - 1; i > 0; i-- { - index = r.Intn(i + 1) - - if i != index { - runes[i], runes[index] = runes[index], runes[i] - } - } - - return string(runes) -} - -// Successor returns the successor to string. -// -// If there is one alphanumeric rune is found in string, increase the rune by 1. -// If increment generates a "carry", the rune to the left of it is incremented. -// This process repeats until there is no carry, adding an additional rune if necessary. -// -// If there is no alphanumeric rune, the rightmost rune will be increased by 1 -// regardless whether the result is a valid rune or not. -// -// Only following characters are alphanumeric. -// - a - z -// - A - Z -// - 0 - 9 -// -// Samples (borrowed from ruby's String#succ document): -// -// "abcd" => "abce" -// "THX1138" => "THX1139" -// "<>" => "<>" -// "1999zzz" => "2000aaa" -// "ZZZ9999" => "AAAA0000" -// "***" => "**+" -func Successor(str string) string { - if str == "" { - return str - } - - var r rune - var i int - carry := ' ' - runes := []rune(str) - l := len(runes) - lastAlphanumeric := l - - for i = l - 1; i >= 0; i-- { - r = runes[i] - - if ('a' <= r && r <= 'y') || - ('A' <= r && r <= 'Y') || - ('0' <= r && r <= '8') { - runes[i]++ - carry = ' ' - lastAlphanumeric = i - break - } - - switch r { - case 'z': - runes[i] = 'a' - carry = 'a' - lastAlphanumeric = i - - case 'Z': - runes[i] = 'A' - carry = 'A' - lastAlphanumeric = i - - case '9': - runes[i] = '0' - carry = '0' - lastAlphanumeric = i - } - } - - // Needs to add one character for carry. - if i < 0 && carry != ' ' { - buf := &stringBuilder{} - buf.Grow(l + 4) // Reserve enough space for write. - - if lastAlphanumeric != 0 { - buf.WriteString(str[:lastAlphanumeric]) - } - - buf.WriteRune(carry) - - for _, r = range runes[lastAlphanumeric:] { - buf.WriteRune(r) - } - - return buf.String() - } - - // No alphanumeric character. Simply increase last rune's value. - if lastAlphanumeric == l { - runes[l-1]++ - } - - return string(runes) -} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go deleted file mode 100644 index f96e38703a3..00000000000 --- a/vendor/github.com/huandu/xstrings/count.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -// Len returns str's utf8 rune length. -func Len(str string) int { - return utf8.RuneCountInString(str) -} - -// WordCount returns number of words in a string. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordCount(str string) int { - var r rune - var size, n int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - n++ - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - inWord = false - } - - str = str[size:] - } - - return n -} - -const minCJKCharacter = '\u3400' - -// Checks r is a letter but not CJK character. -func isAlphabet(r rune) bool { - if !unicode.IsLetter(r) { - return false - } - - switch { - // Quick check for non-CJK character. 
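// (Illustrative: common Latin letters such as 'A' (U+0041) sit far below
// U+3400, so they take this fast path without the CJK range checks below.)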
- case r < minCJKCharacter: - return true - - // Common CJK characters. - case r >= '\u4E00' && r <= '\u9FCC': - return false - - // Rare CJK characters. - case r >= '\u3400' && r <= '\u4D85': - return false - - // Rare and historic CJK characters. - case r >= '\U00020000' && r <= '\U0002B81D': - return false - } - - return true -} - -// Width returns string width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func Width(str string) int { - var r rune - var size, n int - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - n += RuneWidth(r) - str = str[size:] - } - - return n -} - -// RuneWidth returns character width in monotype font. -// Multi-byte characters are usually twice the width of single byte characters. -// -// Algorithm comes from `mb_strwidth` in PHP. -// http://php.net/manual/en/function.mb-strwidth.php -func RuneWidth(r rune) int { - switch { - case r == utf8.RuneError || r < '\x20': - return 0 - - case '\x20' <= r && r < '\u2000': - return 1 - - case '\u2000' <= r && r < '\uFF61': - return 2 - - case '\uFF61' <= r && r < '\uFFA0': - return 1 - - case '\uFFA0' <= r: - return 2 - } - - return 0 -} diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go deleted file mode 100644 index 1a6ef069f61..00000000000 --- a/vendor/github.com/huandu/xstrings/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -// Package xstrings is to provide string algorithms which are useful but not included in `strings` package. -// See project home page for details. https://github.com/huandu/xstrings -// -// Package xstrings assumes all strings are encoded in utf8. -package xstrings diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go deleted file mode 100644 index b32219bbd58..00000000000 --- a/vendor/github.com/huandu/xstrings/format.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "unicode/utf8" -) - -// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on -// current column and tabSize. -// The column number is reset to zero after each newline ('\n') occurring in the str. -// -// ExpandTabs uses RuneWidth to decide rune's width. -// For example, CJK characters will be treated as two characters. -// -// If tabSize <= 0, ExpandTabs panics with error. 
-// -// Samples: -// -// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" -// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" -// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" -func ExpandTabs(str string, tabSize int) string { - if tabSize <= 0 { - panic("tab size must be positive") - } - - var r rune - var i, size, column, expand int - var output *stringBuilder - - orig := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == '\t' { - expand = tabSize - column%tabSize - - if output == nil { - output = allocBuffer(orig, str) - } - - for i = 0; i < expand; i++ { - output.WriteRune(' ') - } - - column += expand - } else { - if r == '\n' { - column = 0 - } else { - column += RuneWidth(r) - } - - if output != nil { - output.WriteRune(r) - } - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} - -// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// -// LeftJustify("hello", 4, " ") => "hello" -// LeftJustify("hello", 10, " ") => "hello " -// LeftJustify("hello", 10, "123") => "hello12312" -func LeftJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - output.WriteString(str) - writePadString(output, pad, padLen, remains) - return output.String() -} - -// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. -// -// Samples: -// -// RightJustify("hello", 4, " ") => "hello" -// RightJustify("hello", 10, " ") => " hello" -// RightJustify("hello", 10, "123") => "12312hello" -func RightJustify(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains) - output.WriteString(str) - return output.String() -} - -// Center returns a string with pad string at both side if str's rune length is smaller than length. -// If str's rune length is larger than length, str itself will be returned. -// -// If pad is an empty string, str will be returned. 
-// -// Samples: -// -// Center("hello", 4, " ") => "hello" -// Center("hello", 10, " ") => " hello " -// Center("hello", 10, "123") => "12hello123" -func Center(str string, length int, pad string) string { - l := Len(str) - - if l >= length || pad == "" { - return str - } - - remains := length - l - padLen := Len(pad) - - output := &stringBuilder{} - output.Grow(len(str) + (remains/padLen+1)*len(pad)) - writePadString(output, pad, padLen, remains/2) - output.WriteString(str) - writePadString(output, pad, padLen, (remains+1)/2) - return output.String() -} - -func writePadString(output *stringBuilder, pad string, padLen, remains int) { - var r rune - var size int - - repeats := remains / padLen - - for i := 0; i < repeats; i++ { - output.WriteString(pad) - } - - remains = remains % padLen - - if remains != 0 { - for i := 0; i < remains; i++ { - r, size = utf8.DecodeRuneInString(pad) - output.WriteRune(r) - pad = pad[size:] - } - } -} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go deleted file mode 100644 index ab42fe0fec6..00000000000 --- a/vendor/github.com/huandu/xstrings/manipulate.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. - -package xstrings - -import ( - "strings" - "unicode/utf8" -) - -// Reverse a utf8 encoded string. -func Reverse(str string) string { - var size int - - tail := len(str) - buf := make([]byte, tail) - s := buf - - for len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - tail -= size - s = append(s[:tail], []byte(str[:size])...) - str = str[size:] - } - - return string(buf) -} - -// Slice a string by rune. -// -// Start must satisfy 0 <= start <= rune length. -// -// End can be positive, zero or negative. -// If end >= 0, start and end must satisfy start <= end <= rune length. -// If end < 0, it means slice to the end of string. -// -// Otherwise, Slice will panic as out of range. -func Slice(str string, start, end int) string { - var size, startPos, endPos int - - origin := str - - if start < 0 || end > len(str) || (end >= 0 && start > end) { - panic("out of range") - } - - if end >= 0 { - end -= start - } - - for start > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - start-- - startPos += size - str = str[size:] - } - - if end < 0 { - return origin[startPos:] - } - - endPos = startPos - - for end > 0 && len(str) > 0 { - _, size = utf8.DecodeRuneInString(str) - end-- - endPos += size - str = str[size:] - } - - if len(str) == 0 && (start > 0 || end > 0) { - panic("out of range") - } - - return origin[startPos:endPos] -} - -// Partition splits a string by sep into three parts. -// The return value is a slice of strings with head, match and tail. -// -// If str contains sep, for example "hello" and "l", Partition returns -// -// "he", "l", "lo" -// -// If str doesn't contain sep, for example "hello" and "x", Partition returns -// -// "hello", "", "" -func Partition(str, sep string) (head, match, tail string) { - index := strings.Index(str, sep) - - if index == -1 { - head = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// LastPartition splits a string by last instance of sep into three parts. -// The return value is a slice of strings with head, match and tail. 
-// -// If str contains sep, for example "hello" and "l", LastPartition returns -// -// "hel", "l", "o" -// -// If str doesn't contain sep, for example "hello" and "x", LastPartition returns -// -// "", "", "hello" -func LastPartition(str, sep string) (head, match, tail string) { - index := strings.LastIndex(str, sep) - - if index == -1 { - tail = str - return - } - - head = str[:index] - match = str[index : index+len(sep)] - tail = str[index+len(sep):] - return -} - -// Insert src into dst at given rune index. -// Index is counted by runes instead of bytes. -// -// If index is out of range of dst, panic with out of range. -func Insert(dst, src string, index int) string { - return Slice(dst, 0, index) + src + Slice(dst, index, -1) -} - -// Scrub scrubs invalid utf8 bytes with repl string. -// Adjacent invalid bytes are replaced only once. -func Scrub(str, repl string) string { - var buf *stringBuilder - var r rune - var size, pos int - var hasError bool - - origin := str - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - if r == utf8.RuneError { - if !hasError { - if buf == nil { - buf = &stringBuilder{} - } - - buf.WriteString(origin[:pos]) - hasError = true - } - } else if hasError { - hasError = false - buf.WriteString(repl) - - origin = origin[pos:] - pos = 0 - } - - pos += size - str = str[size:] - } - - if buf != nil { - buf.WriteString(origin) - return buf.String() - } - - // No invalid byte. - return origin -} - -// WordSplit splits a string into words. Returns a slice of words. -// If there is no word in a string, return nil. -// -// Word is defined as a locale dependent string containing alphabetic characters, -// which may also contain but not start with `'` and `-` characters. -func WordSplit(str string) []string { - var word string - var words []string - var r rune - var size, pos int - - inWord := false - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - switch { - case isAlphabet(r): - if !inWord { - inWord = true - word = str - pos = 0 - } - - case inWord && (r == '\'' || r == '-'): - // Still in word. - - default: - if inWord { - inWord = false - words = append(words, word[:pos]) - } - } - - pos += size - str = str[size:] - } - - if inWord { - words = append(words, word[:pos]) - } - - return words -} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go deleted file mode 100644 index 06812fea07d..00000000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build go1.10 -// +build go1.10 - -package xstrings - -import "strings" - -type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go deleted file mode 100644 index ccaa5aedd33..00000000000 --- a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !go1.10 -// +build !go1.10 - -package xstrings - -import "bytes" - -type stringBuilder struct { - bytes.Buffer -} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go deleted file mode 100644 index 1fac6a00be3..00000000000 --- a/vendor/github.com/huandu/xstrings/translate.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2015 Huan Du. All rights reserved. -// Licensed under the MIT license that can be found in the LICENSE file. 
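// For example (an illustrative sketch of the manipulation helpers above,
// assuming the vendored import path github.com/huandu/xstrings):
//
//	xstrings.Reverse("hello")              // "olleh"
//	xstrings.Partition("hello", "l")       // "he", "l", "lo"
//	xstrings.Insert("hello", ", world", 5) // "hello, world"
//	xstrings.Scrub("a\xffb", "?")          // "a?b": invalid UTF-8 replaced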
- -package xstrings - -import ( - "unicode" - "unicode/utf8" -) - -type runeRangeMap struct { - FromLo rune // Lower bound of range map. - FromHi rune // An inclusive higher bound of range map. - ToLo rune - ToHi rune -} - -type runeDict struct { - Dict [unicode.MaxASCII + 1]rune -} - -type runeMap map[rune]rune - -// Translator can translate string with pre-compiled from and to patterns. -// If a from/to pattern pair needs to be used more than once, it's recommended -// to create a Translator and reuse it. -type Translator struct { - quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. - runeMap runeMap // Rune map for translation. - ranges []*runeRangeMap // Ranges of runes. - mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. - reverted bool // If to pattern is empty, all matched characters will be deleted. - hasPattern bool -} - -// NewTranslator creates new Translator through a from/to pattern pair. -func NewTranslator(from, to string) *Translator { - tr := &Translator{} - - if from == "" { - return tr - } - - reverted := from[0] == '^' - deletion := len(to) == 0 - - if reverted { - from = from[1:] - } - - var fromStart, fromEnd, fromRangeStep rune - var toStart, toEnd, toRangeStep rune - var fromRangeSize, toRangeSize rune - var singleRunes []rune - - // Update the to rune range. - updateRange := func() { - // No more rune to read in the to rune pattern. - if toEnd == utf8.RuneError { - return - } - - if toRangeStep == 0 { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) - return - } - - // Current range is not empty. Consume 1 rune from start. - if toStart != toEnd { - toStart += toRangeStep - return - } - - // No more rune. Repeat the last rune. - if to == "" { - toEnd = utf8.RuneError - return - } - - // Both start and end are used. Read two more runes from the to pattern. - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - - if deletion { - toStart = utf8.RuneError - toEnd = utf8.RuneError - } else { - // If from pattern is reverted, only the last rune in the to pattern will be used. - if reverted { - var size int - - for len(to) > 0 { - toStart, size = utf8.DecodeRuneInString(to) - to = to[size:] - } - - toEnd = utf8.RuneError - } else { - to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) - } - } - - fromEnd = utf8.RuneError - - for len(from) > 0 { - from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) - - // fromStart is a single character. Just map it with a rune in the to pattern. - if fromRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - continue - } - - for toEnd != utf8.RuneError && fromStart != fromEnd { - // If mapped rune is a single character instead of a range, simply shift first - // rune in the range. - if toRangeStep == 0 { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - fromStart += fromRangeStep - continue - } - - fromRangeSize = (fromEnd - fromStart) * fromRangeStep - toRangeSize = (toEnd - toStart) * toRangeStep - - // Not enough runes in the to pattern. Need to read more. - if fromRangeSize > toRangeSize { - fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) - fromStart += fromRangeStep - updateRange() - - // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered - // as a single rune. 
- if fromStart == fromEnd { - singleRunes = tr.addRune(fromStart, toStart, singleRunes) - updateRange() - } - - continue - } - - fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) - updateRange() - break - } - - if fromStart == fromEnd { - fromEnd = utf8.RuneError - continue - } - - _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) - fromEnd = utf8.RuneError - } - - if fromEnd != utf8.RuneError { - tr.addRune(fromEnd, toStart, singleRunes) - } - - tr.reverted = reverted - tr.mappedRune = -1 - tr.hasPattern = true - - // Translate RuneError only if in deletion or reverted mode. - if deletion || reverted { - tr.mappedRune = toStart - } - - return tr -} - -func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { - if from <= unicode.MaxASCII { - if tr.quickDict == nil { - tr.quickDict = &runeDict{} - } - - tr.quickDict.Dict[from] = to - } else { - if tr.runeMap == nil { - tr.runeMap = make(runeMap) - } - - tr.runeMap[from] = to - } - - singleRunes = append(singleRunes, from) - return singleRunes -} - -func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { - var r rune - var rrm *runeRangeMap - - if fromLo < fromHi { - rrm = &runeRangeMap{ - FromLo: fromLo, - FromHi: fromHi, - ToLo: toLo, - ToHi: toHi, - } - } else { - rrm = &runeRangeMap{ - FromLo: fromHi, - FromHi: fromLo, - ToLo: toHi, - ToHi: toLo, - } - } - - // If there is any single rune conflicts with this rune range, clear single rune record. - for _, r = range singleRunes { - if rrm.FromLo <= r && r <= rrm.FromHi { - if r <= unicode.MaxASCII { - tr.quickDict.Dict[r] = 0 - } else { - delete(tr.runeMap, r) - } - } - } - - tr.ranges = append(tr.ranges, rrm) - return fromHi, toHi -} - -func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { - var r rune - var size int - - remaining = str - escaping := false - isRange := false - - for len(remaining) > 0 { - r, size = utf8.DecodeRuneInString(remaining) - remaining = remaining[size:] - - // Parse special characters. - if !escaping { - if r == '\\' { - escaping = true - continue - } - - if r == '-' { - // Ignore slash at beginning of string. - if last == utf8.RuneError { - continue - } - - start = last - isRange = true - continue - } - } - - escaping = false - - if last != utf8.RuneError { - // This is a range which start and end are the same. - // Considier it as a normal character. - if isRange && last == r { - isRange = false - continue - } - - start = last - end = r - - if isRange { - if start < end { - rangeStep = 1 - } else { - rangeStep = -1 - } - } - - return - } - - last = r - } - - start = last - end = utf8.RuneError - return -} - -// Translate str with a from/to pattern pair. -// -// See comment in Translate function for usage and samples. -func (tr *Translator) Translate(str string) string { - if !tr.hasPattern || str == "" { - return str - } - - var r rune - var size int - var needTr bool - - orig := str - - var output *stringBuilder - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - r, needTr = tr.TranslateRune(r) - - if needTr && output == nil { - output = allocBuffer(orig, str) - } - - if r != utf8.RuneError && output != nil { - output.WriteRune(r) - } - - str = str[size:] - } - - // No character is translated. - if output == nil { - return orig - } - - return output.String() -} - -// TranslateRune return translated rune and true if r matches the from pattern. 
-// If r doesn't match the pattern, original r is returned and translated is false. -func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { - switch { - case tr.quickDict != nil: - if r <= unicode.MaxASCII { - result = tr.quickDict.Dict[r] - - if result != 0 { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - } - - fallthrough - - case tr.runeMap != nil: - var ok bool - - if result, ok = tr.runeMap[r]; ok { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - } - - break - } - - fallthrough - - default: - var rrm *runeRangeMap - ranges := tr.ranges - - for i := len(ranges) - 1; i >= 0; i-- { - rrm = ranges[i] - - if rrm.FromLo <= r && r <= rrm.FromHi { - translated = true - - if tr.mappedRune >= 0 { - result = tr.mappedRune - break - } - - if rrm.ToLo < rrm.ToHi { - result = rrm.ToLo + r - rrm.FromLo - } else if rrm.ToLo > rrm.ToHi { - // ToHi can be smaller than ToLo if range is from higher to lower. - result = rrm.ToLo - r + rrm.FromLo - } else { - result = rrm.ToLo - } - - break - } - } - } - - if tr.reverted { - if !translated { - result = tr.mappedRune - } - - translated = !translated - } - - if !translated { - result = r - } - - return -} - -// HasPattern returns true if Translator has one pattern at least. -func (tr *Translator) HasPattern() bool { - return tr.hasPattern -} - -// Translate str with the characters defined in from replaced by characters defined in to. -// -// From and to are patterns representing a set of characters. Pattern is defined as following. -// -// Special characters: -// -// 1. '-' means a range of runes, e.g. -// "a-z" means all characters from 'a' to 'z' inclusive; -// "z-a" means all characters from 'z' to 'a' inclusive. -// 2. '^' as first character means a set of all runes excepted listed, e.g. -// "^a-z" means all characters except 'a' to 'z' inclusive. -// 3. '\' escapes special characters. -// -// Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. -// -// Translate will try to find a 1:1 mapping from from to to. -// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. -// -// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. -// -// If the to pattern is an empty string, Translate works exactly the same as Delete. -// -// Samples: -// -// Translate("hello", "aeiou", "12345") => "h2ll4" -// Translate("hello", "a-z", "A-Z") => "HELLO" -// Translate("hello", "z-a", "a-z") => "svool" -// Translate("hello", "aeiou", "*") => "h*ll*" -// Translate("hello", "^l", "*") => "**ll*" -// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" -func Translate(str, from, to string) string { - tr := NewTranslator(from, to) - return tr.Translate(str) -} - -// Delete runes in str matching the pattern. -// Pattern is defined in Translate function. -// -// Samples: -// -// Delete("hello", "aeiou") => "hll" -// Delete("hello", "a-k") => "llo" -// Delete("hello", "^a-k") => "he" -func Delete(str, pattern string) string { - tr := NewTranslator(pattern, "") - return tr.Translate(str) -} - -// Count how many runes in str match the pattern. -// Pattern is defined in Translate function. 
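// For example (an illustrative sketch of Translate and Delete above, assuming
// the vendored import path github.com/huandu/xstrings):
//
//	xstrings.Translate("hello", "a-z", "A-Z") // "HELLO"
//	xstrings.Translate("hello", "aeiou", "*") // "h*ll*"
//	xstrings.Delete("hello", "aeiou")         // "hll"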
-// -// Samples: -// -// Count("hello", "aeiou") => 3 -// Count("hello", "a-k") => 3 -// Count("hello", "^a-k") => 2 -func Count(str, pattern string) int { - if pattern == "" || str == "" { - return 0 - } - - var r rune - var size int - var matched bool - - tr := NewTranslator(pattern, "") - cnt := 0 - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - str = str[size:] - - if _, matched = tr.TranslateRune(r); matched { - cnt++ - } - } - - return cnt -} - -// Squeeze deletes adjacent repeated runes in str. -// If pattern is not empty, only runes matching the pattern will be squeezed. -// -// Samples: -// -// Squeeze("hello", "") => "helo" -// Squeeze("hello", "m-z") => "hello" -// Squeeze("hello world", " ") => "hello world" -func Squeeze(str, pattern string) string { - var last, r rune - var size int - var skipSqueeze, matched bool - var tr *Translator - var output *stringBuilder - - orig := str - last = -1 - - if len(pattern) > 0 { - tr = NewTranslator(pattern, "") - } - - for len(str) > 0 { - r, size = utf8.DecodeRuneInString(str) - - // Need to squeeze the str. - if last == r && !skipSqueeze { - if tr != nil { - if _, matched = tr.TranslateRune(r); !matched { - skipSqueeze = true - } - } - - if output == nil { - output = allocBuffer(orig, str) - } - - if skipSqueeze { - output.WriteRune(r) - } - } else { - if output != nil { - output.WriteRune(r) - } - - last = r - skipSqueeze = false - } - - str = str[size:] - } - - if output == nil { - return orig - } - - return output.String() -} diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/vendor/github.com/jcmturner/gofork/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md deleted file mode 100644 index 66a2a8cca71..00000000000 --- a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This is a temporary repository that will be removed when the issues below are fixed in the core golang code. - -## Issues -* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832) -* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834) \ No newline at end of file diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go deleted file mode 100644 index f1bb7671795..00000000000 --- a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go +++ /dev/null @@ -1,1003 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package asn1 implements parsing of DER-encoded ASN.1 data structures, -// as defined in ITU-T Rec X.690. -// -// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' -// http://luca.ntop.org/Teaching/Appunti/asn1.html. -package asn1 - -// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc -// are different encoding formats for those objects. Here, we'll be dealing -// with DER, the Distinguished Encoding Rules. DER is used in X.509 because -// it's fast to parse and, unlike BER, has a unique encoding for every object. -// When calculating hashes over objects, it's important that the resulting -// bytes be the same at both ends and DER removes this margin of error. -// -// ASN.1 is very complex and this package doesn't attempt to implement -// everything by any means. - -import ( - "errors" - "fmt" - "math/big" - "reflect" - "strconv" - "time" - "unicode/utf8" -) - -// A StructuralError suggests that the ASN.1 data is valid, but the Go type -// which is receiving it doesn't match. -type StructuralError struct { - Msg string -} - -func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg } - -// A SyntaxError suggests that the ASN.1 data is invalid. -type SyntaxError struct { - Msg string -} - -func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg } - -// We start by dealing with each of the primitive types in turn. - -// BOOLEAN - -func parseBool(bytes []byte) (ret bool, err error) { - if len(bytes) != 1 { - err = SyntaxError{"invalid boolean"} - return - } - - // DER demands that "If the encoding represents the boolean value TRUE, - // its single contents octet shall have all eight bits set to one." - // Thus only 0 and 255 are valid encoded values. - switch bytes[0] { - case 0: - ret = false - case 0xff: - ret = true - default: - err = SyntaxError{"invalid boolean"} - } - - return -} - -// INTEGER - -// checkInteger returns nil if the given bytes are a valid DER-encoded -// INTEGER and an error otherwise. 
-func checkInteger(bytes []byte) error {
-	if len(bytes) == 0 {
-		return StructuralError{"empty integer"}
-	}
-	if len(bytes) == 1 {
-		return nil
-	}
-	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
-		return StructuralError{"integer not minimally-encoded"}
-	}
-	return nil
-}
-
-// parseInt64 treats the given bytes as a big-endian, signed integer and
-// returns the result.
-func parseInt64(bytes []byte) (ret int64, err error) {
-	err = checkInteger(bytes)
-	if err != nil {
-		return
-	}
-	if len(bytes) > 8 {
-		// We'll overflow an int64 in this case.
-		err = StructuralError{"integer too large"}
-		return
-	}
-	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
-		ret <<= 8
-		ret |= int64(bytes[bytesRead])
-	}
-
-	// Shift up and down in order to sign extend the result.
-	ret <<= 64 - uint8(len(bytes))*8
-	ret >>= 64 - uint8(len(bytes))*8
-	return
-}
-
-// parseInt32 treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseInt32(bytes []byte) (int32, error) {
-	if err := checkInteger(bytes); err != nil {
-		return 0, err
-	}
-	ret64, err := parseInt64(bytes)
-	if err != nil {
-		return 0, err
-	}
-	if ret64 != int64(int32(ret64)) {
-		return 0, StructuralError{"integer too large"}
-	}
-	return int32(ret64), nil
-}
-
-var bigOne = big.NewInt(1)
-
-// parseBigInt treats the given bytes as a big-endian, signed integer and returns
-// the result.
-func parseBigInt(bytes []byte) (*big.Int, error) {
-	if err := checkInteger(bytes); err != nil {
-		return nil, err
-	}
-	ret := new(big.Int)
-	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
-		// This is a negative number.
-		notBytes := make([]byte, len(bytes))
-		for i := range notBytes {
-			notBytes[i] = ^bytes[i]
-		}
-		ret.SetBytes(notBytes)
-		ret.Add(ret, bigOne)
-		ret.Neg(ret)
-		return ret, nil
-	}
-	ret.SetBytes(bytes)
-	return ret, nil
-}
-
-// BIT STRING
-
-// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
-// bit string is padded up to the nearest byte in memory and the number of
-// valid bits is recorded. Padding bits will be zero.
-type BitString struct {
-	Bytes     []byte // bits packed into bytes.
-	BitLength int    // length in bits.
-}
-
-// At returns the bit at the given index. If the index is out of range it
-// returns 0.
-func (b BitString) At(i int) int {
-	if i < 0 || i >= b.BitLength {
-		return 0
-	}
-	x := i / 8
-	y := 7 - uint(i%8)
-	return int(b.Bytes[x]>>y) & 1
-}
-
-// RightAlign returns a slice where the padding bits are at the beginning. The
-// slice may share memory with the BitString.
-func (b BitString) RightAlign() []byte {
-	shift := uint(8 - (b.BitLength % 8))
-	if shift == 8 || len(b.Bytes) == 0 {
-		return b.Bytes
-	}
-
-	a := make([]byte, len(b.Bytes))
-	a[0] = b.Bytes[0] >> shift
-	for i := 1; i < len(b.Bytes); i++ {
-		a[i] = b.Bytes[i-1] << (8 - shift)
-		a[i] |= b.Bytes[i] >> shift
-	}
-
-	return a
-}
-
-// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
-func parseBitString(bytes []byte) (ret BitString, err error) {
-	if len(bytes) == 0 {
-		err = SyntaxError{"zero length BIT STRING"}
-		return
-	}
-	paddingBits := int(bytes[0])
-	if paddingBits > 7 ||
-		len(bytes) == 1 && paddingBits > 0 ||
-		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
-		err = SyntaxError{"invalid padding bits in BIT STRING"}
-		return
-	}
-	ret.BitLength = (len(bytes)-1)*8 - paddingBits
-	ret.Bytes = bytes[1:]
-	return
-}
-
-// OBJECT IDENTIFIER
-
-// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
-type ObjectIdentifier []int
-
-// Equal reports whether oi and other represent the same identifier.
-func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
-	if len(oi) != len(other) {
-		return false
-	}
-	for i := 0; i < len(oi); i++ {
-		if oi[i] != other[i] {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (oi ObjectIdentifier) String() string {
-	var s string
-
-	for i, v := range oi {
-		if i > 0 {
-			s += "."
-		}
-		s += strconv.Itoa(v)
-	}
-
-	return s
-}
-
-// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
-// returns it. An object identifier is a sequence of variable length integers
-// that are assigned in a hierarchy.
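// For example (illustrative): the content bytes 0x2A 0x86 0x48 0x86 0xF7 0x0D
// decode to the OID 1.2.840.113549 — the first byte 0x2A is 40*1 + 2, and each
// later arc is a base-128 varint whose high bit marks continuation.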
-func parseObjectIdentifier(bytes []byte) (s []int, err error) { - if len(bytes) == 0 { - err = SyntaxError{"zero length OBJECT IDENTIFIER"} - return - } - - // In the worst case, we get two elements from the first byte (which is - // encoded differently) and then every varint is a single byte long. - s = make([]int, len(bytes)+1) - - // The first varint is 40*value1 + value2: - // According to this packing, value1 can take the values 0, 1 and 2 only. - // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, - // then there are no restrictions on value2. - v, offset, err := parseBase128Int(bytes, 0) - if err != nil { - return - } - if v < 80 { - s[0] = v / 40 - s[1] = v % 40 - } else { - s[0] = 2 - s[1] = v - 80 - } - - i := 2 - for ; offset < len(bytes); i++ { - v, offset, err = parseBase128Int(bytes, offset) - if err != nil { - return - } - s[i] = v - } - s = s[0:i] - return -} - -// ENUMERATED - -// An Enumerated is represented as a plain int. -type Enumerated int - -// FLAG - -// A Flag accepts any data and is set to true if present. -type Flag bool - -// parseBase128Int parses a base-128 encoded int from the given offset in the -// given byte slice. It returns the value and the new offset. -func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) { - offset = initOffset - for shifted := 0; offset < len(bytes); shifted++ { - if shifted == 4 { - err = StructuralError{"base 128 integer too large"} - return - } - ret <<= 7 - b := bytes[offset] - ret |= int(b & 0x7f) - offset++ - if b&0x80 == 0 { - return - } - } - err = SyntaxError{"truncated base 128 integer"} - return -} - -// UTCTime - -func parseUTCTime(bytes []byte) (ret time.Time, err error) { - s := string(bytes) - - formatStr := "0601021504Z0700" - ret, err = time.Parse(formatStr, s) - if err != nil { - formatStr = "060102150405Z0700" - ret, err = time.Parse(formatStr, s) - } - if err != nil { - return - } - - if serialized := ret.Format(formatStr); serialized != s { - err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) - return - } - - if ret.Year() >= 2050 { - // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 - ret = ret.AddDate(-100, 0, 0) - } - - return -} - -// parseGeneralizedTime parses the GeneralizedTime from the given byte slice -// and returns the resulting time. -func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { - const formatStr = "20060102150405Z0700" - s := string(bytes) - - if ret, err = time.Parse(formatStr, s); err != nil { - return - } - - if serialized := ret.Format(formatStr); serialized != s { - err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) - } - - return -} - -// PrintableString - -// parsePrintableString parses a ASN.1 PrintableString from the given byte -// array and returns it. -func parsePrintableString(bytes []byte) (ret string, err error) { - for _, b := range bytes { - if !isPrintable(b) { - err = SyntaxError{"PrintableString contains invalid character"} - return - } - } - ret = string(bytes) - return -} - -// isPrintable reports whether the given b is in the ASN.1 PrintableString set. -func isPrintable(b byte) bool { - return 'a' <= b && b <= 'z' || - 'A' <= b && b <= 'Z' || - '0' <= b && b <= '9' || - '\'' <= b && b <= ')' || - '+' <= b && b <= '/' || - b == ' ' || - b == ':' || - b == '=' || - b == '?' 
|| - // This is technically not allowed in a PrintableString. - // However, x509 certificates with wildcard strings don't - // always use the correct string type so we permit it. - b == '*' -} - -// IA5String - -// parseIA5String parses a ASN.1 IA5String (ASCII string) from the given -// byte slice and returns it. -func parseIA5String(bytes []byte) (ret string, err error) { - for _, b := range bytes { - if b >= utf8.RuneSelf { - err = SyntaxError{"IA5String contains invalid character"} - return - } - } - ret = string(bytes) - return -} - -// T61String - -// parseT61String parses a ASN.1 T61String (8-bit clean string) from the given -// byte slice and returns it. -func parseT61String(bytes []byte) (ret string, err error) { - return string(bytes), nil -} - -// UTF8String - -// parseUTF8String parses a ASN.1 UTF8String (raw UTF-8) from the given byte -// array and returns it. -func parseUTF8String(bytes []byte) (ret string, err error) { - if !utf8.Valid(bytes) { - return "", errors.New("asn1: invalid UTF-8 string") - } - return string(bytes), nil -} - -// A RawValue represents an undecoded ASN.1 object. -type RawValue struct { - Class, Tag int - IsCompound bool - Bytes []byte - FullBytes []byte // includes the tag and length -} - -// RawContent is used to signal that the undecoded, DER data needs to be -// preserved for a struct. To use it, the first field of the struct must have -// this type. It's an error for any of the other fields to have this type. -type RawContent []byte - -// Tagging - -// parseTagAndLength parses an ASN.1 tag and length pair from the given offset -// into a byte slice. It returns the parsed data and the new offset. SET and -// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we -// don't distinguish between ordered and unordered objects in this code. -func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) { - offset = initOffset - // parseTagAndLength should not be called without at least a single - // byte to read. Thus this check is for robustness: - if offset >= len(bytes) { - err = errors.New("asn1: internal error in parseTagAndLength") - return - } - b := bytes[offset] - offset++ - ret.class = int(b >> 6) - ret.isCompound = b&0x20 == 0x20 - ret.tag = int(b & 0x1f) - - // If the bottom five bits are set, then the tag number is actually base 128 - // encoded afterwards - if ret.tag == 0x1f { - ret.tag, offset, err = parseBase128Int(bytes, offset) - if err != nil { - return - } - // Tags should be encoded in minimal form. - if ret.tag < 0x1f { - err = SyntaxError{"non-minimal tag"} - return - } - } - if offset >= len(bytes) { - err = SyntaxError{"truncated tag or length"} - return - } - b = bytes[offset] - offset++ - if b&0x80 == 0 { - // The length is encoded in the bottom 7 bits. - ret.length = int(b & 0x7f) - } else { - // Bottom 7 bits give the number of length bytes to follow. - numBytes := int(b & 0x7f) - if numBytes == 0 { - err = SyntaxError{"indefinite length found (not DER)"} - return - } - ret.length = 0 - for i := 0; i < numBytes; i++ { - if offset >= len(bytes) { - err = SyntaxError{"truncated tag or length"} - return - } - b = bytes[offset] - offset++ - if ret.length >= 1<<23 { - // We can't shift ret.length up without - // overflowing. - err = StructuralError{"length too large"} - return - } - ret.length <<= 8 - ret.length |= int(b) - if ret.length == 0 { - // DER requires that lengths be minimal. 
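// (Illustrative: 256 must be encoded as 0x82 0x01 0x00; the four-byte
// form 0x83 0x00 0x01 0x00 carries a superfluous leading zero byte and
// fails this check.)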
- err = StructuralError{"superfluous leading zeros in length"} - return - } - } - // Short lengths must be encoded in short form. - if ret.length < 0x80 { - err = StructuralError{"non-minimal length"} - return - } - } - - return -} - -// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse -// a number of ASN.1 values from the given byte slice and returns them as a -// slice of Go values of the given type. -func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) { - expectedTag, compoundType, ok := getUniversalType(elemType) - if !ok { - err = StructuralError{"unknown Go type for slice"} - return - } - - // First we iterate over the input and count the number of elements, - // checking that the types are correct in each case. - numElements := 0 - for offset := 0; offset < len(bytes); { - var t tagAndLength - t, offset, err = parseTagAndLength(bytes, offset) - if err != nil { - return - } - switch t.tag { - case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: - // We pretend that various other string types are - // PRINTABLE STRINGs so that a sequence of them can be - // parsed into a []string. - t.tag = TagPrintableString - case TagGeneralizedTime, TagUTCTime: - // Likewise, both time types are treated the same. - t.tag = TagUTCTime - } - - if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag { - err = StructuralError{"sequence tag mismatch"} - return - } - if invalidLength(offset, t.length, len(bytes)) { - err = SyntaxError{"truncated sequence"} - return - } - offset += t.length - numElements++ - } - ret = reflect.MakeSlice(sliceType, numElements, numElements) - params := fieldParameters{} - offset := 0 - for i := 0; i < numElements; i++ { - offset, err = parseField(ret.Index(i), bytes, offset, params) - if err != nil { - return - } - } - return -} - -var ( - bitStringType = reflect.TypeOf(BitString{}) - objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) - enumeratedType = reflect.TypeOf(Enumerated(0)) - flagType = reflect.TypeOf(Flag(false)) - timeType = reflect.TypeOf(time.Time{}) - rawValueType = reflect.TypeOf(RawValue{}) - rawContentsType = reflect.TypeOf(RawContent(nil)) - bigIntType = reflect.TypeOf(new(big.Int)) -) - -// invalidLength returns true iff offset + length > sliceLength, or if the -// addition would overflow. -func invalidLength(offset, length, sliceLength int) bool { - return offset+length < offset || offset+length > sliceLength -} - -// parseField is the main parsing function. Given a byte slice and an offset -// into the array, it will try to parse a suitable ASN.1 value out and store it -// in the given Value. -func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { - offset = initOffset - fieldType := v.Type() - - // If we have run out of data, it may be that there are optional elements at the end. - if offset == len(bytes) { - if !setDefaultValue(v, params) { - err = SyntaxError{"sequence truncated"} - } - return - } - - // Deal with raw values. 
- if fieldType == rawValueType { - var t tagAndLength - t, offset, err = parseTagAndLength(bytes, offset) - if err != nil { - return - } - if invalidLength(offset, t.length, len(bytes)) { - err = SyntaxError{"data truncated"} - return - } - result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]} - offset += t.length - v.Set(reflect.ValueOf(result)) - return - } - - // Deal with the ANY type. - if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { - var t tagAndLength - t, offset, err = parseTagAndLength(bytes, offset) - if err != nil { - return - } - if invalidLength(offset, t.length, len(bytes)) { - err = SyntaxError{"data truncated"} - return - } - var result interface{} - if !t.isCompound && t.class == ClassUniversal { - innerBytes := bytes[offset : offset+t.length] - switch t.tag { - case TagPrintableString: - result, err = parsePrintableString(innerBytes) - case TagIA5String: - result, err = parseIA5String(innerBytes) - // jtasn1 addition of following case - case TagGeneralString: - result, err = parseIA5String(innerBytes) - case TagT61String: - result, err = parseT61String(innerBytes) - case TagUTF8String: - result, err = parseUTF8String(innerBytes) - case TagInteger: - result, err = parseInt64(innerBytes) - case TagBitString: - result, err = parseBitString(innerBytes) - case TagOID: - result, err = parseObjectIdentifier(innerBytes) - case TagUTCTime: - result, err = parseUTCTime(innerBytes) - case TagGeneralizedTime: - result, err = parseGeneralizedTime(innerBytes) - case TagOctetString: - result = innerBytes - default: - // If we don't know how to handle the type, we just leave Value as nil. - } - } - offset += t.length - if err != nil { - return - } - if result != nil { - v.Set(reflect.ValueOf(result)) - } - return - } - universalTag, compoundType, ok1 := getUniversalType(fieldType) - if !ok1 { - err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)} - return - } - - t, offset, err := parseTagAndLength(bytes, offset) - if err != nil { - return - } - if params.explicit { - expectedClass := ClassContextSpecific - if params.application { - expectedClass = ClassApplication - } - if offset == len(bytes) { - err = StructuralError{"explicit tag has no child"} - return - } - if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { - if t.length > 0 { - t, offset, err = parseTagAndLength(bytes, offset) - if err != nil { - return - } - } else { - if fieldType != flagType { - err = StructuralError{"zero length explicit tag was not an asn1.Flag"} - return - } - v.SetBool(true) - return - } - } else { - // The tags didn't match, it might be an optional element. - ok := setDefaultValue(v, params) - if ok { - offset = initOffset - } else { - err = StructuralError{"explicitly tagged member didn't match"} - } - return - } - } - - // Special case for strings: all the ASN.1 string types map to the Go - // type string. getUniversalType returns the tag for PrintableString - // when it sees a string, so if we see a different string type on the - // wire, we change the universal type to match. - if universalTag == TagPrintableString { - if t.class == ClassUniversal { - switch t.tag { - case TagIA5String, TagGeneralString, TagT61String, TagUTF8String: - universalTag = t.tag - } - } else if params.stringType != 0 { - universalTag = params.stringType - } - } - - // Special case for time: UTCTime and GeneralizedTime both map to the - // Go type time.Time. 
- if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { - universalTag = TagGeneralizedTime - } - - if params.set { - universalTag = TagSet - } - - expectedClass := ClassUniversal - expectedTag := universalTag - - if !params.explicit && params.tag != nil { - expectedClass = ClassContextSpecific - expectedTag = *params.tag - } - - if !params.explicit && params.application && params.tag != nil { - expectedClass = ClassApplication - expectedTag = *params.tag - } - - // We have unwrapped any explicit tagging at this point. - if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType { - // Tags don't match. Again, it could be an optional element. - ok := setDefaultValue(v, params) - if ok { - offset = initOffset - } else { - err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)} - } - return - } - if invalidLength(offset, t.length, len(bytes)) { - err = SyntaxError{"data truncated"} - return - } - innerBytes := bytes[offset : offset+t.length] - offset += t.length - - // We deal with the structures defined in this package first. - switch fieldType { - case objectIdentifierType: - newSlice, err1 := parseObjectIdentifier(innerBytes) - v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) - if err1 == nil { - reflect.Copy(v, reflect.ValueOf(newSlice)) - } - err = err1 - return - case bitStringType: - bs, err1 := parseBitString(innerBytes) - if err1 == nil { - v.Set(reflect.ValueOf(bs)) - } - err = err1 - return - case timeType: - var time time.Time - var err1 error - if universalTag == TagUTCTime { - time, err1 = parseUTCTime(innerBytes) - } else { - time, err1 = parseGeneralizedTime(innerBytes) - } - if err1 == nil { - v.Set(reflect.ValueOf(time)) - } - err = err1 - return - case enumeratedType: - parsedInt, err1 := parseInt32(innerBytes) - if err1 == nil { - v.SetInt(int64(parsedInt)) - } - err = err1 - return - case flagType: - v.SetBool(true) - return - case bigIntType: - parsedInt, err1 := parseBigInt(innerBytes) - if err1 == nil { - v.Set(reflect.ValueOf(parsedInt)) - } - err = err1 - return - } - switch val := v; val.Kind() { - case reflect.Bool: - parsedBool, err1 := parseBool(innerBytes) - if err1 == nil { - val.SetBool(parsedBool) - } - err = err1 - return - case reflect.Int, reflect.Int32, reflect.Int64: - if val.Type().Size() == 4 { - parsedInt, err1 := parseInt32(innerBytes) - if err1 == nil { - val.SetInt(int64(parsedInt)) - } - err = err1 - } else { - parsedInt, err1 := parseInt64(innerBytes) - if err1 == nil { - val.SetInt(parsedInt) - } - err = err1 - } - return - // TODO(dfc) Add support for the remaining integer types - case reflect.Struct: - structType := fieldType - - if structType.NumField() > 0 && - structType.Field(0).Type == rawContentsType { - bytes := bytes[initOffset:offset] - val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) - } - - innerOffset := 0 - for i := 0; i < structType.NumField(); i++ { - field := structType.Field(i) - if i == 0 && field.Type == rawContentsType { - continue - } - innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1"))) - if err != nil { - return - } - } - // We allow extra bytes at the end of the SEQUENCE because - // adding elements to the end has been used in X.509 as the - // version numbers have increased. 
- return - case reflect.Slice: - sliceType := fieldType - if sliceType.Elem().Kind() == reflect.Uint8 { - val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) - reflect.Copy(val, reflect.ValueOf(innerBytes)) - return - } - newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem()) - if err1 == nil { - val.Set(newSlice) - } - err = err1 - return - case reflect.String: - var v string - switch universalTag { - case TagPrintableString: - v, err = parsePrintableString(innerBytes) - case TagIA5String: - v, err = parseIA5String(innerBytes) - case TagT61String: - v, err = parseT61String(innerBytes) - case TagUTF8String: - v, err = parseUTF8String(innerBytes) - case TagGeneralString: - // GeneralString is specified in ISO-2022/ECMA-35, - // A brief review suggests that it includes structures - // that allow the encoding to change midstring and - // such. We give up and pass it as an 8-bit string. - v, err = parseT61String(innerBytes) - default: - err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)} - } - if err == nil { - val.SetString(v) - } - return - } - err = StructuralError{"unsupported: " + v.Type().String()} - return -} - -// canHaveDefaultValue reports whether k is a Kind that we will set a default -// value for. (A signed integer, essentially.) -func canHaveDefaultValue(k reflect.Kind) bool { - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - } - - return false -} - -// setDefaultValue is used to install a default value, from a tag string, into -// a Value. It is successful if the field was optional, even if a default value -// wasn't provided or it failed to install it into the Value. -func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { - if !params.optional { - return - } - ok = true - if params.defaultValue == nil { - return - } - if canHaveDefaultValue(v.Kind()) { - v.SetInt(*params.defaultValue) - } - return -} - -// Unmarshal parses the DER-encoded ASN.1 data structure b -// and uses the reflect package to fill in an arbitrary value pointed at by val. -// Because Unmarshal uses the reflect package, the structs -// being written to must use upper case field names. -// -// An ASN.1 INTEGER can be written to an int, int32, int64, -// or *big.Int (from the math/big package). -// If the encoded value does not fit in the Go type, -// Unmarshal returns a parse error. -// -// An ASN.1 BIT STRING can be written to a BitString. -// -// An ASN.1 OCTET STRING can be written to a []byte. -// -// An ASN.1 OBJECT IDENTIFIER can be written to an -// ObjectIdentifier. -// -// An ASN.1 ENUMERATED can be written to an Enumerated. -// -// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. -// -// An ASN.1 PrintableString or IA5String can be written to a string. -// -// Any of the above ASN.1 values can be written to an interface{}. -// The value stored in the interface has the corresponding Go type. -// For integers, that type is int64. -// -// An ASN.1 SEQUENCE OF x or SET OF x can be written -// to a slice if an x can be written to the slice's element type. -// -// An ASN.1 SEQUENCE or SET can be written to a struct -// if each of the elements in the sequence can be -// written to the corresponding element in the struct. 
-// -// The following tags on struct fields have special meaning to Unmarshal: -// -// application specifies that a APPLICATION tag is used -// default:x sets the default value for optional integer fields -// explicit specifies that an additional, explicit tag wraps the implicit one -// optional marks the field as ASN.1 OPTIONAL -// set causes a SET, rather than a SEQUENCE type to be expected -// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC -// -// If the type of the first field of a structure is RawContent then the raw -// ASN1 contents of the struct will be stored in it. -// -// If the type name of a slice element ends with "SET" then it's treated as if -// the "set" tag was set on it. This can be used with nested slices where a -// struct tag cannot be given. -// -// Other ASN.1 types are not supported; if it encounters them, -// Unmarshal returns a parse error. -func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { - return UnmarshalWithParams(b, val, "") -} - -// UnmarshalWithParams allows field parameters to be specified for the -// top-level element. The form of the params is the same as the field tags. -func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { - v := reflect.ValueOf(val).Elem() - offset, err := parseField(v, b, 0, parseFieldParameters(params)) - if err != nil { - return nil, err - } - return b[offset:], nil -} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go deleted file mode 100644 index 7a9da49f396..00000000000 --- a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asn1 - -import ( - "reflect" - "strconv" - "strings" -) - -// ASN.1 objects have metadata preceding them: -// the tag: the type of the object -// a flag denoting if this object is compound or not -// the class type: the namespace of the tag -// the length of the object, in bytes - -// Here are some standard tags and classes - -// ASN.1 tags represent the type of the following object. -const ( - TagBoolean = 1 - TagInteger = 2 - TagBitString = 3 - TagOctetString = 4 - TagOID = 6 - TagEnum = 10 - TagUTF8String = 12 - TagSequence = 16 - TagSet = 17 - TagPrintableString = 19 - TagT61String = 20 - TagIA5String = 22 - TagUTCTime = 23 - TagGeneralizedTime = 24 - TagGeneralString = 27 -) - -// ASN.1 class types represent the namespace of the tag. -const ( - ClassUniversal = 0 - ClassApplication = 1 - ClassContextSpecific = 2 - ClassPrivate = 3 -) - -type tagAndLength struct { - class, tag, length int - isCompound bool -} - -// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead -// of" and "in addition to". When not specified, every primitive type has a -// default tag in the UNIVERSAL class. -// -// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 -// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT -// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. -// -// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an -// /additional/ tag would wrap the default tag. This explicit tag will have the -// compound flag set. -// -// (This is used in order to remove ambiguity with optional elements.) 
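The tag grammar documented above is easiest to absorb from a concrete declaration. A minimal, hypothetical sketch of how these field tags drive Unmarshal and round-trip through Marshal follows; the type and values are illustrative, not taken from this patch:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// Illustrative type: Version is OPTIONAL with DEFAULT 1, and Comment is
// wrapped in an EXPLICIT [0] context-specific tag, per the tag list above.
type record struct {
	Version int    `asn1:"optional,default:1"`
	Comment string `asn1:"explicit,tag:0"`
}

func main() {
	der, err := asn1.Marshal(record{Version: 2, Comment: "hello"})
	if err != nil {
		panic(err)
	}
	var r record
	if _, err := asn1.Unmarshal(der, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // {Version:2 Comment:hello}
}
```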
-// -// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we -// don't support that here. We support a single layer of EXPLICIT or IMPLICIT -// tagging with tag strings on the fields of a structure. - -// fieldParameters is the parsed representation of tag string from a structure field. -type fieldParameters struct { - optional bool // true iff the field is OPTIONAL - explicit bool // true iff an EXPLICIT tag is in use. - application bool // true iff an APPLICATION tag is in use. - defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). - tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). - stringType int // the string tag to use when marshaling. - timeType int // the time tag to use when marshaling. - set bool // true iff this should be encoded as a SET - omitEmpty bool // true iff this should be omitted if empty when marshaling. - - // Invariants: - // if explicit is set, tag is non-nil. -} - -// Given a tag string with the format specified in the package comment, -// parseFieldParameters will parse it into a fieldParameters structure, -// ignoring unknown parts of the string. -func parseFieldParameters(str string) (ret fieldParameters) { - for _, part := range strings.Split(str, ",") { - switch { - case part == "optional": - ret.optional = true - case part == "explicit": - ret.explicit = true - if ret.tag == nil { - ret.tag = new(int) - } - case part == "generalized": - ret.timeType = TagGeneralizedTime - case part == "utc": - ret.timeType = TagUTCTime - case part == "ia5": - ret.stringType = TagIA5String - // jtasn1 case below added - case part == "generalstring": - ret.stringType = TagGeneralString - case part == "printable": - ret.stringType = TagPrintableString - case part == "utf8": - ret.stringType = TagUTF8String - case strings.HasPrefix(part, "default:"): - i, err := strconv.ParseInt(part[8:], 10, 64) - if err == nil { - ret.defaultValue = new(int64) - *ret.defaultValue = i - } - case strings.HasPrefix(part, "tag:"): - i, err := strconv.Atoi(part[4:]) - if err == nil { - ret.tag = new(int) - *ret.tag = i - } - case part == "set": - ret.set = true - case part == "application": - ret.application = true - if ret.tag == nil { - ret.tag = new(int) - } - case part == "omitempty": - ret.omitEmpty = true - } - } - return -} - -// Given a reflected Go type, getUniversalType returns the default tag number -// and expected compound flag. 
-func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) { - switch t { - case objectIdentifierType: - return TagOID, false, true - case bitStringType: - return TagBitString, false, true - case timeType: - return TagUTCTime, false, true - case enumeratedType: - return TagEnum, false, true - case bigIntType: - return TagInteger, false, true - } - switch t.Kind() { - case reflect.Bool: - return TagBoolean, false, true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return TagInteger, false, true - case reflect.Struct: - return TagSequence, true, true - case reflect.Slice: - if t.Elem().Kind() == reflect.Uint8 { - return TagOctetString, false, true - } - if strings.HasSuffix(t.Name(), "SET") { - return TagSet, true, true - } - return TagSequence, true, true - case reflect.String: - return TagPrintableString, false, true - } - return 0, false, false -} diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go deleted file mode 100644 index f52eee9d261..00000000000 --- a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package asn1 - -import ( - "bytes" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "time" - "unicode/utf8" -) - -// A forkableWriter is an in-memory buffer that can be -// 'forked' to create new forkableWriters that bracket the -// original. After -// pre, post := w.fork() -// the overall sequence of bytes represented is logically w+pre+post. -type forkableWriter struct { - *bytes.Buffer - pre, post *forkableWriter -} - -func newForkableWriter() *forkableWriter { - return &forkableWriter{new(bytes.Buffer), nil, nil} -} - -func (f *forkableWriter) fork() (pre, post *forkableWriter) { - if f.pre != nil || f.post != nil { - panic("have already forked") - } - f.pre = newForkableWriter() - f.post = newForkableWriter() - return f.pre, f.post -} - -func (f *forkableWriter) Len() (l int) { - l += f.Buffer.Len() - if f.pre != nil { - l += f.pre.Len() - } - if f.post != nil { - l += f.post.Len() - } - return -} - -func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) { - n, err = out.Write(f.Bytes()) - if err != nil { - return - } - - var nn int - - if f.pre != nil { - nn, err = f.pre.writeTo(out) - n += nn - if err != nil { - return - } - } - - if f.post != nil { - nn, err = f.post.writeTo(out) - n += nn - } - return -} - -func marshalBase128Int(out *forkableWriter, n int64) (err error) { - if n == 0 { - err = out.WriteByte(0) - return - } - - l := 0 - for i := n; i > 0; i >>= 7 { - l++ - } - - for i := l - 1; i >= 0; i-- { - o := byte(n >> uint(i*7)) - o &= 0x7f - if i != 0 { - o |= 0x80 - } - err = out.WriteByte(o) - if err != nil { - return - } - } - - return nil -} - -func marshalInt64(out *forkableWriter, i int64) (err error) { - n := int64Length(i) - - for ; n > 0; n-- { - err = out.WriteByte(byte(i >> uint((n-1)*8))) - if err != nil { - return - } - } - - return nil -} - -func int64Length(i int64) (numBytes int) { - numBytes = 1 - - for i > 127 { - numBytes++ - i >>= 8 - } - - for i < -128 { - numBytes++ - i >>= 8 - } - - return -} - -func marshalBigInt(out *forkableWriter, n *big.Int) (err error) { - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. 
If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - err = out.WriteByte(0xff) - if err != nil { - return - } - } - _, err = out.Write(bytes) - } else if n.Sign() == 0 { - // Zero is written as a single 0 zero rather than no bytes. - err = out.WriteByte(0x00) - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with 0x00 in order to stop it - // looking like a negative number. - err = out.WriteByte(0) - if err != nil { - return - } - } - _, err = out.Write(bytes) - } - return -} - -func marshalLength(out *forkableWriter, i int) (err error) { - n := lengthLength(i) - - for ; n > 0; n-- { - err = out.WriteByte(byte(i >> uint((n-1)*8))) - if err != nil { - return - } - } - - return nil -} - -func lengthLength(i int) (numBytes int) { - numBytes = 1 - for i > 255 { - numBytes++ - i >>= 8 - } - return -} - -func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) { - b := uint8(t.class) << 6 - if t.isCompound { - b |= 0x20 - } - if t.tag >= 31 { - b |= 0x1f - err = out.WriteByte(b) - if err != nil { - return - } - err = marshalBase128Int(out, int64(t.tag)) - if err != nil { - return - } - } else { - b |= uint8(t.tag) - err = out.WriteByte(b) - if err != nil { - return - } - } - - if t.length >= 128 { - l := lengthLength(t.length) - err = out.WriteByte(0x80 | byte(l)) - if err != nil { - return - } - err = marshalLength(out, t.length) - if err != nil { - return - } - } else { - err = out.WriteByte(byte(t.length)) - if err != nil { - return - } - } - - return nil -} - -func marshalBitString(out *forkableWriter, b BitString) (err error) { - paddingBits := byte((8 - b.BitLength%8) % 8) - err = out.WriteByte(paddingBits) - if err != nil { - return - } - _, err = out.Write(b.Bytes) - return -} - -func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) { - if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) { - return StructuralError{"invalid object identifier"} - } - - err = marshalBase128Int(out, int64(oid[0]*40+oid[1])) - if err != nil { - return - } - for i := 2; i < len(oid); i++ { - err = marshalBase128Int(out, int64(oid[i])) - if err != nil { - return - } - } - - return -} - -func marshalPrintableString(out *forkableWriter, s string) (err error) { - b := []byte(s) - for _, c := range b { - if !isPrintable(c) { - return StructuralError{"PrintableString contains invalid character"} - } - } - - _, err = out.Write(b) - return -} - -func marshalIA5String(out *forkableWriter, s string) (err error) { - b := []byte(s) - for _, c := range b { - if c > 127 { - return StructuralError{"IA5String contains invalid character"} - } - } - - _, err = out.Write(b) - return -} - -func marshalUTF8String(out *forkableWriter, s string) (err error) { - _, err = out.Write([]byte(s)) - return -} - -func marshalTwoDigits(out *forkableWriter, v int) (err error) { - err = out.WriteByte(byte('0' + (v/10)%10)) - if err != nil { - return - } - return out.WriteByte(byte('0' + v%10)) -} - -func marshalFourDigits(out *forkableWriter, v int) (err error) { - var bytes [4]byte - for i := range bytes { - bytes[3-i] = '0' + byte(v%10) - v /= 10 - } - _, err = out.Write(bytes[:]) - return -} - -func outsideUTCRange(t time.Time) bool { - year := t.Year() - return year < 1950 || year 
>= 2050 -} - -func marshalUTCTime(out *forkableWriter, t time.Time) (err error) { - year := t.Year() - - switch { - case 1950 <= year && year < 2000: - err = marshalTwoDigits(out, year-1900) - case 2000 <= year && year < 2050: - err = marshalTwoDigits(out, year-2000) - default: - return StructuralError{"cannot represent time as UTCTime"} - } - if err != nil { - return - } - - return marshalTimeCommon(out, t) -} - -func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) { - year := t.Year() - if year < 0 || year > 9999 { - return StructuralError{"cannot represent time as GeneralizedTime"} - } - if err = marshalFourDigits(out, year); err != nil { - return - } - - return marshalTimeCommon(out, t) -} - -func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) { - _, month, day := t.Date() - - err = marshalTwoDigits(out, int(month)) - if err != nil { - return - } - - err = marshalTwoDigits(out, day) - if err != nil { - return - } - - hour, min, sec := t.Clock() - - err = marshalTwoDigits(out, hour) - if err != nil { - return - } - - err = marshalTwoDigits(out, min) - if err != nil { - return - } - - err = marshalTwoDigits(out, sec) - if err != nil { - return - } - - _, offset := t.Zone() - - switch { - case offset/60 == 0: - err = out.WriteByte('Z') - return - case offset > 0: - err = out.WriteByte('+') - case offset < 0: - err = out.WriteByte('-') - } - - if err != nil { - return - } - - offsetMinutes := offset / 60 - if offsetMinutes < 0 { - offsetMinutes = -offsetMinutes - } - - err = marshalTwoDigits(out, offsetMinutes/60) - if err != nil { - return - } - - err = marshalTwoDigits(out, offsetMinutes%60) - return -} - -func stripTagAndLength(in []byte) []byte { - _, offset, err := parseTagAndLength(in, 0) - if err != nil { - return in - } - return in[offset:] -} - -func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) { - switch value.Type() { - case flagType: - return nil - case timeType: - t := value.Interface().(time.Time) - if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { - return marshalGeneralizedTime(out, t) - } else { - return marshalUTCTime(out, t) - } - case bitStringType: - return marshalBitString(out, value.Interface().(BitString)) - case objectIdentifierType: - return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier)) - case bigIntType: - return marshalBigInt(out, value.Interface().(*big.Int)) - } - - switch v := value; v.Kind() { - case reflect.Bool: - if v.Bool() { - return out.WriteByte(255) - } else { - return out.WriteByte(0) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return marshalInt64(out, v.Int()) - case reflect.Struct: - t := v.Type() - - startingField := 0 - - // If the first element of the structure is a non-empty - // RawContents, then we don't bother serializing the rest. 
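The RawContents special case that this comment introduces is easier to see from the caller's side. A small sketch with a hypothetical type, exercising only the documented behaviour that a leading RawContent field captures, and is re-emitted as, the raw DER of the enclosing SEQUENCE:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

type envelope struct {
	Raw     asn1.RawContent // filled in by Unmarshal with the full DER bytes
	Payload string
}

func main() {
	der, _ := asn1.Marshal(envelope{Payload: "hi"}) // Raw is empty: fields are serialized
	var e envelope
	if _, err := asn1.Unmarshal(der, &e); err != nil {
		panic(err)
	}
	fmt.Printf("raw DER: %x, payload: %q\n", e.Raw, e.Payload)

	// On re-marshal, the non-empty Raw bytes are written back verbatim
	// (minus the outer tag and length, which are regenerated).
	der2, _ := asn1.Marshal(e)
	fmt.Println(len(der) == len(der2)) // true
}
```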
- if t.NumField() > 0 && t.Field(0).Type == rawContentsType { - s := v.Field(0) - if s.Len() > 0 { - bytes := make([]byte, s.Len()) - for i := 0; i < s.Len(); i++ { - bytes[i] = uint8(s.Index(i).Uint()) - } - /* The RawContents will contain the tag and - * length fields but we'll also be writing - * those ourselves, so we strip them out of - * bytes */ - _, err = out.Write(stripTagAndLength(bytes)) - return - } else { - startingField = 1 - } - } - - for i := startingField; i < t.NumField(); i++ { - var pre *forkableWriter - pre, out = out.fork() - err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1"))) - if err != nil { - return - } - } - return - case reflect.Slice: - sliceType := v.Type() - if sliceType.Elem().Kind() == reflect.Uint8 { - bytes := make([]byte, v.Len()) - for i := 0; i < v.Len(); i++ { - bytes[i] = uint8(v.Index(i).Uint()) - } - _, err = out.Write(bytes) - return - } - - // jtasn1 Pass on the tags to the members but need to unset explicit switch and implicit value - //var fp fieldParameters - params.explicit = false - params.tag = nil - for i := 0; i < v.Len(); i++ { - var pre *forkableWriter - pre, out = out.fork() - err = marshalField(pre, v.Index(i), params) - if err != nil { - return - } - } - return - case reflect.String: - switch params.stringType { - case TagIA5String: - return marshalIA5String(out, v.String()) - case TagPrintableString: - return marshalPrintableString(out, v.String()) - default: - return marshalUTF8String(out, v.String()) - } - } - - return StructuralError{"unknown Go type"} -} - -func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) { - if !v.IsValid() { - return fmt.Errorf("asn1: cannot marshal nil value") - } - // If the field is an interface{} then recurse into it. - if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { - return marshalField(out, v.Elem(), params) - } - - if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { - return - } - - if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { - defaultValue := reflect.New(v.Type()).Elem() - defaultValue.SetInt(*params.defaultValue) - - if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { - return - } - } - - // If no default value is given then the zero value for the type is - // assumed to be the default value. This isn't obviously the correct - // behaviour, but it's what Go has traditionally done. 
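As a one-line illustration of the convention just described (hypothetical type; the point is only that an `optional` field equal to its zero value vanishes from the encoding):

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

type msg struct {
	Name  string
	Flags int `asn1:"optional"`
}

func main() {
	zero, _ := asn1.Marshal(msg{Name: "a"})          // Flags == 0: omitted entirely
	set, _ := asn1.Marshal(msg{Name: "a", Flags: 7}) // Flags present as INTEGER 7
	fmt.Println(len(zero), len(set))                 // the second is three bytes longer
}
```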
- if params.optional && params.defaultValue == nil { - if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { - return - } - } - - if v.Type() == rawValueType { - rv := v.Interface().(RawValue) - if len(rv.FullBytes) != 0 { - _, err = out.Write(rv.FullBytes) - } else { - err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound}) - if err != nil { - return - } - _, err = out.Write(rv.Bytes) - } - return - } - - tag, isCompound, ok := getUniversalType(v.Type()) - if !ok { - err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())} - return - } - class := ClassUniversal - - if params.timeType != 0 && tag != TagUTCTime { - return StructuralError{"explicit time type given to non-time member"} - } - - // jtasn1 updated to allow slices of strings - if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == 16 && v.Type().Elem().Kind() == reflect.String)) { - return StructuralError{"explicit string type given to non-string member"} - } - - switch tag { - case TagPrintableString: - if params.stringType == 0 { - // This is a string without an explicit string type. We'll use - // a PrintableString if the character set in the string is - // sufficiently limited, otherwise we'll use a UTF8String. - for _, r := range v.String() { - if r >= utf8.RuneSelf || !isPrintable(byte(r)) { - if !utf8.ValidString(v.String()) { - return errors.New("asn1: string not valid UTF-8") - } - tag = TagUTF8String - break - } - } - } else { - tag = params.stringType - } - case TagUTCTime: - if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { - tag = TagGeneralizedTime - } - } - - if params.set { - if tag != TagSequence { - return StructuralError{"non sequence tagged as set"} - } - tag = TagSet - } - - tags, body := out.fork() - - err = marshalBody(body, v, params) - if err != nil { - return - } - - bodyLen := body.Len() - - var explicitTag *forkableWriter - if params.explicit { - explicitTag, tags = tags.fork() - } - - if !params.explicit && params.tag != nil { - // implicit tag. - tag = *params.tag - class = ClassContextSpecific - } - - err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound}) - if err != nil { - return - } - - if params.explicit { - err = marshalTagAndLength(explicitTag, tagAndLength{ - class: ClassContextSpecific, - tag: *params.tag, - length: bodyLen + tags.Len(), - isCompound: true, - }) - } - - return err -} - -// Marshal returns the ASN.1 encoding of val. -// -// In addition to the struct tags recognised by Unmarshal, the following can be -// used: -// -// ia5: causes strings to be marshaled as ASN.1, IA5 strings -// omitempty: causes empty slices to be skipped -// printable: causes strings to be marshaled as ASN.1, PrintableString strings. -// utf8: causes strings to be marshaled as ASN.1, UTF8 strings -func Marshal(val interface{}) ([]byte, error) { - var out bytes.Buffer - v := reflect.ValueOf(val) - f := newForkableWriter() - err := marshalField(f, v, fieldParameters{}) - if err != nil { - return nil, err - } - _, err = f.writeTo(&out) - return out.Bytes(), err -} diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go deleted file mode 100644 index 75d418763db..00000000000 --- a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC -2898 / PKCS #5 v2.0. - -A key derivation function is useful when encrypting data based on a password -or any other not-fully-random data. It uses a pseudorandom function to derive -a secure encryption key based on the password. - -While v2.0 of the standard defines only one pseudorandom function to use, -HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved -Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To -choose, you can pass the `New` functions from the different SHA packages to -pbkdf2.Key. -*/ -package pbkdf2 - -import ( - "crypto/hmac" - "hash" -) - -// Key derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. The key is -// derived based on the method described as PBKDF2 with the HMAC variant using -// the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { - return Key64(password, salt, int64(iter), int64(keyLen), h) -} - -// Key64 derives a key from the password, salt and iteration count, returning a -// []byte of length keylen that can be used as cryptographic key. Key64 uses -// int64 for the iteration count and key length to allow larger values. -// The key is derived based on the method described as PBKDF2 with the HMAC -// variant using the supplied hash function. -// -// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you -// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by -// doing: -// -// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) -// -// Remember to get a good random salt. At least 8 bytes is recommended by the -// RFC. -// -// Using a higher iteration count will increase the cost of an exhaustive -// search but will also make derivation proportionally slower. -func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte { - prf := hmac.New(h, password) - hashLen := int64(prf.Size()) - numBlocks := (keyLen + hashLen - 1) / hashLen - - var buf [4]byte - dk := make([]byte, 0, numBlocks*hashLen) - U := make([]byte, hashLen) - for block := int64(1); block <= numBlocks; block++ { - // N.B.: || means concatenation, ^ means XOR - // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter - // U_1 = PRF(password, salt || uint(i)) - prf.Reset() - prf.Write(salt) - buf[0] = byte(block >> 24) - buf[1] = byte(block >> 16) - buf[2] = byte(block >> 8) - buf[3] = byte(block) - prf.Write(buf[:4]) - dk = prf.Sum(dk) - T := dk[int64(len(dk))-hashLen:] - copy(U, T) - - // U_n = PRF(password, U_(n-1)) - for n := int64(2); n <= iter; n++ { - prf.Reset() - prf.Write(U) - U = U[:0] - U = prf.Sum(U) - for x := range U { - T[x] ^= U[x] - } - } - } - return dk[:keyLen] -} diff --git a/vendor/github.com/klauspost/pgzip/.gitignore b/vendor/github.com/klauspost/pgzip/.gitignore deleted file mode 100644 index daf913b1b34..00000000000 --- a/vendor/github.com/klauspost/pgzip/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/pgzip/.travis.yml b/vendor/github.com/klauspost/pgzip/.travis.yml deleted file mode 100644 index acfec4bb09d..00000000000 --- a/vendor/github.com/klauspost/pgzip/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -os: - - linux - - osx - -go: - - 1.13.x - - 1.14.x - - 1.15.x - - master - -env: - - GO111MODULE=off - -script: - - diff <(gofmt -d .) <(printf "") - - go test -v -cpu=1,2,4 . - - go test -v -cpu=2 -race -short . - -matrix: - allow_failures: - - go: 'master' - fast_finish: true diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE deleted file mode 100644 index 74487567632..00000000000 --- a/vendor/github.com/klauspost/pgzip/GO_LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
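Returning to the pbkdf2 package removed just above these license files: its entire public surface is the two Key functions, so usage is essentially a one-liner. A small sketch (the parameters are illustrative; the RFC's advice on salt length and iteration counts still applies):

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/jcmturner/gofork/x/crypto/pbkdf2"
)

func main() {
	// A random salt of at least 8 bytes, as recommended by the RFC.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// Derive a 32-byte key (suitable for AES-256) using HMAC-SHA-256.
	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("derived key: %x\n", dk)
}
```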
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE deleted file mode 100644 index 3909da4103b..00000000000 --- a/vendor/github.com/klauspost/pgzip/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2014 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md deleted file mode 100644 index 171b978fdc9..00000000000 --- a/vendor/github.com/klauspost/pgzip/README.md +++ /dev/null @@ -1,135 +0,0 @@ -pgzip -===== - -Go parallel gzip compression/decompression. This is a fully gzip compatible drop in replacement for "compress/gzip". - -This will split compression into blocks that are compressed in parallel. -This can be useful for compressing big amounts of data. The output is a standard gzip file. - -The gzip decompression is modified so it decompresses ahead of the current reader. -This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it. -CRC calculation also takes place in a separate goroutine. - -You should only use this if you are (de)compressing big amounts of data, -say **more than 1MB** at the time, otherwise you will not see any benefit, -and it will likely be faster to use the internal gzip library -or [this package](https://github.com/klauspost/compress). - -It is important to note that this library creates and reads *standard gzip files*. -You do not have to match the compressor/decompressor to get the described speedups, -and the gzip files are fully compatible with other gzip readers/writers. - -A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf), -which has the same feature, as well as seeking in the resulting file. -The only drawback is a slightly bigger overhead compared to this and pure gzip. -See a comparison below. - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg -[2]: https://godoc.org/github.com/klauspost/pgzip -[3]: https://travis-ci.org/klauspost/pgzip.svg -[4]: https://travis-ci.org/klauspost/pgzip - -Installation -==== -```go get github.com/klauspost/pgzip/...``` - -You might need to get/update the dependencies: - -``` -go get -u github.com/klauspost/compress -``` - -Usage -==== -[Godoc Doumentation](https://godoc.org/github.com/klauspost/pgzip) - -To use as a replacement for gzip, exchange - -```import "compress/gzip"``` -with -```import gzip "github.com/klauspost/pgzip"```. 
- -# Changes - -* Oct 6, 2016: Fixed an issue if the destination writer returned an error. -* Oct 6, 2016: Better buffer reuse, should now generate less garbage. -* Oct 6, 2016: Output does not change based on write sizes. -* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure. -* Oct 9, 2015: Reduced allocations by ~35 by using sync.Pool. ~15% overall speedup. - -Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes. - -## Compression -The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip). - -To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel. Default values for this is SetConcurrency(1MB, runtime.GOMAXPROCS(0)), meaning blocks are split at 1 MB and up to the number of CPU threads blocks can be processing at once before the writer blocks. - - -Example: -``` -var b bytes.Buffer -w := gzip.NewWriter(&b) -w.SetConcurrency(100000, 10) -w.Write([]byte("hello, world\n")) -w.Close() -``` - -To get any performance gains, you should at least be compressing more than 1 megabyte of data at the time. - -You should at least have a block size of 100k and at least a number of blocks that match the number of cores your would like to utilize, but about twice the number of blocks would be the best. - -Another side effect of this is, that it is likely to speed up your other code, since writes to the compressor only blocks if the compressor is already compressing the number of blocks you have specified. This also means you don't have worry about buffering input to the compressor. - -## Decompression - -Decompression works similar to compression. That means that you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip). - -The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom blocksizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that is decoded ahead. - -See [Example on playground](http://play.golang.org/p/uHv1B5NbDh) - -Performance -==== -## Compression - -See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/). - -Compression cost is usually about 0.2% with default settings with a block size of 250k. - -Example with GOMAXPROC set to 32 (16 core CPU) - -Content is [Matt Mahoneys 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6. 
- -Compressor | MB/sec | speedup | size | size overhead (lower=better) -------------|----------|---------|------|--------- -[gzip](http://golang.org/pkg/compress/gzip) (golang) | 15.44MB/s (1 thread) | 1.0x | 4781329307 | 0% -[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 135.04MB/s (1 thread) | 8.74x | 4894858258 | +2.37% -[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 1573.23MB/s| 101.9x | 4902285651 | +2.53% -[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 361.40MB/s | 23.4x | 4869686090 | +1.85% -[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 306.01MB/s | 19.8x | 4786890417 | +0.12% - -pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression-huffman-only) mode, that will allow compression at ~250MB per core per second, independent of the content. - -See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings. - -## Decompression - -The decompression speedup is there because it allows you to do other work while the decompression is taking place. - -In the example above, the numbers are as follows on a 4 CPU machine: - -Decompressor | Time | Speedup --------------|------|-------- -[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0% -[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104% - -But wait, since gzip decompression is inherently singlethreaded (aside from CRC calculation) how can it be more than 100% faster? Because pgzip due to its design also acts as a buffer. When using unbuffered gzip, you are also waiting for io when you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete. - -This is pretty much an optimal situation for pgzip, but it reflects most common usecases for CPU intensive gzip usage. - -I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it only can decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf compatible program, you can expect it to scale beyond 100%. - -# License -This contains large portions of code from the go repository - see GO_LICENSE for more information. The changes are released under MIT License. See LICENSE for more information. diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go deleted file mode 100644 index d1ae730b25f..00000000000 --- a/vendor/github.com/klauspost/pgzip/gunzip.go +++ /dev/null @@ -1,584 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pgzip implements reading and writing of gzip format compressed files, -// as specified in RFC 1952. -// -// This is a drop in replacement for "compress/gzip". -// This will split compression into blocks that are compressed in parallel. -// This can be useful for compressing big amounts of data. -// The gzip decompression has not been modified, but remains in the package, -// so you can use it as a complete replacement for "compress/gzip". 
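Since the README above only links out to a playground example for the read path, here is a minimal sketch of readahead decompression using NewReaderN, whose signature appears later in this file (the input file name and block parameters are illustrative):

```go
package main

import (
	"io"
	"os"

	gzip "github.com/klauspost/pgzip"
)

func main() {
	f, err := os.Open("input.gz") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// 1 MB blocks, up to 8 blocks decoded ahead of the reader.
	r, err := gzip.NewReaderN(f, 1<<20, 8)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```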
-// -// See more at https://github.com/klauspost/pgzip -package pgzip - -import ( - "bufio" - "errors" - "hash" - "hash/crc32" - "io" - "sync" - "time" - - "github.com/klauspost/compress/flate" -) - -const ( - gzipID1 = 0x1f - gzipID2 = 0x8b - gzipDeflate = 8 - flagText = 1 << 0 - flagHdrCrc = 1 << 1 - flagExtra = 1 << 2 - flagName = 1 << 3 - flagComment = 1 << 4 -) - -func makeReader(r io.Reader) flate.Reader { - if rr, ok := r.(flate.Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -var ( - // ErrChecksum is returned when reading GZIP data that has an invalid checksum. - ErrChecksum = errors.New("gzip: invalid checksum") - // ErrHeader is returned when reading GZIP data that has an invalid header. - ErrHeader = errors.New("gzip: invalid header") -) - -// The gzip file stores a header giving metadata about the compressed file. -// That header is exposed as the fields of the Writer and Reader structs. -type Header struct { - Comment string // comment - Extra []byte // "extra data" - ModTime time.Time // modification time - Name string // file name - OS byte // operating system type -} - -// A Reader is an io.Reader that can be read to retrieve -// uncompressed data from a gzip-format compressed file. -// -// In general, a gzip file can be a concatenation of gzip files, -// each with its own header. Reads from the Reader -// return the concatenation of the uncompressed data of each. -// Only the first header is recorded in the Reader fields. -// -// Gzip files store a length and checksum of the uncompressed data. -// The Reader will return a ErrChecksum when Read -// reaches the end of the uncompressed data if it does not -// have the expected length or checksum. Clients should treat data -// returned by Read as tentative until they receive the io.EOF -// marking the end of the data. -type Reader struct { - Header - r flate.Reader - decompressor io.ReadCloser - digest hash.Hash32 - size uint32 - flg byte - buf [512]byte - err error - closeErr chan error - multistream bool - - readAhead chan read - roff int // read offset - current []byte - closeReader chan struct{} - lastBlock bool - blockSize int - blocks int - - activeRA bool // Indication if readahead is active - mu sync.Mutex // Lock for above - - blockPool chan []byte -} - -type read struct { - b []byte - err error -} - -// NewReader creates a new Reader reading the given reader. -// The implementation buffers input and may read more data than necessary from r. -// It is the caller's responsibility to call Close on the Reader when done. -func NewReader(r io.Reader) (*Reader, error) { - z := new(Reader) - z.blocks = defaultBlocks - z.blockSize = defaultBlockSize - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.multistream = true - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - if err := z.readHeader(true); err != nil { - return nil, err - } - return z, nil -} - -// NewReaderN creates a new Reader reading the given reader. -// The implementation buffers input and may read more data than necessary from r. -// It is the caller's responsibility to call Close on the Reader when done. -// -// With this you can control the approximate size of your blocks, -// as well as how many blocks you want to have prefetched. -// -// Default values for this is blockSize = 250000, blocks = 16, -// meaning up to 16 blocks of maximum 250000 bytes will be -// prefetched. 
-func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { - z := new(Reader) - z.blocks = blocks - z.blockSize = blockSize - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.multistream = true - - // Account for too small values - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - if err := z.readHeader(true); err != nil { - return nil, err - } - return z, nil -} - -// Reset discards the Reader z's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) error { - z.killReadAhead() - z.r = makeReader(r) - z.digest = crc32.NewIEEE() - z.size = 0 - z.err = nil - z.multistream = true - - // Account for uninitialized values - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - - if z.blockPool == nil { - z.blockPool = make(chan []byte, z.blocks) - for i := 0; i < z.blocks; i++ { - z.blockPool <- make([]byte, z.blockSize) - } - } - - return z.readHeader(true) -} - -// Multistream controls whether the reader supports multistream files. -// -// If enabled (the default), the Reader expects the input to be a sequence -// of individually gzipped data streams, each with its own header and -// trailer, ending at EOF. The effect is that the concatenation of a sequence -// of gzipped files is treated as equivalent to the gzip of the concatenation -// of the sequence. This is standard behavior for gzip readers. -// -// Calling Multistream(false) disables this behavior; disabling the behavior -// can be useful when reading file formats that distinguish individual gzip -// data streams or mix gzip data streams with other data streams. -// In this mode, when the Reader reaches the end of the data stream, -// Read returns io.EOF. If the underlying reader implements io.ByteReader, -// it will be left positioned just after the gzip stream. -// To start the next stream, call z.Reset(r) followed by z.Multistream(false). -// If there is no next stream, z.Reset(r) will return io.EOF. -func (z *Reader) Multistream(ok bool) { - z.multistream = ok -} - -// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). -func get4(p []byte) uint32 { - return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 -} - -func (z *Reader) readString() (string, error) { - var err error - needconv := false - for i := 0; ; i++ { - if i >= len(z.buf) { - return "", ErrHeader - } - z.buf[i], err = z.r.ReadByte() - if err != nil { - return "", err - } - if z.buf[i] > 0x7f { - needconv = true - } - if z.buf[i] == 0 { - // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). 
- if needconv { - s := make([]rune, 0, i) - for _, v := range z.buf[0:i] { - s = append(s, rune(v)) - } - return string(s), nil - } - return string(z.buf[0:i]), nil - } - } -} - -func (z *Reader) read2() (uint32, error) { - _, err := io.ReadFull(z.r, z.buf[0:2]) - if err != nil { - return 0, err - } - return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil -} - -func (z *Reader) readHeader(save bool) error { - z.killReadAhead() - - _, err := io.ReadFull(z.r, z.buf[0:10]) - if err != nil { - return err - } - if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { - return ErrHeader - } - z.flg = z.buf[3] - if save { - z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) - // z.buf[8] is xfl, ignored - z.OS = z.buf[9] - } - z.digest.Reset() - z.digest.Write(z.buf[0:10]) - - if z.flg&flagExtra != 0 { - n, err := z.read2() - if err != nil { - return err - } - data := make([]byte, n) - if _, err = io.ReadFull(z.r, data); err != nil { - return err - } - if save { - z.Extra = data - } - } - - var s string - if z.flg&flagName != 0 { - if s, err = z.readString(); err != nil { - return err - } - if save { - z.Name = s - } - } - - if z.flg&flagComment != 0 { - if s, err = z.readString(); err != nil { - return err - } - if save { - z.Comment = s - } - } - - if z.flg&flagHdrCrc != 0 { - n, err := z.read2() - if err != nil { - return err - } - sum := z.digest.Sum32() & 0xFFFF - if n != sum { - return ErrHeader - } - } - - z.digest.Reset() - z.decompressor = flate.NewReader(z.r) - z.doReadAhead() - return nil -} - -func (z *Reader) killReadAhead() error { - z.mu.Lock() - defer z.mu.Unlock() - if z.activeRA { - if z.closeReader != nil { - close(z.closeReader) - } - - // Wait for decompressor to be closed and return error, if any. - e, ok := <-z.closeErr - z.activeRA = false - - for blk := range z.readAhead { - if blk.b != nil { - z.blockPool <- blk.b - } - } - if cap(z.current) > 0 { - z.blockPool <- z.current - z.current = nil - } - if !ok { - // Channel is closed, so if there was any error it has already been returned. - return nil - } - return e - } - return nil -} - -// Starts readahead. -// Will return on error (including io.EOF) -// or when z.closeReader is closed. -func (z *Reader) doReadAhead() { - z.mu.Lock() - defer z.mu.Unlock() - z.activeRA = true - - if z.blocks <= 0 { - z.blocks = defaultBlocks - } - if z.blockSize <= 512 { - z.blockSize = defaultBlockSize - } - ra := make(chan read, z.blocks) - z.readAhead = ra - closeReader := make(chan struct{}, 0) - z.closeReader = closeReader - z.lastBlock = false - closeErr := make(chan error, 1) - z.closeErr = closeErr - z.size = 0 - z.roff = 0 - z.current = nil - decomp := z.decompressor - - go func() { - defer func() { - closeErr <- decomp.Close() - close(closeErr) - close(ra) - }() - - // We hold a local reference to digest, since - // it way be changed by reset. - digest := z.digest - var wg sync.WaitGroup - for { - var buf []byte - select { - case buf = <-z.blockPool: - case <-closeReader: - return - } - buf = buf[0:z.blockSize] - // Try to fill the buffer - n, err := io.ReadFull(decomp, buf) - if err == io.ErrUnexpectedEOF { - if n > 0 { - err = nil - } else { - // If we got zero bytes, we need to establish if - // we reached end of stream or truncated stream. 
- _, err = decomp.Read([]byte{}) - if err == io.EOF { - err = nil - } - } - } - if n < len(buf) { - buf = buf[0:n] - } - wg.Wait() - wg.Add(1) - go func() { - digest.Write(buf) - wg.Done() - }() - z.size += uint32(n) - - // If we return any error, out digest must be ready - if err != nil { - wg.Wait() - } - select { - case z.readAhead <- read{b: buf, err: err}: - case <-closeReader: - // Sent on close, we don't care about the next results - z.blockPool <- buf - return - } - if err != nil { - return - } - } - }() -} - -func (z *Reader) Read(p []byte) (n int, err error) { - if z.err != nil { - return 0, z.err - } - if len(p) == 0 { - return 0, nil - } - - for { - if len(z.current) == 0 && !z.lastBlock { - read := <-z.readAhead - - if read.err != nil { - // If not nil, the reader will have exited - z.closeReader = nil - - if read.err != io.EOF { - z.err = read.err - return - } - if read.err == io.EOF { - z.lastBlock = true - err = nil - } - } - z.current = read.b - z.roff = 0 - } - avail := z.current[z.roff:] - if len(p) >= len(avail) { - // If len(p) >= len(current), return all content of current - n = copy(p, avail) - z.blockPool <- z.current - z.current = nil - if z.lastBlock { - err = io.EOF - break - } - } else { - // We copy as much as there is space for - n = copy(p, avail) - z.roff += n - } - return - } - - // Finished file; check checksum + size. - if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { - z.err = err - return 0, err - } - crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) - sum := z.digest.Sum32() - if sum != crc32 || isize != z.size { - z.err = ErrChecksum - return 0, z.err - } - - // File is ok; should we attempt reading one more? - if !z.multistream { - return 0, io.EOF - } - - // Is there another? - if err = z.readHeader(false); err != nil { - z.err = err - return - } - - // Yes. Reset and read from it. - return z.Read(p) -} - -func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { - total := int64(0) - for { - if z.err != nil { - return total, z.err - } - // We write both to output and digest. - for { - // Read from input - read := <-z.readAhead - if read.err != nil { - // If not nil, the reader will have exited - z.closeReader = nil - - if read.err != io.EOF { - z.err = read.err - return total, z.err - } - if read.err == io.EOF { - z.lastBlock = true - err = nil - } - } - // Write what we got - n, err := w.Write(read.b) - if n != len(read.b) { - return total, io.ErrShortWrite - } - total += int64(n) - if err != nil { - return total, err - } - // Put block back - z.blockPool <- read.b - if z.lastBlock { - break - } - } - - // Finished file; check checksum + size. - if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { - z.err = err - return total, err - } - crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) - sum := z.digest.Sum32() - if sum != crc32 || isize != z.size { - z.err = ErrChecksum - return total, z.err - } - // File is ok; should we attempt reading one more? - if !z.multistream { - return total, nil - } - - // Is there another? - err = z.readHeader(false) - if err == io.EOF { - return total, nil - } - if err != nil { - z.err = err - return total, err - } - } -} - -// Close closes the Reader. It does not close the underlying io.Reader. 
-func (z *Reader) Close() error { - return z.killReadAhead() -} diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go deleted file mode 100644 index 257c4d299f7..00000000000 --- a/vendor/github.com/klauspost/pgzip/gzip.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pgzip - -import ( - "bytes" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "runtime" - "sync" - "time" - - "github.com/klauspost/compress/flate" -) - -const ( - defaultBlockSize = 1 << 20 - tailSize = 16384 - defaultBlocks = 4 -) - -// These constants are copied from the flate package, so that code that imports -// "compress/gzip" does not also have to import "compress/flate". -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression - ConstantCompression = flate.ConstantCompression - HuffmanOnly = flate.HuffmanOnly -) - -// A Writer is an io.WriteCloser. -// Writes to a Writer are compressed and written to w. -type Writer struct { - Header - w io.Writer - level int - wroteHeader bool - blockSize int - blocks int - currentBuffer []byte - prevTail []byte - digest hash.Hash32 - size int - closed bool - buf [10]byte - errMu sync.RWMutex - err error - pushedErr chan struct{} - results chan result - dictFlatePool sync.Pool - dstPool sync.Pool - wg sync.WaitGroup -} - -type result struct { - result chan []byte - notifyWritten chan struct{} -} - -// Use SetConcurrency to finetune the concurrency level if needed. -// -// With this you can control the approximate size of your blocks, -// as well as how many you want to be processing in parallel. -// -// Default values for this is SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)), -// meaning blocks are split at 1 MB and up to the number of CPU threads -// can be processing at once before the writer blocks. -func (z *Writer) SetConcurrency(blockSize, blocks int) error { - if blockSize <= tailSize { - return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize) - } - if blocks <= 0 { - return errors.New("gzip: blocks cannot be zero or less") - } - if blockSize == z.blockSize && blocks == z.blocks { - return nil - } - z.blockSize = blockSize - z.results = make(chan result, blocks) - z.blocks = blocks - z.dstPool.New = func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) } - return nil -} - -// NewWriter returns a new Writer. -// Writes to the returned writer are compressed and written to w. -// -// It is the caller's responsibility to call Close on the WriteCloser when done. -// Writes may be buffered and not flushed until Close. -// -// Callers that wish to set the fields in Writer.Header must do so before -// the first call to Write or Close. The Comment and Name header fields are -// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO -// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an -// error on Write. -func NewWriter(w io.Writer) *Writer { - z, _ := NewWriterLevel(w, DefaultCompression) - return z -} - -// NewWriterLevel is like NewWriter but specifies the compression level instead -// of assuming DefaultCompression. -// -// The compression level can be DefaultCompression, NoCompression, or any -// integer value between BestSpeed and BestCompression inclusive. 
The error -// returned will be nil if the level is valid. -func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - if level < ConstantCompression || level > BestCompression { - return nil, fmt.Errorf("gzip: invalid compression level: %d", level) - } - z := new(Writer) - z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)) - z.init(w, level) - return z, nil -} - -// This function must be used by goroutines to set an -// error condition, since z.err access is restricted -// to the callers goruotine. -func (z *Writer) pushError(err error) { - z.errMu.Lock() - if z.err != nil { - z.errMu.Unlock() - return - } - z.err = err - close(z.pushedErr) - z.errMu.Unlock() -} - -func (z *Writer) init(w io.Writer, level int) { - z.wg.Wait() - digest := z.digest - if digest != nil { - digest.Reset() - } else { - digest = crc32.NewIEEE() - } - z.Header = Header{OS: 255} - z.w = w - z.level = level - z.digest = digest - z.pushedErr = make(chan struct{}, 0) - z.results = make(chan result, z.blocks) - z.err = nil - z.closed = false - z.Comment = "" - z.Extra = nil - z.ModTime = time.Time{} - z.wroteHeader = false - z.currentBuffer = nil - z.buf = [10]byte{} - z.prevTail = nil - z.size = 0 - if z.dictFlatePool.New == nil { - z.dictFlatePool.New = func() interface{} { - f, _ := flate.NewWriterDict(w, level, nil) - return f - } - } -} - -// Reset discards the Writer z's state and makes it equivalent to the -// result of its original state from NewWriter or NewWriterLevel, but -// writing to w instead. This permits reusing a Writer rather than -// allocating a new one. -func (z *Writer) Reset(w io.Writer) { - if z.results != nil && !z.closed { - close(z.results) - } - z.SetConcurrency(defaultBlockSize, runtime.GOMAXPROCS(0)) - z.init(w, z.level) -} - -// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). -func put2(p []byte, v uint16) { - p[0] = uint8(v >> 0) - p[1] = uint8(v >> 8) -} - -func put4(p []byte, v uint32) { - p[0] = uint8(v >> 0) - p[1] = uint8(v >> 8) - p[2] = uint8(v >> 16) - p[3] = uint8(v >> 24) -} - -// writeBytes writes a length-prefixed byte slice to z.w. -func (z *Writer) writeBytes(b []byte) error { - if len(b) > 0xffff { - return errors.New("gzip.Write: Extra data is too large") - } - put2(z.buf[0:2], uint16(len(b))) - _, err := z.w.Write(z.buf[0:2]) - if err != nil { - return err - } - _, err = z.w.Write(b) - return err -} - -// writeString writes a UTF-8 string s in GZIP's format to z.w. -// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). -func (z *Writer) writeString(s string) (err error) { - // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. - needconv := false - for _, v := range s { - if v == 0 || v > 0xff { - return errors.New("gzip.Write: non-Latin-1 header string") - } - if v > 0x7f { - needconv = true - } - } - if needconv { - b := make([]byte, 0, len(s)) - for _, v := range s { - b = append(b, byte(v)) - } - _, err = z.w.Write(b) - } else { - _, err = io.WriteString(z.w, s) - } - if err != nil { - return err - } - // GZIP strings are NUL-terminated. - z.buf[0] = 0 - _, err = z.w.Write(z.buf[0:1]) - return err -} - -// compressCurrent will compress the data currently buffered -// This should only be called from the main writer/flush/closer -func (z *Writer) compressCurrent(flush bool) { - c := z.currentBuffer - if len(c) > z.blockSize { - // This can never happen through the public interface. 
- panic("len(z.currentBuffer) > z.blockSize (most likely due to concurrent Write race)") - } - - r := result{} - r.result = make(chan []byte, 1) - r.notifyWritten = make(chan struct{}, 0) - // Reserve a result slot - select { - case z.results <- r: - case <-z.pushedErr: - return - } - - z.wg.Add(1) - tail := z.prevTail - if len(c) > tailSize { - buf := z.dstPool.Get().([]byte) // Put in .compressBlock - // Copy tail from current buffer before handing the buffer over to the - // compressBlock goroutine. - buf = append(buf[:0], c[len(c)-tailSize:]...) - z.prevTail = buf - } else { - z.prevTail = nil - } - go z.compressBlock(c, tail, r, z.closed) - - z.currentBuffer = z.dstPool.Get().([]byte) // Put in .compressBlock - z.currentBuffer = z.currentBuffer[:0] - - // Wait if flushing - if flush { - <-r.notifyWritten - } -} - -// Returns an error if it has been set. -// Cannot be used by functions that are from internal goroutines. -func (z *Writer) checkError() error { - z.errMu.RLock() - err := z.err - z.errMu.RUnlock() - return err -} - -// Write writes a compressed form of p to the underlying io.Writer. The -// compressed bytes are not necessarily flushed to output until -// the Writer is closed or Flush() is called. -// -// The function will return quickly, if there are unused buffers. -// The sent slice (p) is copied, and the caller is free to re-use the buffer -// when the function returns. -// -// Errors that occur during compression will be reported later, and a nil error -// does not signify that the compression succeeded (since it is most likely still running) -// That means that the call that returns an error may not be the call that caused it. -// Only Flush and Close functions are guaranteed to return any errors up to that point. -func (z *Writer) Write(p []byte) (int, error) { - if err := z.checkError(); err != nil { - return 0, err - } - // Write the GZIP header lazily. - if !z.wroteHeader { - z.wroteHeader = true - z.buf[0] = gzipID1 - z.buf[1] = gzipID2 - z.buf[2] = gzipDeflate - z.buf[3] = 0 - if z.Extra != nil { - z.buf[3] |= 0x04 - } - if z.Name != "" { - z.buf[3] |= 0x08 - } - if z.Comment != "" { - z.buf[3] |= 0x10 - } - put4(z.buf[4:8], uint32(z.ModTime.Unix())) - if z.level == BestCompression { - z.buf[8] = 2 - } else if z.level == BestSpeed { - z.buf[8] = 4 - } else { - z.buf[8] = 0 - } - z.buf[9] = z.OS - var n int - var err error - n, err = z.w.Write(z.buf[0:10]) - if err != nil { - z.pushError(err) - return n, err - } - if z.Extra != nil { - err = z.writeBytes(z.Extra) - if err != nil { - z.pushError(err) - return n, err - } - } - if z.Name != "" { - err = z.writeString(z.Name) - if err != nil { - z.pushError(err) - return n, err - } - } - if z.Comment != "" { - err = z.writeString(z.Comment) - if err != nil { - z.pushError(err) - return n, err - } - } - // Start receiving data from compressors - go func() { - listen := z.results - var failed bool - for { - r, ok := <-listen - // If closed, we are finished. 
- if !ok { - return - } - if failed { - close(r.notifyWritten) - continue - } - buf := <-r.result - n, err := z.w.Write(buf) - if err != nil { - z.pushError(err) - close(r.notifyWritten) - failed = true - continue - } - if n != len(buf) { - z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf))) - failed = true - close(r.notifyWritten) - continue - } - z.dstPool.Put(buf) - close(r.notifyWritten) - } - }() - z.currentBuffer = z.dstPool.Get().([]byte) - z.currentBuffer = z.currentBuffer[:0] - } - q := p - for len(q) > 0 { - length := len(q) - if length+len(z.currentBuffer) > z.blockSize { - length = z.blockSize - len(z.currentBuffer) - } - z.digest.Write(q[:length]) - z.currentBuffer = append(z.currentBuffer, q[:length]...) - if len(z.currentBuffer) > z.blockSize { - panic("z.currentBuffer too large (most likely due to concurrent Write race)") - } - if len(z.currentBuffer) == z.blockSize { - z.compressCurrent(false) - if err := z.checkError(); err != nil { - return len(p) - len(q), err - } - } - z.size += length - q = q[length:] - } - return len(p), z.checkError() -} - -// Step 1: compresses buffer to buffer -// Step 2: send writer to channel -// Step 3: Close result channel to indicate we are done -func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) { - defer func() { - close(r.result) - z.wg.Done() - }() - buf := z.dstPool.Get().([]byte) // Corresponding Put in .Write's result writer - dest := bytes.NewBuffer(buf[:0]) - - compressor := z.dictFlatePool.Get().(*flate.Writer) // Put below - compressor.ResetDict(dest, prevTail) - compressor.Write(p) - z.dstPool.Put(p) // Corresponding Get in .Write and .compressCurrent - - err := compressor.Flush() - if err != nil { - z.pushError(err) - return - } - if closed { - err = compressor.Close() - if err != nil { - z.pushError(err) - return - } - } - z.dictFlatePool.Put(compressor) // Get above - - if prevTail != nil { - z.dstPool.Put(prevTail) // Get in .compressCurrent - } - - // Read back buffer - buf = dest.Bytes() - r.result <- buf -} - -// Flush flushes any pending compressed data to the underlying writer. -// -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. Flush does -// not return until the data has been written. If the underlying -// writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (z *Writer) Flush() error { - if err := z.checkError(); err != nil { - return err - } - if z.closed { - return nil - } - if !z.wroteHeader { - _, err := z.Write(nil) - if err != nil { - return err - } - } - // We send current block to compression - z.compressCurrent(true) - - return z.checkError() -} - -// UncompressedSize will return the number of bytes written. -// pgzip only, not a function in the official gzip package. -func (z *Writer) UncompressedSize() int { - return z.size -} - -// Close closes the Writer, flushing any unwritten data to the underlying -// io.Writer, but does not close the underlying io.Writer. 
-func (z *Writer) Close() error { - if err := z.checkError(); err != nil { - return err - } - if z.closed { - return nil - } - - z.closed = true - if !z.wroteHeader { - z.Write(nil) - if err := z.checkError(); err != nil { - return err - } - } - z.compressCurrent(true) - if err := z.checkError(); err != nil { - return err - } - close(z.results) - put4(z.buf[0:4], z.digest.Sum32()) - put4(z.buf[4:8], uint32(z.size)) - _, err := z.w.Write(z.buf[0:8]) - if err != nil { - z.pushError(err) - return err - } - return nil -} diff --git a/vendor/github.com/oliveagle/jsonpath/.gitignore b/vendor/github.com/oliveagle/jsonpath/.gitignore deleted file mode 100644 index c469a41f6bc..00000000000 --- a/vendor/github.com/oliveagle/jsonpath/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.sw[op] - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof -.idea diff --git a/vendor/github.com/oliveagle/jsonpath/.travis.yml b/vendor/github.com/oliveagle/jsonpath/.travis.yml deleted file mode 100644 index abf693ce9b1..00000000000 --- a/vendor/github.com/oliveagle/jsonpath/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.5 - - 1.5.1 - - 1.6.2 - -os: linux diff --git a/vendor/github.com/oliveagle/jsonpath/jsonpath.go b/vendor/github.com/oliveagle/jsonpath/jsonpath.go deleted file mode 100644 index 00dc6fde807..00000000000 --- a/vendor/github.com/oliveagle/jsonpath/jsonpath.go +++ /dev/null @@ -1,722 +0,0 @@ -package jsonpath - -import ( - "errors" - "fmt" - "go/token" - "go/types" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ErrGetFromNullObj = errors.New("get attribute from null object") - -func JsonPathLookup(obj interface{}, jpath string) (interface{}, error) { - c, err := Compile(jpath) - if err != nil { - return nil, err - } - return c.Lookup(obj) -} - -type Compiled struct { - path string - steps []step -} - -type step struct { - op string - key string - args interface{} -} - -func MustCompile(jpath string) *Compiled { - c, err := Compile(jpath) - if err != nil { - panic(err) - } - return c -} - -func Compile(jpath string) (*Compiled, error) { - tokens, err := tokenize(jpath) - if err != nil { - return nil, err - } - if tokens[0] != "@" && tokens[0] != "$" { - return nil, fmt.Errorf("$ or @ should in front of path") - } - tokens = tokens[1:] - res := Compiled{ - path: jpath, - steps: make([]step, len(tokens)), - } - for i, token := range tokens { - op, key, args, err := parse_token(token) - if err != nil { - return nil, err - } - res.steps[i] = step{op, key, args} - } - return &res, nil -} - -func (c *Compiled) String() string { - return fmt.Sprintf("Compiled lookup: %s", c.path) -} - -func (c *Compiled) Lookup(obj interface{}) (interface{}, error) { - var err error - for _, s := range c.steps { - // "key", "idx" - switch s.op { - case "key": - obj, err = get_key(obj, s.key) - if err != nil { - return nil, err - } - case "idx": - if len(s.key) > 0 { - // no key `$[0].test` - obj, err = get_key(obj, s.key) - if err != nil { - return nil, err - } - } - - if len(s.args.([]int)) > 1 { - res := []interface{}{} - for _, x := range s.args.([]int) { - //fmt.Println("idx ---- ", x) - tmp, err := get_idx(obj, x) - if err != nil { - return nil, err - } - res = append(res, tmp) - } - obj = res - } else if len(s.args.([]int)) == 1 { - //fmt.Println("idx 
----------------3") - obj, err = get_idx(obj, s.args.([]int)[0]) - if err != nil { - return nil, err - } - } else { - //fmt.Println("idx ----------------4") - return nil, fmt.Errorf("cannot index on empty slice") - } - case "range": - if len(s.key) > 0 { - // no key `$[:1].test` - obj, err = get_key(obj, s.key) - if err != nil { - return nil, err - } - } - if argsv, ok := s.args.([2]interface{}); ok == true { - obj, err = get_range(obj, argsv[0], argsv[1]) - if err != nil { - return nil, err - } - } else { - return nil, fmt.Errorf("range args length should be 2") - } - case "filter": - obj, err = get_key(obj, s.key) - if err != nil { - return nil, err - } - obj, err = get_filtered(obj, obj, s.args.(string)) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("expression don't support in filter") - } - } - return obj, nil -} - -func tokenize(query string) ([]string, error) { - tokens := []string{} - // token_start := false - // token_end := false - token := "" - - // fmt.Println("-------------------------------------------------- start") - for idx, x := range query { - token += string(x) - // //fmt.Printf("idx: %d, x: %s, token: %s, tokens: %v\n", idx, string(x), token, tokens) - if idx == 0 { - if token == "$" || token == "@" { - tokens = append(tokens, token[:]) - token = "" - continue - } else { - return nil, fmt.Errorf("should start with '$'") - } - } - if token == "." { - continue - } else if token == ".." { - if tokens[len(tokens)-1] != "*" { - tokens = append(tokens, "*") - } - token = "." - continue - } else { - // fmt.Println("else: ", string(x), token) - if strings.Contains(token, "[") { - // fmt.Println(" contains [ ") - if x == ']' && !strings.HasSuffix(token, "\\]") { - if token[0] == '.' { - tokens = append(tokens, token[1:]) - } else { - tokens = append(tokens, token[:]) - } - token = "" - continue - } - } else { - // fmt.Println(" doesn't contains [ ") - if x == '.' { - if token[0] == '.' { - tokens = append(tokens, token[1:len(token)-1]) - } else { - tokens = append(tokens, token[:len(token)-1]) - } - token = "." - continue - } - } - } - } - if len(token) > 0 { - if token[0] == '.' 
{ - token = token[1:] - if token != "*" { - tokens = append(tokens, token[:]) - } else if tokens[len(tokens)-1] != "*" { - tokens = append(tokens, token[:]) - } - } else { - if token != "*" { - tokens = append(tokens, token[:]) - } else if tokens[len(tokens)-1] != "*" { - tokens = append(tokens, token[:]) - } - } - } - // fmt.Println("finished tokens: ", tokens) - // fmt.Println("================================================= done ") - return tokens, nil -} - -/* - op: "root", "key", "idx", "range", "filter", "scan" -*/ -func parse_token(token string) (op string, key string, args interface{}, err error) { - if token == "$" { - return "root", "$", nil, nil - } - if token == "*" { - return "scan", "*", nil, nil - } - - bracket_idx := strings.Index(token, "[") - if bracket_idx < 0 { - return "key", token, nil, nil - } else { - key = token[:bracket_idx] - tail := token[bracket_idx:] - if len(tail) < 3 { - err = fmt.Errorf("len(tail) should >=3, %v", tail) - return - } - tail = tail[1 : len(tail)-1] - - //fmt.Println(key, tail) - if strings.Contains(tail, "?") { - // filter ------------------------------------------------- - op = "filter" - if strings.HasPrefix(tail, "?(") && strings.HasSuffix(tail, ")") { - args = strings.Trim(tail[2:len(tail)-1], " ") - } - return - } else if strings.Contains(tail, ":") { - // range ---------------------------------------------- - op = "range" - tails := strings.Split(tail, ":") - if len(tails) != 2 { - err = fmt.Errorf("only support one range(from, to): %v", tails) - return - } - var frm interface{} - var to interface{} - if frm, err = strconv.Atoi(strings.Trim(tails[0], " ")); err != nil { - if strings.Trim(tails[0], " ") == "" { - err = nil - } - frm = nil - } - if to, err = strconv.Atoi(strings.Trim(tails[1], " ")); err != nil { - if strings.Trim(tails[1], " ") == "" { - err = nil - } - to = nil - } - args = [2]interface{}{frm, to} - return - } else if tail == "*" { - op = "range" - args = [2]interface{}{nil, nil} - return - } else { - // idx ------------------------------------------------ - op = "idx" - res := []int{} - for _, x := range strings.Split(tail, ",") { - if i, err := strconv.Atoi(strings.Trim(x, " ")); err == nil { - res = append(res, i) - } else { - return "", "", nil, err - } - } - args = res - } - } - return op, key, args, nil -} - -func filter_get_from_explicit_path(obj interface{}, path string) (interface{}, error) { - steps, err := tokenize(path) - //fmt.Println("f: steps: ", steps, err) - //fmt.Println(path, steps) - if err != nil { - return nil, err - } - if steps[0] != "@" && steps[0] != "$" { - return nil, fmt.Errorf("$ or @ should in front of path") - } - steps = steps[1:] - xobj := obj - //fmt.Println("f: xobj", xobj) - for _, s := range steps { - op, key, args, err := parse_token(s) - // "key", "idx" - switch op { - case "key": - xobj, err = get_key(xobj, key) - if err != nil { - return nil, err - } - case "idx": - if len(args.([]int)) != 1 { - return nil, fmt.Errorf("don't support multiple index in filter") - } - xobj, err = get_key(xobj, key) - if err != nil { - return nil, err - } - xobj, err = get_idx(xobj, args.([]int)[0]) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("expression don't support in filter") - } - } - return xobj, nil -} - -func get_key(obj interface{}, key string) (interface{}, error) { - if reflect.TypeOf(obj) == nil { - return nil, ErrGetFromNullObj - } - switch reflect.TypeOf(obj).Kind() { - case reflect.Map: - // if obj came from stdlib json, its highly likely to be a 
map[string]interface{} - // in which case we can save having to iterate the map keys to work out if the - // key exists - if jsonMap, ok := obj.(map[string]interface{}); ok { - val, exists := jsonMap[key] - if !exists { - return nil, fmt.Errorf("key error: %s not found in object", key) - } - return val, nil - } - for _, kv := range reflect.ValueOf(obj).MapKeys() { - //fmt.Println(kv.String()) - if kv.String() == key { - return reflect.ValueOf(obj).MapIndex(kv).Interface(), nil - } - } - return nil, fmt.Errorf("key error: %s not found in object", key) - case reflect.Slice: - // slice we should get from all objects in it. - res := []interface{}{} - for i := 0; i < reflect.ValueOf(obj).Len(); i++ { - tmp, _ := get_idx(obj, i) - if v, err := get_key(tmp, key); err == nil { - res = append(res, v) - } - } - return res, nil - default: - return nil, fmt.Errorf("object is not map") - } -} - -func get_idx(obj interface{}, idx int) (interface{}, error) { - switch reflect.TypeOf(obj).Kind() { - case reflect.Slice: - length := reflect.ValueOf(obj).Len() - if idx >= 0 { - if idx >= length { - return nil, fmt.Errorf("index out of range: len: %v, idx: %v", length, idx) - } - return reflect.ValueOf(obj).Index(idx).Interface(), nil - } else { - // < 0 - _idx := length + idx - if _idx < 0 { - return nil, fmt.Errorf("index out of range: len: %v, idx: %v", length, idx) - } - return reflect.ValueOf(obj).Index(_idx).Interface(), nil - } - default: - return nil, fmt.Errorf("object is not Slice") - } -} - -func get_range(obj, frm, to interface{}) (interface{}, error) { - switch reflect.TypeOf(obj).Kind() { - case reflect.Slice: - length := reflect.ValueOf(obj).Len() - _frm := 0 - _to := length - if frm == nil { - frm = 0 - } - if to == nil { - to = length - 1 - } - if fv, ok := frm.(int); ok == true { - if fv < 0 { - _frm = length + fv - } else { - _frm = fv - } - } - if tv, ok := to.(int); ok == true { - if tv < 0 { - _to = length + tv + 1 - } else { - _to = tv + 1 - } - } - if _frm < 0 || _frm >= length { - return nil, fmt.Errorf("index [from] out of range: len: %v, from: %v", length, frm) - } - if _to < 0 || _to > length { - return nil, fmt.Errorf("index [to] out of range: len: %v, to: %v", length, to) - } - //fmt.Println("_frm, _to: ", _frm, _to) - res_v := reflect.ValueOf(obj).Slice(_frm, _to) - return res_v.Interface(), nil - default: - return nil, fmt.Errorf("object is not Slice") - } -} - -func regFilterCompile(rule string) (*regexp.Regexp, error) { - runes := []rune(rule) - if len(runes) <= 2 { - return nil, errors.New("empty rule") - } - - if runes[0] != '/' || runes[len(runes)-1] != '/' { - return nil, errors.New("invalid syntax. 
should be in `/pattern/` form") - } - runes = runes[1 : len(runes)-1] - return regexp.Compile(string(runes)) -} - -func get_filtered(obj, root interface{}, filter string) ([]interface{}, error) { - lp, op, rp, err := parse_filter(filter) - if err != nil { - return nil, err - } - - res := []interface{}{} - - switch reflect.TypeOf(obj).Kind() { - case reflect.Slice: - if op == "=~" { - // regexp - pat, err := regFilterCompile(rp) - if err != nil { - return nil, err - } - - for i := 0; i < reflect.ValueOf(obj).Len(); i++ { - tmp := reflect.ValueOf(obj).Index(i).Interface() - ok, err := eval_reg_filter(tmp, root, lp, pat) - if err != nil { - return nil, err - } - if ok == true { - res = append(res, tmp) - } - } - } else { - for i := 0; i < reflect.ValueOf(obj).Len(); i++ { - tmp := reflect.ValueOf(obj).Index(i).Interface() - ok, err := eval_filter(tmp, root, lp, op, rp) - if err != nil { - return nil, err - } - if ok == true { - res = append(res, tmp) - } - } - } - return res, nil - case reflect.Map: - if op == "=~" { - // regexp - pat, err := regFilterCompile(rp) - if err != nil { - return nil, err - } - - for _, kv := range reflect.ValueOf(obj).MapKeys() { - tmp := reflect.ValueOf(obj).MapIndex(kv).Interface() - ok, err := eval_reg_filter(tmp, root, lp, pat) - if err != nil { - return nil, err - } - if ok == true { - res = append(res, tmp) - } - } - } else { - for _, kv := range reflect.ValueOf(obj).MapKeys() { - tmp := reflect.ValueOf(obj).MapIndex(kv).Interface() - ok, err := eval_filter(tmp, root, lp, op, rp) - if err != nil { - return nil, err - } - if ok == true { - res = append(res, tmp) - } - } - } - default: - return nil, fmt.Errorf("don't support filter on this type: %v", reflect.TypeOf(obj).Kind()) - } - - return res, nil -} - -// @.isbn => @.isbn, exists, nil -// @.price < 10 => @.price, <, 10 -// @.price <= $.expensive => @.price, <=, $.expensive -// @.author =~ /.*REES/i => @.author, match, /.*REES/i - -func parse_filter(filter string) (lp string, op string, rp string, err error) { - tmp := "" - - stage := 0 - str_embrace := false - for idx, c := range filter { - switch c { - case '\'': - if str_embrace == false { - str_embrace = true - } else { - switch stage { - case 0: - lp = tmp - case 1: - op = tmp - case 2: - rp = tmp - } - tmp = "" - } - case ' ': - if str_embrace == true { - tmp += string(c) - continue - } - switch stage { - case 0: - lp = tmp - case 1: - op = tmp - case 2: - rp = tmp - } - tmp = "" - - stage += 1 - if stage > 2 { - return "", "", "", errors.New(fmt.Sprintf("invalid char at %d: `%c`", idx, c)) - } - default: - tmp += string(c) - } - } - if tmp != "" { - switch stage { - case 0: - lp = tmp - op = "exists" - case 1: - op = tmp - case 2: - rp = tmp - } - tmp = "" - } - return lp, op, rp, err -} - -func parse_filter_v1(filter string) (lp string, op string, rp string, err error) { - tmp := "" - istoken := false - for _, c := range filter { - if istoken == false && c != ' ' { - istoken = true - } - if istoken == true && c == ' ' { - istoken = false - } - if istoken == true { - tmp += string(c) - } - if istoken == false && tmp != "" { - if lp == "" { - lp = tmp[:] - tmp = "" - } else if op == "" { - op = tmp[:] - tmp = "" - } else if rp == "" { - rp = tmp[:] - tmp = "" - } - } - } - if tmp != "" && lp == "" && op == "" && rp == "" { - lp = tmp[:] - op = "exists" - rp = "" - err = nil - return - } else if tmp != "" && rp == "" { - rp = tmp[:] - tmp = "" - } - return lp, op, rp, err -} - -func eval_reg_filter(obj, root interface{}, lp string, pat *regexp.Regexp) 
(res bool, err error) { - if pat == nil { - return false, errors.New("nil pat") - } - lp_v, err := get_lp_v(obj, root, lp) - if err != nil { - return false, err - } - switch v := lp_v.(type) { - case string: - return pat.MatchString(v), nil - default: - return false, errors.New("only string can match with regular expression") - } -} - -func get_lp_v(obj, root interface{}, lp string) (interface{}, error) { - var lp_v interface{} - if strings.HasPrefix(lp, "@.") { - return filter_get_from_explicit_path(obj, lp) - } else if strings.HasPrefix(lp, "$.") { - return filter_get_from_explicit_path(root, lp) - } else { - lp_v = lp - } - return lp_v, nil -} - -func eval_filter(obj, root interface{}, lp, op, rp string) (res bool, err error) { - lp_v, err := get_lp_v(obj, root, lp) - - if op == "exists" { - return lp_v != nil, nil - } else if op == "=~" { - return false, fmt.Errorf("not implemented yet") - } else { - var rp_v interface{} - if strings.HasPrefix(rp, "@.") { - rp_v, err = filter_get_from_explicit_path(obj, rp) - } else if strings.HasPrefix(rp, "$.") { - rp_v, err = filter_get_from_explicit_path(root, rp) - } else { - rp_v = rp - } - //fmt.Printf("lp_v: %v, rp_v: %v\n", lp_v, rp_v) - return cmp_any(lp_v, rp_v, op) - } -} - -func isNumber(o interface{}) bool { - switch v := o.(type) { - case int, int8, int16, int32, int64: - return true - case uint, uint8, uint16, uint32, uint64: - return true - case float32, float64: - return true - case string: - _, err := strconv.ParseFloat(v, 64) - if err == nil { - return true - } else { - return false - } - } - return false -} - -func cmp_any(obj1, obj2 interface{}, op string) (bool, error) { - switch op { - case "<", "<=", "==", ">=", ">": - default: - return false, fmt.Errorf("op should only be <, <=, ==, >= and >") - } - - var exp string - if isNumber(obj1) && isNumber(obj2) { - exp = fmt.Sprintf(`%v %s %v`, obj1, op, obj2) - } else { - exp = fmt.Sprintf(`"%v" %s "%v"`, obj1, op, obj2) - } - //fmt.Println("exp: ", exp) - fset := token.NewFileSet() - res, err := types.Eval(fset, nil, 0, exp) - if err != nil { - return false, err - } - if res.IsValue() == false || (res.Value.String() != "false" && res.Value.String() != "true") { - return false, fmt.Errorf("result should only be true or false") - } - if res.Value.String() == "true" { - return true, nil - } - - return false, nil -} diff --git a/vendor/github.com/oliveagle/jsonpath/readme.md b/vendor/github.com/oliveagle/jsonpath/readme.md deleted file mode 100644 index a8ee2dbfa76..00000000000 --- a/vendor/github.com/oliveagle/jsonpath/readme.md +++ /dev/null @@ -1,114 +0,0 @@ -JsonPath ----------------- - -![Build Status](https://travis-ci.org/oliveagle/jsonpath.svg?branch=master) - -A golang implementation of JsonPath syntax. -follow the majority rules in http://goessner.net/articles/JsonPath/ -but also with some minor differences. - -this library is till bleeding edge, so use it at your own risk. 
:D - -**Golang Version Required**: 1.5+ - -Get Started ------------- - -```bash -go get github.com/oliveagle/jsonpath -``` - -example code: - -```go -import ( - "github.com/oliveagle/jsonpath" - "encoding/json" -) - -var json_data interface{} -json.Unmarshal([]byte(data), &json_data) - -res, err := jsonpath.JsonPathLookup(json_data, "$.expensive") - -//or reuse lookup pattern -pat, _ := jsonpath.Compile(`$.store.book[?(@.price < $.expensive)].price`) -res, err := pat.Lookup(json_data) -``` - -Operators --------- -referenced from github.com/jayway/JsonPath - -| Operator | Supported | Description | -| ---- | :---: | ---------- | -| $ | Y | The root element to query. This starts all path expressions. | -| @ | Y | The current node being processed by a filter predicate. | -| * | X | Wildcard. Available anywhere a name or numeric are required. | -| .. | X | Deep scan. Available anywhere a name is required. | -| . | Y | Dot-notated child | -| ['' (, '')] | X | Bracket-notated child or children | -| [ (, )] | Y | Array index or indexes | -| [start:end] | Y | Array slice operator | -| [?()] | Y | Filter expression. Expression must evaluate to a boolean value. | - -Examples --------- -given these example data. - -```javascript -{ - "store": { - "book": [ - { - "category": "reference", - "author": "Nigel Rees", - "title": "Sayings of the Century", - "price": 8.95 - }, - { - "category": "fiction", - "author": "Evelyn Waugh", - "title": "Sword of Honour", - "price": 12.99 - }, - { - "category": "fiction", - "author": "Herman Melville", - "title": "Moby Dick", - "isbn": "0-553-21311-3", - "price": 8.99 - }, - { - "category": "fiction", - "author": "J. R. R. Tolkien", - "title": "The Lord of the Rings", - "isbn": "0-395-19395-8", - "price": 22.99 - } - ], - "bicycle": { - "color": "red", - "price": 19.95 - } - }, - "expensive": 10 -} -``` -example json path syntax. ----- - -| jsonpath | result| -| :--------- | :-------| -| $.expensive | 10| -| $.store.book[0].price | 8.95| -| $.store.book[-1].isbn | "0-395-19395-8"| -| $.store.book[0,1].price | [8.95, 12.99] | -| $.store.book[0:2].price | [8.95, 12.99, 8.99]| -| $.store.book[?(@.isbn)].price | [8.99, 22.99] | -| $.store.book[?(@.price > 10)].title | ["Sword of Honour", "The Lord of the Rings"]| -| $.store.book[?(@.price < $.expensive)].price | [8.95, 8.99] | -| $.store.book[:].price | [8.9.5, 12.99, 8.9.9, 22.99] | -| $.store.book[?(@.author =~ /(?i).*REES/)].author | "Nigel Rees" | - -> Note: golang support regular expression flags in form of `(?imsU)pattern` \ No newline at end of file diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore deleted file mode 100644 index ff36b987f07..00000000000 --- a/vendor/github.com/shopspring/decimal/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -.git -*.swp - -# IntelliJ -.idea/ -*.iml - -# VS code -*.code-workspace diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml deleted file mode 100644 index 6326d40f0e9..00000000000 --- a/vendor/github.com/shopspring/decimal/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -arch: - - amd64 - - ppc64le - -go: - - 1.7.x - - 1.14.x - - 1.15.x - - 1.16.x - - 1.17.x - - tip - -install: - - go build . 
- -script: - - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md deleted file mode 100644 index aea61154b8c..00000000000 --- a/vendor/github.com/shopspring/decimal/CHANGELOG.md +++ /dev/null @@ -1,49 +0,0 @@ -## Decimal v1.3.1 - -#### ENHANCEMENTS -- Reduce memory allocation in case of initialization from big.Int [#252](https://github.com/shopspring/decimal/pull/252) - -#### BUGFIXES -- Fix binary marshalling of decimal zero value [#253](https://github.com/shopspring/decimal/pull/253) - -## Decimal v1.3.0 - -#### FEATURES -- Add NewFromFormattedString initializer [#184](https://github.com/shopspring/decimal/pull/184) -- Add NewNullDecimal initializer [#234](https://github.com/shopspring/decimal/pull/234) -- Add implementation of natural exponent function (Taylor, Hull-Abraham) [#229](https://github.com/shopspring/decimal/pull/229) -- Add RoundUp, RoundDown, RoundCeil, RoundFloor methods [#196](https://github.com/shopspring/decimal/pull/196) [#202](https://github.com/shopspring/decimal/pull/202) [#220](https://github.com/shopspring/decimal/pull/220) -- Add XML support for NullDecimal [#192](https://github.com/shopspring/decimal/pull/192) -- Add IsInteger method [#179](https://github.com/shopspring/decimal/pull/179) -- Add Copy helper method [#123](https://github.com/shopspring/decimal/pull/123) -- Add InexactFloat64 helper method [#205](https://github.com/shopspring/decimal/pull/205) -- Add CoefficientInt64 helper method [#244](https://github.com/shopspring/decimal/pull/244) - -#### ENHANCEMENTS -- Performance optimization of NewFromString init method [#198](https://github.com/shopspring/decimal/pull/198) -- Performance optimization of Abs and Round methods [#240](https://github.com/shopspring/decimal/pull/240) -- Additional tests (CI) for ppc64le architecture [#188](https://github.com/shopspring/decimal/pull/188) - -#### BUGFIXES -- Fix rounding in FormatFloat fallback path (roundShortest method, fix taken from Go main repository) [#161](https://github.com/shopspring/decimal/pull/161) -- Add slice range checks to UnmarshalBinary method [#232](https://github.com/shopspring/decimal/pull/232) - -## Decimal v1.2.0 - -#### BREAKING -- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) - -#### FEATURES -- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) -- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) -- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) - -#### ENHANCEMENTS -- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) -- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) -- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) -- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) - -#### BUGFIXES -- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) -- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE deleted file mode 100644 index ad2148aaf93..00000000000 --- a/vendor/github.com/shopspring/decimal/LICENSE +++ /dev/null @@ -1,45 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Spring, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -- Based on https://github.com/oguzbilgic/fpd, which has the following license: -""" -The MIT License (MIT) - -Copyright (c) 2013 Oguz Bilgic - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md deleted file mode 100644 index 2e35df068ea..00000000000 --- a/vendor/github.com/shopspring/decimal/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# decimal - -[![Build Status](https://app.travis-ci.com/shopspring/decimal.svg?branch=master)](https://app.travis-ci.com/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) - -Arbitrary-precision fixed-point decimal numbers in go. - -_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
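To make the exactness claim above concrete, here is a minimal, self-contained sketch (it uses only constructors documented later in this README; the expected outputs in the comments assume Go's default float64 formatting):

```go
package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	// Binary floating point cannot represent 0.1 or 0.2 exactly,
	// so the rounding error surfaces in the sum.
	var a, b float64 = 0.1, 0.2
	fmt.Println(a + b) // 0.30000000000000004

	// Fixed-point decimal keeps both operands and the sum exact.
	x := decimal.RequireFromString("0.1")
	y := decimal.RequireFromString("0.2")
	fmt.Println(x.Add(y)) // 0.3
}
```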
- -## Features - - * The zero-value is 0, and is safe to use without initialization - * Addition, subtraction, multiplication with no loss of precision - * Division with specified precision - * Database/sql serialization/deserialization - * JSON and XML serialization/deserialization - -## Install - -Run `go get github.com/shopspring/decimal` - -## Requirements - -Decimal library requires Go version `>=1.7` - -## Usage - -```go -package main - -import ( - "fmt" - "github.com/shopspring/decimal" -) - -func main() { - price, err := decimal.NewFromString("136.02") - if err != nil { - panic(err) - } - - quantity := decimal.NewFromInt(3) - - fee, _ := decimal.NewFromString(".035") - taxRate, _ := decimal.NewFromString(".08875") - - subtotal := price.Mul(quantity) - - preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) - - total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) - - fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 - fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 - fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 - fmt.Println("Total:", total) // Total: 459.824961375 - fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 -} -``` - -## Documentation - -http://godoc.org/github.com/shopspring/decimal - -## Production Usage - -* [Spring](https://shopspring.com/), since August 14, 2014. -* If you are using this in production, please let us know! - -## FAQ - -#### Why don't you just use float64? - -Because float64 (or any binary floating point type, actually) can't represent -numbers such as `0.1` exactly. - -Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that -it prints out `10`, but it actually prints `9.999999999999831`. Over time, -these small errors can really add up! - -#### Why don't you just use big.Rat? - -big.Rat is fine for representing rational numbers, but Decimal is better for -representing money. Why? Here's a (contrived) example: - -Let's say you use big.Rat, and you have two numbers, x and y, both -representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one -out, the string output has to stop somewhere (let's say it stops at 3 decimal -digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did -the other 0.001 go? - -Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE - -With Decimal, the strings being printed out represent the number exactly. So, -if you have `x = y = 1/3` (with precision 3), they will actually be equal to -0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is -unaccounted for! - -You still have to be careful. If you want to split a number `N` 3 ways, you -can't just send `N/3` to three different people. You have to pick one to send -`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. - -But, it is much easier to be careful with Decimal than with big.Rat. - -#### Why isn't the API similar to big.Int's? - -big.Int's API is built to reduce the number of memory allocations for maximal -performance. This makes sense for its use-case, but the trade-off is that the -API is awkward and easy to misuse. - -For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A -developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This -modifies `a` and sets `z` as an alias for `a`, which they might not expect. It -also modifies any other aliases to `a`. 
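The aliasing difference is easy to demonstrate side by side (a short sketch, assuming both packages are importable as shown):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/shopspring/decimal"
)

func main() {
	// big.Int: Add stores the result in its receiver, so a is
	// overwritten and z is merely another name for a.
	a, b := big.NewInt(1), big.NewInt(2)
	z := a.Add(a, b)
	fmt.Println(a, z) // 3 3

	// decimal: Add returns a fresh value; x and y are unchanged.
	x, y := decimal.NewFromInt(1), decimal.NewFromInt(2)
	sum := x.Add(y)
	fmt.Println(x, y, sum) // 1 2 3
}
```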
- -Here's an example of the subtle bugs you can introduce with big.Int's API: -https://play.golang.org/p/x2R_78pa8r - -In contrast, it's difficult to make such mistakes with decimal. Decimals -behave like other go numbers types: even though `a = b` will not deep copy -`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods -return new Decimals and do not modify the originals. The downside is that -this causes extra allocations, so Decimal is less performant. My assumption -is that if you're using Decimals, you probably care more about correctness -than performance. - -## License - -The MIT License (MIT) - -This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go deleted file mode 100644 index 9958d690206..00000000000 --- a/vendor/github.com/shopspring/decimal/decimal-go.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type decimal struct { - d [800]byte // digits, big-endian representation - nd int // number of digits used - dp int // decimal point - neg bool // negative flag - trunc bool // discarded nonzero digits beyond d[:nd] -} - -func (a *decimal) String() string { - n := 10 + a.nd - if a.dp > 0 { - n += a.dp - } - if a.dp < 0 { - n += -a.dp - } - - buf := make([]byte, n) - w := 0 - switch { - case a.nd == 0: - return "0" - - case a.dp <= 0: - // zeros fill space between decimal point and digits - buf[w] = '0' - w++ - buf[w] = '.' - w++ - w += digitZero(buf[w : w+-a.dp]) - w += copy(buf[w:], a.d[0:a.nd]) - - case a.dp < a.nd: - // decimal point in middle of digits - w += copy(buf[w:], a.d[0:a.dp]) - buf[w] = '.' - w++ - w += copy(buf[w:], a.d[a.dp:a.nd]) - - default: - // zeros fill space between digits and decimal point - w += copy(buf[w:], a.d[0:a.nd]) - w += digitZero(buf[w : w+a.dp-a.nd]) - } - return string(buf[0:w]) -} - -func digitZero(dst []byte) int { - for i := range dst { - dst[i] = '0' - } - return len(dst) -} - -// trim trailing zeros from number. -// (They are meaningless; the decimal point is tracked -// independent of the number of digits.) -func trim(a *decimal) { - for a.nd > 0 && a.d[a.nd-1] == '0' { - a.nd-- - } - if a.nd == 0 { - a.dp = 0 - } -} - -// Assign v to a. -func (a *decimal) Assign(v uint64) { - var buf [24]byte - - // Write reversed decimal in buf. - n := 0 - for v > 0 { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n++ - v = v1 - } - - // Reverse again to produce forward decimal in a.d. - a.nd = 0 - for n--; n >= 0; n-- { - a.d[a.nd] = buf[n] - a.nd++ - } - a.dp = a.nd - trim(a) -} - -// Maximum shift that we can do in one pass without overflow. -// A uint has 32 or 64 bits, and we have to be able to accommodate 9<> 63) -const maxShift = uintSize - 4 - -// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. 
-func rightShift(a *decimal, k uint) { - r := 0 // read pointer - w := 0 // write pointer - - // Pick up enough leading digits to cover first shift. - var n uint - for ; n>>k == 0; r++ { - if r >= a.nd { - if n == 0 { - // a == 0; shouldn't get here, but handle anyway. - a.nd = 0 - return - } - for n>>k == 0 { - n = n * 10 - r++ - } - break - } - c := uint(a.d[r]) - n = n*10 + c - '0' - } - a.dp -= r - 1 - - var mask uint = (1 << k) - 1 - - // Pick up a digit, put down a digit. - for ; r < a.nd; r++ { - c := uint(a.d[r]) - dig := n >> k - n &= mask - a.d[w] = byte(dig + '0') - w++ - n = n*10 + c - '0' - } - - // Put down extra digits. - for n > 0 { - dig := n >> k - n &= mask - if w < len(a.d) { - a.d[w] = byte(dig + '0') - w++ - } else if dig > 0 { - a.trunc = true - } - n = n * 10 - } - - a.nd = w - trim(a) -} - -// Cheat sheet for left shift: table indexed by shift count giving -// number of new digits that will be introduced by that shift. -// -// For example, leftcheats[4] = {2, "625"}. That means that -// if we are shifting by 4 (multiplying by 16), it will add 2 digits -// when the string prefix is "625" through "999", and one fewer digit -// if the string prefix is "000" through "624". -// -// Credit for this trick goes to Ken. - -type leftCheat struct { - delta int // number of new digits - cutoff string // minus one digit if original < a. -} - -var leftcheats = []leftCheat{ - // Leading digits of 1/2^i = 5^i. - // 5^23 is not an exact 64-bit floating point number, - // so have to use bc for the math. - // Go up to 60 to be large enough for 32bit and 64bit platforms. - /* - seq 60 | sed 's/^/5^/' | bc | - awk 'BEGIN{ print "\t{ 0, \"\" }," } - { - log2 = log(2)/log(10) - printf("\t{ %d, \"%s\" },\t// * %d\n", - int(log2*NR+1), $0, 2**NR) - }' - */ - {0, ""}, - {1, "5"}, // * 2 - {1, "25"}, // * 4 - {1, "125"}, // * 8 - {2, "625"}, // * 16 - {2, "3125"}, // * 32 - {2, "15625"}, // * 64 - {3, "78125"}, // * 128 - {3, "390625"}, // * 256 - {3, "1953125"}, // * 512 - {4, "9765625"}, // * 1024 - {4, "48828125"}, // * 2048 - {4, "244140625"}, // * 4096 - {4, "1220703125"}, // * 8192 - {5, "6103515625"}, // * 16384 - {5, "30517578125"}, // * 32768 - {5, "152587890625"}, // * 65536 - {6, "762939453125"}, // * 131072 - {6, "3814697265625"}, // * 262144 - {6, "19073486328125"}, // * 524288 - {7, "95367431640625"}, // * 1048576 - {7, "476837158203125"}, // * 2097152 - {7, "2384185791015625"}, // * 4194304 - {7, "11920928955078125"}, // * 8388608 - {8, "59604644775390625"}, // * 16777216 - {8, "298023223876953125"}, // * 33554432 - {8, "1490116119384765625"}, // * 67108864 - {9, "7450580596923828125"}, // * 134217728 - {9, "37252902984619140625"}, // * 268435456 - {9, "186264514923095703125"}, // * 536870912 - {10, "931322574615478515625"}, // * 1073741824 - {10, "4656612873077392578125"}, // * 2147483648 - {10, "23283064365386962890625"}, // * 4294967296 - {10, "116415321826934814453125"}, // * 8589934592 - {11, "582076609134674072265625"}, // * 17179869184 - {11, "2910383045673370361328125"}, // * 34359738368 - {11, "14551915228366851806640625"}, // * 68719476736 - {12, "72759576141834259033203125"}, // * 137438953472 - {12, "363797880709171295166015625"}, // * 274877906944 - {12, "1818989403545856475830078125"}, // * 549755813888 - {13, "9094947017729282379150390625"}, // * 1099511627776 - {13, "45474735088646411895751953125"}, // * 2199023255552 - {13, "227373675443232059478759765625"}, // * 4398046511104 - {13, "1136868377216160297393798828125"}, // * 8796093022208 - {14, 
"5684341886080801486968994140625"}, // * 17592186044416 - {14, "28421709430404007434844970703125"}, // * 35184372088832 - {14, "142108547152020037174224853515625"}, // * 70368744177664 - {15, "710542735760100185871124267578125"}, // * 140737488355328 - {15, "3552713678800500929355621337890625"}, // * 281474976710656 - {15, "17763568394002504646778106689453125"}, // * 562949953421312 - {16, "88817841970012523233890533447265625"}, // * 1125899906842624 - {16, "444089209850062616169452667236328125"}, // * 2251799813685248 - {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 - {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 - {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 - {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 - {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 - {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 - {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 - {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488 - {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 -} - -// Is the leading prefix of b lexicographically less than s? -func prefixIsLessThan(b []byte, s string) bool { - for i := 0; i < len(s); i++ { - if i >= len(b) { - return true - } - if b[i] != s[i] { - return b[i] < s[i] - } - } - return false -} - -// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. -func leftShift(a *decimal, k uint) { - delta := leftcheats[k].delta - if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { - delta-- - } - - r := a.nd // read index - w := a.nd + delta // write index - - // Pick up a digit, put down a digit. - var n uint - for r--; r >= 0; r-- { - n += (uint(a.d[r]) - '0') << k - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - // Put down extra digits. - for n > 0 { - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - a.nd += delta - if a.nd >= len(a.d) { - a.nd = len(a.d) - } - a.dp += delta - trim(a) -} - -// Binary shift left (k > 0) or right (k < 0). -func (a *decimal) Shift(k int) { - switch { - case a.nd == 0: - // nothing to do: a == 0 - case k > 0: - for k > maxShift { - leftShift(a, maxShift) - k -= maxShift - } - leftShift(a, uint(k)) - case k < 0: - for k < -maxShift { - rightShift(a, maxShift) - k += maxShift - } - rightShift(a, uint(-k)) - } -} - -// If we chop a at nd digits, should we round up? -func shouldRoundUp(a *decimal, nd int) bool { - if nd < 0 || nd >= a.nd { - return false - } - if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even - // if we truncated, a little higher than what's recorded - always round up - if a.trunc { - return true - } - return nd > 0 && (a.d[nd-1]-'0')%2 != 0 - } - // not halfway - digit tells all - return a.d[nd] >= '5' -} - -// Round a to nd digits (or fewer). -// If nd is zero, it means we're rounding -// just to the left of the digits, as in -// 0.09 -> 0.1. -func (a *decimal) Round(nd int) { - if nd < 0 || nd >= a.nd { - return - } - if shouldRoundUp(a, nd) { - a.RoundUp(nd) - } else { - a.RoundDown(nd) - } -} - -// Round a down to nd digits (or fewer). 
-func (a *decimal) RoundDown(nd int) { - if nd < 0 || nd >= a.nd { - return - } - a.nd = nd - trim(a) -} - -// Round a up to nd digits (or fewer). -func (a *decimal) RoundUp(nd int) { - if nd < 0 || nd >= a.nd { - return - } - - // round up - for i := nd - 1; i >= 0; i-- { - c := a.d[i] - if c < '9' { // can stop after this digit - a.d[i]++ - a.nd = i + 1 - return - } - } - - // Number is all 9s. - // Change to single 1 with adjusted decimal point. - a.d[0] = '1' - a.nd = 1 - a.dp++ -} - -// Extract integer part, rounded appropriately. -// No guarantees about overflow. -func (a *decimal) RoundedInteger() uint64 { - if a.dp > 20 { - return 0xFFFFFFFFFFFFFFFF - } - var i int - n := uint64(0) - for i = 0; i < a.dp && i < a.nd; i++ { - n = n*10 + uint64(a.d[i]-'0') - } - for ; i < a.dp; i++ { - n *= 10 - } - if shouldRoundUp(a, a.dp) { - n++ - } - return n -} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go deleted file mode 100644 index 84405ec1cf0..00000000000 --- a/vendor/github.com/shopspring/decimal/decimal.go +++ /dev/null @@ -1,1904 +0,0 @@ -// Package decimal implements an arbitrary precision fixed-point decimal. -// -// The zero-value of a Decimal is 0, as you would expect. -// -// The best way to create a new Decimal is to use decimal.NewFromString, ex: -// -// n, err := decimal.NewFromString("-123.4567") -// n.String() // output: "-123.4567" -// -// To use Decimal as part of a struct: -// -// type Struct struct { -// Number Decimal -// } -// -// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. -package decimal - -import ( - "database/sql/driver" - "encoding/binary" - "fmt" - "math" - "math/big" - "regexp" - "strconv" - "strings" -) - -// DivisionPrecision is the number of decimal places in the result when it -// doesn't divide exactly. -// -// Example: -// -// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d1.String() // output: "0.6666666666666667" -// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) -// d2.String() // output: "0.0000666666666667" -// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) -// d3.String() // output: "6666.6666666666666667" -// decimal.DivisionPrecision = 3 -// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) -// d4.String() // output: "0.667" -// -var DivisionPrecision = 16 - -// MarshalJSONWithoutQuotes should be set to true if you want the decimal to -// be JSON marshaled as a number, instead of as a string. -// WARNING: this is dangerous for decimals with many digits, since many JSON -// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 -// double-precision floating point numbers, which means you can potentially -// silently lose precision. -var MarshalJSONWithoutQuotes = false - -// ExpMaxIterations specifies the maximum number of iterations needed to calculate -// precise natural exponent value using ExpHullAbrham method. -var ExpMaxIterations = 1000 - -// Zero constant, to make computations faster. -// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. -var Zero = New(0, 1) - -var zeroInt = big.NewInt(0) -var oneInt = big.NewInt(1) -var twoInt = big.NewInt(2) -var fourInt = big.NewInt(4) -var fiveInt = big.NewInt(5) -var tenInt = big.NewInt(10) -var twentyInt = big.NewInt(20) - -var factorials = []Decimal{New(1, 0)} - -// Decimal represents a fixed-point decimal. It is immutable. 
-// number = value * 10 ^ exp -type Decimal struct { - value *big.Int - - // NOTE(vadim): this must be an int32, because we cast it to float64 during - // calculations. If exp is 64 bit, we might lose precision. - // If we cared about being able to represent every possible decimal, we - // could make exp a *big.Int but it would hurt performance and numbers - // like that are unrealistic. - exp int32 -} - -// New returns a new fixed-point decimal, value * 10 ^ exp. -func New(value int64, exp int32) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: exp, - } -} - -// NewFromInt converts a int64 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt(value int64) Decimal { - return Decimal{ - value: big.NewInt(value), - exp: 0, - } -} - -// NewFromInt32 converts a int32 to Decimal. -// -// Example: -// -// NewFromInt(123).String() // output: "123" -// NewFromInt(-10).String() // output: "-10" -func NewFromInt32(value int32) Decimal { - return Decimal{ - value: big.NewInt(int64(value)), - exp: 0, - } -} - -// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp -func NewFromBigInt(value *big.Int, exp int32) Decimal { - return Decimal{ - value: new(big.Int).Set(value), - exp: exp, - } -} - -// NewFromString returns a new Decimal from a string representation. -// Trailing zeroes are not trimmed. -// -// Example: -// -// d, err := NewFromString("-123.45") -// d2, err := NewFromString(".0001") -// d3, err := NewFromString("1.47000") -// -func NewFromString(value string) (Decimal, error) { - originalInput := value - var intString string - var exp int64 - - // Check if number is using scientific notation - eIndex := strings.IndexAny(value, "Ee") - if eIndex != -1 { - expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) - if err != nil { - if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) - } - return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) - } - value = value[:eIndex] - exp = expInt - } - - pIndex := -1 - vLen := len(value) - for i := 0; i < vLen; i++ { - if value[i] == '.' 
{ - if pIndex > -1 { - return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) - } - pIndex = i - } - } - - if pIndex == -1 { - // There is no decimal point, we can just parse the original string as - // an int - intString = value - } else { - if pIndex+1 < vLen { - intString = value[:pIndex] + value[pIndex+1:] - } else { - intString = value[:pIndex] - } - expInt := -len(value[pIndex+1:]) - exp += int64(expInt) - } - - var dValue *big.Int - // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow - if len(intString) <= 18 { - parsed64, err := strconv.ParseInt(intString, 10, 64) - if err != nil { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) - } - dValue = big.NewInt(parsed64) - } else { - dValue = new(big.Int) - _, ok := dValue.SetString(intString, 10) - if !ok { - return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) - } - } - - if exp < math.MinInt32 || exp > math.MaxInt32 { - // NOTE(vadim): I doubt a string could realistically be this long - return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput) - } - - return Decimal{ - value: dValue, - exp: int32(exp), - }, nil -} - -// NewFromFormattedString returns a new Decimal from a formatted string representation. -// The second argument - replRegexp, is a regular expression that is used to find characters that should be -// removed from given decimal string representation. All matched characters will be replaced with an empty string. -// -// Example: -// -// r := regexp.MustCompile("[$,]") -// d1, err := NewFromFormattedString("$5,125.99", r) -// -// r2 := regexp.MustCompile("[_]") -// d2, err := NewFromFormattedString("1_000_000", r2) -// -// r3 := regexp.MustCompile("[USD\\s]") -// d3, err := NewFromFormattedString("5000 USD", r3) -// -func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { - parsedValue := replRegexp.ReplaceAllString(value, "") - d, err := NewFromString(parsedValue) - if err != nil { - return Decimal{}, err - } - return d, nil -} - -// RequireFromString returns a new Decimal from a string representation -// or panics if NewFromString would have returned an error. -// -// Example: -// -// d := RequireFromString("-123.45") -// d2 := RequireFromString(".0001") -// -func RequireFromString(value string) Decimal { - dec, err := NewFromString(value) - if err != nil { - panic(err) - } - return dec -} - -// NewFromFloat converts a float64 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 15 digits, but may be more in some cases. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. -// -// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. -// -// NOTE: this will panic on NaN, +/-inf -func NewFromFloat(value float64) Decimal { - if value == 0 { - return New(0, 0) - } - return newFromFloat(value, math.Float64bits(value), &float64info) -} - -// NewFromFloat32 converts a float32 to Decimal. -// -// The converted number will contain the number of significant digits that can be -// represented in a float with reliable roundtrip. -// This is typically 6-8 digits depending on the input. -// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. 
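As the parser above shows, NewFromString accepts plain and scientific notation, and NewFromFormattedString first strips characters matched by a caller-supplied regexp. A short usage sketch (assuming the upstream github.com/shopspring/decimal module and stdlib regexp):

    d1, _ := decimal.NewFromString("-123.45")               // fixed-point form
    d2, _ := decimal.NewFromString("1.5e3")                 // scientific notation, equals 1500
    r := regexp.MustCompile("[$,]")
    d3, _ := decimal.NewFromFormattedString("$5,125.99", r) // "$" and "," stripped first
    fmt.Println(d1, d2, d3)                                 // -123.45 1500 5125.99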
-// - For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. - - // NOTE: this will panic on NaN, +/-inf -func NewFromFloat32(value float32) Decimal { - if value == 0 { - return New(0, 0) - } - // XOR is workaround for https://github.com/golang/go/issues/26285 - a := math.Float32bits(value) ^ 0x80808080 - return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) -} - -func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { - if math.IsNaN(val) || math.IsInf(val, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", val)) - } - exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1) - mant := bits & (uint64(1)<<flt.mantbits - 1) - - switch exp { - case 0: - // denormalized - exp++ - default: - // add implicit top bit - mant |= uint64(1) << flt.mantbits - } - exp += flt.bias - - var d decimal - d.Assign(mant) - d.Shift(exp - int(flt.mantbits)) - d.neg = bits>>(flt.expbits+flt.mantbits) != 0 - - roundShortest(&d, mant, exp, flt) - // If less than 19 digits, we can do calculation in an int64. - if d.nd < 19 { - tmp := int64(0) - m := int64(1) - for i := d.nd - 1; i >= 0; i-- { - tmp += m * int64(d.d[i]-'0') - m *= 10 - } - if d.neg { - tmp *= -1 - } - return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} - } - dValue := new(big.Int) - dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) - if ok { - return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} - } - - return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) -} - -// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary -// number of fractional digits. -// -// Example: -// -// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" -// -func NewFromFloatWithExponent(value float64, exp int32) Decimal { - if math.IsNaN(value) || math.IsInf(value, 0) { - panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) - } - - bits := math.Float64bits(value) - mant := bits & (1<<52 - 1) - exp2 := int32((bits >> 52) & (1<<11 - 1)) - sign := bits >> 63 - - if exp2 == 0 { - // specials - if mant == 0 { - return Decimal{} - } - // subnormal - exp2++ - } else { - // normal - mant |= 1 << 52 - } - - exp2 -= 1023 + 52 - - // normalizing base-2 values - for mant&1 == 0 { - mant = mant >> 1 - exp2++ - } - - // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 - if exp < 0 && exp < exp2 { - if exp2 < 0 { - exp = exp2 - } else { - exp = 0 - } - } - - // representing 10^M * 2^N as 5^M * 2^(M+N) - exp2 -= exp - - temp := big.NewInt(1) - dMant := big.NewInt(int64(mant)) - - // applying 5^M - if exp > 0 { - temp = temp.SetInt64(int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - } else if exp < 0 { - temp = temp.SetInt64(-int64(exp)) - temp = temp.Exp(fiveInt, temp, nil) - dMant = dMant.Mul(dMant, temp) - temp = temp.SetUint64(1) - } - - // applying 2^(M+N) - if exp2 > 0 { - dMant = dMant.Lsh(dMant, uint(exp2)) - } else if exp2 < 0 { - temp = temp.Lsh(temp, uint(-exp2)) - } - - // rounding and downscaling - if exp > 0 || exp2 < 0 { - halfDown := new(big.Int).Rsh(temp, 1) - dMant = dMant.Add(dMant, halfDown) - dMant = dMant.Quo(dMant, temp) - } - - if sign == 1 { - dMant = dMant.Neg(dMant) - } - - return Decimal{ - value: dMant, - exp: exp, - } -} - -// Copy returns a copy of decimal with the same value and exponent, but a different pointer to value. -func (d Decimal) Copy() Decimal { - d.ensureInitialized() - return Decimal{ - value: &(*d.value), - exp: d.exp, - } -} - -// rescale returns a rescaled version of the decimal. Returned -// decimal may be less precise if the given exponent is bigger -// than the initial exponent of the Decimal.
-// NOTE: this will truncate, NOT round -// -// Example: -// -// d := New(12345, -4) -// d2 := d.rescale(-1) -// d3 := d2.rescale(-4) -// println(d) -// println(d2) -// println(d3) -// -// Output: -// -// 1.2345 -// 1.2 -// 1.2000 -// -func (d Decimal) rescale(exp int32) Decimal { - d.ensureInitialized() - - if d.exp == exp { - return Decimal{ - new(big.Int).Set(d.value), - d.exp, - } - } - - // NOTE(vadim): must convert exps to float64 before - to prevent overflow - diff := math.Abs(float64(exp) - float64(d.exp)) - value := new(big.Int).Set(d.value) - - expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) - if exp > d.exp { - value = value.Quo(value, expScale) - } else if exp < d.exp { - value = value.Mul(value, expScale) - } - - return Decimal{ - value: value, - exp: exp, - } -} - -// Abs returns the absolute value of the decimal. -func (d Decimal) Abs() Decimal { - if !d.IsNegative() { - return d - } - d.ensureInitialized() - d2Value := new(big.Int).Abs(d.value) - return Decimal{ - value: d2Value, - exp: d.exp, - } -} - -// Add returns d + d2. -func (d Decimal) Add(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Add(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Sub returns d - d2. -func (d Decimal) Sub(d2 Decimal) Decimal { - rd, rd2 := RescalePair(d, d2) - - d3Value := new(big.Int).Sub(rd.value, rd2.value) - return Decimal{ - value: d3Value, - exp: rd.exp, - } -} - -// Neg returns -d. -func (d Decimal) Neg() Decimal { - d.ensureInitialized() - val := new(big.Int).Neg(d.value) - return Decimal{ - value: val, - exp: d.exp, - } -} - -// Mul returns d * d2. -func (d Decimal) Mul(d2 Decimal) Decimal { - d.ensureInitialized() - d2.ensureInitialized() - - expInt64 := int64(d.exp) + int64(d2.exp) - if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { - // NOTE(vadim): better to panic than give incorrect results, as - // Decimals are usually used for money - panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64)) - } - - d3Value := new(big.Int).Mul(d.value, d2.value) - return Decimal{ - value: d3Value, - exp: int32(expInt64), - } -} - -// Shift shifts the decimal in base 10. -// It shifts left when shift is positive and right if shift is negative. -// In simpler terms, the given value for shift is added to the exponent -// of the decimal. -func (d Decimal) Shift(shift int32) Decimal { - d.ensureInitialized() - return Decimal{ - value: new(big.Int).Set(d.value), - exp: d.exp + shift, - } -} - -// Div returns d / d2. If it doesn't divide exactly, the result will have -// DivisionPrecision digits after the decimal point. -func (d Decimal) Div(d2 Decimal) Decimal { - return d.DivRound(d2, int32(DivisionPrecision)) -} - -// QuoRem does division with remainder -// d.QuoRem(d2,precision) returns quotient q and remainder r such that -// d = d2 * q + r, q an integer multiple of 10^(-precision) -// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 -// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 -// Note that precision<0 is allowed as input.
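The QuoRem contract spelled out in the comment above (d = d2*q + r, with q an integer multiple of 10^(-precision)) can be checked directly; a small sketch, upstream module assumed:

    ten, three := decimal.NewFromInt(10), decimal.NewFromInt(3)
    q, r := ten.QuoRem(three, 1)                // q = 3.3, r = 0.1
    fmt.Println(three.Mul(q).Add(r).Equal(ten)) // true: d2*q + r reassembles d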
-func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { - d.ensureInitialized() - d2.ensureInitialized() - if d2.value.Sign() == 0 { - panic("decimal division by 0") - } - scale := -precision - e := int64(d.exp - d2.exp - scale) - if e > math.MaxInt32 || e < math.MinInt32 { - panic("overflow in decimal QuoRem") - } - var aa, bb, expo big.Int - var scalerest int32 - // d = a 10^ea - // d2 = b 10^eb - if e < 0 { - aa = *d.value - expo.SetInt64(-e) - bb.Exp(tenInt, &expo, nil) - bb.Mul(d2.value, &bb) - scalerest = d.exp - // now aa = a - // bb = b 10^(scale + eb - ea) - } else { - expo.SetInt64(e) - aa.Exp(tenInt, &expo, nil) - aa.Mul(d.value, &aa) - bb = *d2.value - scalerest = scale + d2.exp - // now aa = a ^ (ea - eb - scale) - // bb = b - } - var q, r big.Int - q.QuoRem(&aa, &bb, &r) - dq := Decimal{value: &q, exp: scale} - dr := Decimal{value: &r, exp: scalerest} - return dq, dr -} - -// DivRound divides and rounds to a given precision -// i.e. to an integer multiple of 10^(-precision) -// for a positive quotient digit 5 is rounded up, away from 0 -// if the quotient is negative then digit 5 is rounded down, away from 0 -// Note that precision<0 is allowed as input. -func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { - // QuoRem already checks initialization - q, r := d.QuoRem(d2, precision) - // the actual rounding decision is based on comparing r*10^precision and d2/2 - // instead compare 2 r 10 ^precision and d2 - var rv2 big.Int - rv2.Abs(r.value) - rv2.Lsh(&rv2, 1) - // now rv2 = abs(r.value) * 2 - r2 := Decimal{value: &rv2, exp: r.exp + precision} - // r2 is now 2 * r * 10 ^ precision - var c = r2.Cmp(d2.Abs()) - - if c < 0 { - return q - } - - if d.value.Sign()*d2.value.Sign() < 0 { - return q.Sub(New(1, -precision)) - } - - return q.Add(New(1, -precision)) -} - -// Mod returns d % d2. -func (d Decimal) Mod(d2 Decimal) Decimal { - quo := d.Div(d2).Truncate(0) - return d.Sub(d2.Mul(quo)) -} - -// Pow returns d to the power d2 -func (d Decimal) Pow(d2 Decimal) Decimal { - var temp Decimal - if d2.IntPart() == 0 { - return NewFromFloat(1) - } - temp = d.Pow(d2.Div(NewFromFloat(2))) - if d2.IntPart()%2 == 0 { - return temp.Mul(temp) - } - if d2.IntPart() > 0 { - return temp.Mul(temp).Mul(d) - } - return temp.Mul(temp).Div(d) -} - -// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. -// OverallPrecision argument specifies the overall precision of the result (integer part + decimal part). -// -// ExpHullAbrham is faster than ExpTaylor for small precision values, but it is much slower for large precision values. -// -// Example: -// -// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" -// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" -// -func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { - // Algorithm based on Variable precision exponential function. - // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. - if d.IsZero() { - return Decimal{oneInt, 0}, nil - } - - currentPrecision := overallPrecision - - // Algorithm does not work if currentPrecision * 23 < |x|. - // Precision is automatically increased in such cases, so the value can be calculated precisely. - // If newly calculated precision is higher than ExpMaxIterations the currentPrecision will not be changed. 
- f := d.Abs().InexactFloat64() - if ncp := f / 23; ncp > float64(currentPrecision) && ncp < float64(ExpMaxIterations) { - currentPrecision = uint32(math.Ceil(ncp)) - } - - // fail if abs(d) beyond an over/underflow threshold - overflowThreshold := New(23*int64(currentPrecision), 0) - if d.Abs().Cmp(overflowThreshold) > 0 { - return Decimal{}, fmt.Errorf("over/underflow threshold, exp(x) cannot be calculated precisely") - } - - // Return 1 if abs(d) small enough; this also avoids later over/underflow - overflowThreshold2 := New(9, -int32(currentPrecision)-1) - if d.Abs().Cmp(overflowThreshold2) <= 0 { - return Decimal{oneInt, d.exp}, nil - } - - // t is the smallest integer >= 0 such that the corresponding abs(d/k) < 1 - t := d.exp + int32(d.NumDigits()) // Add d.NumDigits because the paper assumes that d.value [0.1, 1) - - if t < 0 { - t = 0 - } - - k := New(1, t) // reduction factor - r := Decimal{new(big.Int).Set(d.value), d.exp - t} // reduced argument - p := int32(currentPrecision) + t + 2 // precision for calculating the sum - - // Determine n, the number of terms for calculating sum - // use first Newton step (1.435p - 1.182) / log10(p/abs(r)) - // for solving appropriate equation, along with directed - // roundings and simple rational bound for log10(p/abs(r)) - rf := r.Abs().InexactFloat64() - pf := float64(p) - nf := math.Ceil((1.453*pf - 1.182) / math.Log10(pf/rf)) - if nf > float64(ExpMaxIterations) || math.IsNaN(nf) { - return Decimal{}, fmt.Errorf("exact value cannot be calculated in <=ExpMaxIterations iterations") - } - n := int64(nf) - - tmp := New(0, 0) - sum := New(1, 0) - one := New(1, 0) - for i := n - 1; i > 0; i-- { - tmp.value.SetInt64(i) - sum = sum.Mul(r.DivRound(tmp, p)) - sum = sum.Add(one) - } - - ki := k.IntPart() - res := New(1, 0) - for i := ki; i > 0; i-- { - res = res.Mul(sum) - } - - resNumDigits := int32(res.NumDigits()) - - var roundDigits int32 - if resNumDigits > abs(res.exp) { - roundDigits = int32(currentPrecision) - resNumDigits - res.exp - } else { - roundDigits = int32(currentPrecision) - } - - res = res.Round(roundDigits) - - return res, nil -} - -// ExpTaylor calculates the natural exponent of decimal (e to the power of d) using Taylor series expansion. -// Precision argument specifies how precise the result must be (number of digits after decimal point). -// Negative precision is allowed. -// -// ExpTaylor is much faster for large precision values than ExpHullAbrham.
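Per the doc comments, ExpHullAbrham takes an overall precision while ExpTaylor takes digits after the decimal point; a usage sketch echoing the documented outputs (upstream module assumed):

    x := decimal.NewFromFloat(26.1)
    a, _ := x.ExpHullAbrham(20) // "216314672147.05767284" per the doc above
    b, _ := x.ExpTaylor(2)      // "216314672147.06" per the doc below
    fmt.Println(a, b)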
-// -// Example: -// -// d, err := NewFromFloat(26.1).ExpTaylor(2) -// d.String() // output: "216314672147.06" -// -// d, err = NewFromFloat(26.1).ExpTaylor(20) -// d.String() // output: "216314672147.05767284062928674083" -// -// d, err = NewFromFloat(26.1).ExpTaylor(-10) -// d.String() // output: "220000000000" -// -func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { - // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only - if d.IsZero() { - return Decimal{oneInt, 0}.Round(precision), nil - } - - var epsilon Decimal - var divPrecision int32 - if precision < 0 { - epsilon = New(1, -1) - divPrecision = 8 - } else { - epsilon = New(1, -precision-1) - divPrecision = precision + 1 - } - - decAbs := d.Abs() - pow := d.Abs() - factorial := New(1, 0) - - result := New(1, 0) - - for i := int64(1); ; { - step := pow.DivRound(factorial, divPrecision) - result = result.Add(step) - - // Stop Taylor series when current step is smaller than epsilon - if step.Cmp(epsilon) < 0 { - break - } - - pow = pow.Mul(decAbs) - - i++ - - // Calculate next factorial number or retrieve cached value - if len(factorials) >= int(i) && !factorials[i-1].IsZero() { - factorial = factorials[i-1] - } else { - // To avoid any race conditions, firstly the zero value is appended to a slice to create - // a spot for newly calculated factorial. After that, the zero value is replaced by calculated - // factorial using the index notation. - factorial = factorials[i-2].Mul(New(i, 0)) - factorials = append(factorials, Zero) - factorials[i-1] = factorial - } - } - - if d.Sign() < 0 { - result = New(1, 0).DivRound(result, precision+1) - } - - result = result.Round(precision) - return result, nil -} - -// NumDigits returns the number of digits of the decimal coefficient (d.Value) - // Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part -func (d Decimal) NumDigits() int { - // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string - if d.IsNegative() { - return len(d.value.String()) - 1 - } - return len(d.value.String()) -} - -// IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false. -func (d Decimal) IsInteger() bool { - // The most typical case, all decimal with exponent higher or equal 0 can be represented as integer - if d.exp >= 0 { - return true - } - // When the exponent is negative we have to check every number after the decimal place - // If all of them are zeroes, we are sure that given decimal can be represented as an integer - var r big.Int - q := new(big.Int).Set(d.value) - for z := abs(d.exp); z > 0; z-- { - q.QuoRem(q, tenInt, &r) - if r.Cmp(zeroInt) != 0 { - return false - } - } - return true -} - -// Abs calculates absolute value of any int32. Used for calculating absolute value of decimal's exponent. -func abs(n int32) int32 { - if n < 0 { - return -n - } - return n -} - -// Cmp compares the numbers represented by d and d2 and returns: -// -// -1 if d < d2 -// 0 if d == d2 -// +1 if d > d2 -// -func (d Decimal) Cmp(d2 Decimal) int { - d.ensureInitialized() - d2.ensureInitialized() - - if d.exp == d2.exp { - return d.value.Cmp(d2.value) - } - - rd, rd2 := RescalePair(d, d2) - - return rd.value.Cmp(rd2.value) -} - -// Equal returns whether the numbers represented by d and d2 are equal.
-func (d Decimal) Equal(d2 Decimal) bool { - return d.Cmp(d2) == 0 -} - -// Equals is deprecated, please use Equal method instead -func (d Decimal) Equals(d2 Decimal) bool { - return d.Equal(d2) -} - -// GreaterThan (GT) returns true when d is greater than d2. -func (d Decimal) GreaterThan(d2 Decimal) bool { - return d.Cmp(d2) == 1 -} - -// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2. -func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == 1 || cmp == 0 -} - -// LessThan (LT) returns true when d is less than d2. -func (d Decimal) LessThan(d2 Decimal) bool { - return d.Cmp(d2) == -1 -} - -// LessThanOrEqual (LTE) returns true when d is less than or equal to d2. -func (d Decimal) LessThanOrEqual(d2 Decimal) bool { - cmp := d.Cmp(d2) - return cmp == -1 || cmp == 0 -} - -// Sign returns: -// -// -1 if d < 0 -// 0 if d == 0 -// +1 if d > 0 -// -func (d Decimal) Sign() int { - if d.value == nil { - return 0 - } - return d.value.Sign() -} - -// IsPositive return -// -// true if d > 0 -// false if d == 0 -// false if d < 0 -func (d Decimal) IsPositive() bool { - return d.Sign() == 1 -} - -// IsNegative return -// -// true if d < 0 -// false if d == 0 -// false if d > 0 -func (d Decimal) IsNegative() bool { - return d.Sign() == -1 -} - -// IsZero return -// -// true if d == 0 -// false if d > 0 -// false if d < 0 -func (d Decimal) IsZero() bool { - return d.Sign() == 0 -} - -// Exponent returns the exponent, or scale component of the decimal. -func (d Decimal) Exponent() int32 { - return d.exp -} - -// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent() -func (d Decimal) Coefficient() *big.Int { - d.ensureInitialized() - // we copy the coefficient so that mutating the result does not mutate the Decimal. - return new(big.Int).Set(d.value) -} - -// CoefficientInt64 returns the coefficient of the decimal as int64. It is scaled by 10^Exponent() -// If coefficient cannot be represented in an int64, the result will be undefined. -func (d Decimal) CoefficientInt64() int64 { - d.ensureInitialized() - return d.value.Int64() -} - -// IntPart returns the integer component of the decimal. -func (d Decimal) IntPart() int64 { - scaledD := d.rescale(0) - return scaledD.value.Int64() -} - -// BigInt returns integer component of the decimal as a BigInt. -func (d Decimal) BigInt() *big.Int { - scaledD := d.rescale(0) - i := &big.Int{} - i.SetString(scaledD.String(), 10) - return i -} - -// BigFloat returns decimal as BigFloat. -// Be aware that casting decimal to BigFloat might cause a loss of precision. -func (d Decimal) BigFloat() *big.Float { - f := &big.Float{} - f.SetString(d.String()) - return f -} - -// Rat returns a rational number representation of the decimal. -func (d Decimal) Rat() *big.Rat { - d.ensureInitialized() - if d.exp <= 0 { - // NOTE(vadim): must negate after casting to prevent int32 overflow - denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) - return new(big.Rat).SetFrac(d.value, denom) - } - - mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) - num := new(big.Int).Mul(d.value, mul) - return new(big.Rat).SetFrac(num, oneInt) -} - -// Float64 returns the nearest float64 value for d and a bool indicating -// whether f represents d exactly. -// For more details, see the documentation for big.Rat.Float64 -func (d Decimal) Float64() (f float64, exact bool) { - return d.Rat().Float64() -} - -// InexactFloat64 returns the nearest float64 value for d. 
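Because Cmp and Equal above compare exact scaled integers, sums that are inexact in binary floating point stay exact here; a quick sketch (upstream module assumed):

    sum := decimal.RequireFromString("0.1").Add(decimal.RequireFromString("0.2"))
    fmt.Println(sum.Equal(decimal.RequireFromString("0.3"))) // true
    fmt.Println(0.1+0.2 == 0.3)                              // false with float64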
-// It doesn't indicate if the returned value represents d exactly. -func (d Decimal) InexactFloat64() float64 { - f, _ := d.Float64() - return f -} - -// String returns the string representation of the decimal -// with the fixed point. -// -// Example: -// -// d := New(-12345, -3) -// println(d.String()) -// -// Output: -// -// -12.345 -// -func (d Decimal) String() string { - return d.string(true) -} - -// StringFixed returns a rounded fixed-point string with places digits after -// the decimal point. -// -// Example: -// -// NewFromFloat(0).StringFixed(2) // output: "0.00" -// NewFromFloat(0).StringFixed(0) // output: "0" -// NewFromFloat(5.45).StringFixed(0) // output: "5" -// NewFromFloat(5.45).StringFixed(1) // output: "5.5" -// NewFromFloat(5.45).StringFixed(2) // output: "5.45" -// NewFromFloat(5.45).StringFixed(3) // output: "5.450" -// NewFromFloat(545).StringFixed(-1) // output: "550" -// -func (d Decimal) StringFixed(places int32) string { - rounded := d.Round(places) - return rounded.string(false) -} - -// StringFixedBank returns a banker rounded fixed-point string with places digits -// after the decimal point. -// -// Example: -// -// NewFromFloat(0).StringFixedBank(2) // output: "0.00" -// NewFromFloat(0).StringFixedBank(0) // output: "0" -// NewFromFloat(5.45).StringFixedBank(0) // output: "5" -// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" -// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" -// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" -// NewFromFloat(545).StringFixedBank(-1) // output: "540" -// -func (d Decimal) StringFixedBank(places int32) string { - rounded := d.RoundBank(places) - return rounded.string(false) -} - -// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For -// more details see the documentation at function RoundCash. -func (d Decimal) StringFixedCash(interval uint8) string { - rounded := d.RoundCash(interval) - return rounded.string(false) -} - -// Round rounds the decimal to places decimal places. -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Example: -// -// NewFromFloat(5.45).Round(1).String() // output: "5.5" -// NewFromFloat(545).Round(-1).String() // output: "550" -// -func (d Decimal) Round(places int32) Decimal { - if d.exp == -places { - return d - } - // truncate to places + 1 - ret := d.rescale(-places - 1) - - // add sign(d) * 0.5 - if ret.value.Sign() < 0 { - ret.value.Sub(ret.value, fiveInt) - } else { - ret.value.Add(ret.value, fiveInt) - } - - // floor for positive numbers, ceil for negative numbers - _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int)) - ret.exp++ - if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 { - ret.value.Add(ret.value, oneInt) - } - - return ret -} - -// RoundCeil rounds the decimal towards +infinity. -// -// Example: -// -// NewFromFloat(545).RoundCeil(-2).String() // output: "600" -// NewFromFloat(500).RoundCeil(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.5" -// -func (d Decimal) RoundCeil(places int32) Decimal { - if d.exp >= -places { - return d - } - - rescaled := d.rescale(-places) - if d.Equal(rescaled) { - return d - } - - if d.value.Sign() > 0 { - rescaled.value.Add(rescaled.value, oneInt) - } - - return rescaled -} - -// RoundFloor rounds the decimal towards -infinity. 
-// -// Example: -// -// NewFromFloat(545).RoundFloor(-2).String() // output: "500" -// NewFromFloat(-500).RoundFloor(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.4" -// -func (d Decimal) RoundFloor(places int32) Decimal { - if d.exp >= -places { - return d - } - - rescaled := d.rescale(-places) - if d.Equal(rescaled) { - return d - } - - if d.value.Sign() < 0 { - rescaled.value.Sub(rescaled.value, oneInt) - } - - return rescaled -} - -// RoundUp rounds the decimal away from zero. -// -// Example: -// -// NewFromFloat(545).RoundUp(-2).String() // output: "600" -// NewFromFloat(500).RoundUp(-2).String() // output: "500" -// NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11" -// NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.4" -// -func (d Decimal) RoundUp(places int32) Decimal { - if d.exp >= -places { - return d - } - - rescaled := d.rescale(-places) - if d.Equal(rescaled) { - return d - } - - if d.value.Sign() > 0 { - rescaled.value.Add(rescaled.value, oneInt) - } else if d.value.Sign() < 0 { - rescaled.value.Sub(rescaled.value, oneInt) - } - - return rescaled -} - -// RoundDown rounds the decimal towards zero. -// -// Example: -// -// NewFromFloat(545).RoundDown(-2).String() // output: "500" -// NewFromFloat(-500).RoundDown(-2).String() // output: "-500" -// NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1" -// NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.5" -// -func (d Decimal) RoundDown(places int32) Decimal { - if d.exp >= -places { - return d - } - - rescaled := d.rescale(-places) - if d.Equal(rescaled) { - return d - } - return rescaled -} - -// RoundBank rounds the decimal to places decimal places. -// If the final digit to round is equidistant from the nearest two integers the -// rounded value is taken as the even number -// -// If places < 0, it will round the integer part to the nearest 10^(-places). -// -// Examples: -// -// NewFromFloat(5.45).RoundBank(1).String() // output: "5.4" -// NewFromFloat(545).RoundBank(-1).String() // output: "540" -// NewFromFloat(5.46).RoundBank(1).String() // output: "5.5" -// NewFromFloat(546).RoundBank(-1).String() // output: "550" -// NewFromFloat(5.55).RoundBank(1).String() // output: "5.6" -// NewFromFloat(555).RoundBank(-1).String() // output: "560" -// -func (d Decimal) RoundBank(places int32) Decimal { - - round := d.Round(places) - remainder := d.Sub(round).Abs() - - half := New(5, -places-1) - if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 { - if round.value.Sign() < 0 { - round.value.Add(round.value, oneInt) - } else { - round.value.Sub(round.value, oneInt) - } - } - - return round -} - -// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific -// interval. The amount payable for a cash transaction is rounded to the nearest -// multiple of the minimum currency unit available. The following intervals are -// available: 5, 10, 25, 50 and 100; any other number throws a panic. 
-// 5: 5 cent rounding 3.43 => 3.45 -// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up) -// 25: 25 cent rounding 3.41 => 3.50 -// 50: 50 cent rounding 3.75 => 4.00 -// 100: 100 cent rounding 3.50 => 4.00 -// For more details: https://en.wikipedia.org/wiki/Cash_rounding -func (d Decimal) RoundCash(interval uint8) Decimal { - var iVal *big.Int - switch interval { - case 5: - iVal = twentyInt - case 10: - iVal = tenInt - case 25: - iVal = fourInt - case 50: - iVal = twoInt - case 100: - iVal = oneInt - default: - panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100", interval)) - } - dVal := Decimal{ - value: iVal, - } - - // TODO: optimize those calculations to reduce the high allocations (~29 allocs). - return d.Mul(dVal).Round(0).Div(dVal).Truncate(2) -} - -// Floor returns the nearest integer value less than or equal to d. -func (d Decimal) Floor() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z := new(big.Int).Div(d.value, exp) - return Decimal{value: z, exp: 0} -} - -// Ceil returns the nearest integer value greater than or equal to d. -func (d Decimal) Ceil() Decimal { - d.ensureInitialized() - - if d.exp >= 0 { - return d - } - - exp := big.NewInt(10) - - // NOTE(vadim): must negate after casting to prevent int32 overflow - exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) - - z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) - if m.Cmp(zeroInt) != 0 { - z.Add(z, oneInt) - } - return Decimal{value: z, exp: 0} -} - -// Truncate truncates off digits from the number, without rounding. -// -// NOTE: precision is the last digit that will not be truncated (must be >= 0). -// -// Example: -// -// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" -// -func (d Decimal) Truncate(precision int32) Decimal { - d.ensureInitialized() - if precision >= 0 && -precision > d.exp { - return d.rescale(-precision) - } - return d -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - return nil - } - - str, err := unquoteIfQuoted(decimalBytes) - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) - } - - decimal, err := NewFromString(str) - *d = decimal - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Decimal) MarshalJSON() ([]byte, error) { - var str string - if MarshalJSONWithoutQuotes { - str = d.String() - } else { - str = "\"" + d.String() + "\"" - } - return []byte(str), nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation -// is already used when encoding to text, this method stores that string as []byte -func (d *Decimal) UnmarshalBinary(data []byte) error { - // Verify we have at least 4 bytes for the exponent. The GOB encoded value - // may be empty. 
- if len(data) < 4 { - return fmt.Errorf("error decoding binary %v: expected at least 4 bytes, got %d", data, len(data)) - } - - // Extract the exponent - d.exp = int32(binary.BigEndian.Uint32(data[:4])) - - // Extract the value - d.value = new(big.Int) - if err := d.value.GobDecode(data[4:]); err != nil { - return fmt.Errorf("error decoding binary %v: %s", data, err) - } - - return nil -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Decimal) MarshalBinary() (data []byte, err error) { - // Write the exponent first since it's a fixed size - v1 := make([]byte, 4) - binary.BigEndian.PutUint32(v1, uint32(d.exp)) - - // Add the value - var v2 []byte - if v2, err = d.value.GobEncode(); err != nil { - return - } - - // Return the byte array - data = append(v1, v2...) - return -} - -// Scan implements the sql.Scanner interface for database deserialization. -func (d *Decimal) Scan(value interface{}) error { - // first try to see if the data is stored in database as a Numeric datatype - switch v := value.(type) { - - case float32: - *d = NewFromFloat(float64(v)) - return nil - - case float64: - // numeric in sqlite3 sends us float64 - *d = NewFromFloat(v) - return nil - - case int64: - // at least in sqlite3 when the value is 0 in db, the data is sent - // to us as an int64 instead of a float64 ... - *d = New(v, 0) - return nil - - default: - // default is trying to interpret value stored as string - str, err := unquoteIfQuoted(v) - if err != nil { - return err - } - *d, err = NewFromString(str) - return err - } -} - -// Value implements the driver.Valuer interface for database serialization. -func (d Decimal) Value() (driver.Value, error) { - return d.String(), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for XML -// deserialization. -func (d *Decimal) UnmarshalText(text []byte) error { - str := string(text) - - dec, err := NewFromString(str) - *d = dec - if err != nil { - return fmt.Errorf("error decoding string '%s': %s", str, err) - } - - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface for XML -// serialization. -func (d Decimal) MarshalText() (text []byte, err error) { - return []byte(d.String()), nil -} - -// GobEncode implements the gob.GobEncoder interface for gob serialization. -func (d Decimal) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface for gob serialization. -func (d *Decimal) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// StringScaled first scales the decimal then calls .String() on it. -// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. -func (d Decimal) StringScaled(exp int32) string { - return d.rescale(exp).String() -} - -func (d Decimal) string(trimTrailingZeros bool) string { - if d.exp >= 0 { - return d.rescale(0).value.String() - } - - abs := new(big.Int).Abs(d.value) - str := abs.String() - - var intPart, fractionalPart string - - // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN - // and you are on a 32-bit machine. Won't fix this super-edge case. 
- dExpInt := int(d.exp) - if len(str) > -dExpInt { - intPart = str[:len(str)+dExpInt] - fractionalPart = str[len(str)+dExpInt:] - } else { - intPart = "0" - - num0s := -dExpInt - len(str) - fractionalPart = strings.Repeat("0", num0s) + str - } - - if trimTrailingZeros { - i := len(fractionalPart) - 1 - for ; i >= 0; i-- { - if fractionalPart[i] != '0' { - break - } - } - fractionalPart = fractionalPart[:i+1] - } - - number := intPart - if len(fractionalPart) > 0 { - number += "." + fractionalPart - } - - if d.value.Sign() < 0 { - return "-" + number - } - - return number -} - -func (d *Decimal) ensureInitialized() { - if d.value == nil { - d.value = new(big.Int) - } -} - -// Min returns the smallest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Min(arr[0], arr[1:]...) -// -// This makes it harder to accidentally call Min with 0 arguments. -func Min(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) < 0 { - ans = item - } - } - return ans -} - -// Max returns the largest Decimal that was passed in the arguments. -// -// To call this function with an array, you must do: -// -// Max(arr[0], arr[1:]...) -// -// This makes it harder to accidentally call Max with 0 arguments. -func Max(first Decimal, rest ...Decimal) Decimal { - ans := first - for _, item := range rest { - if item.Cmp(ans) > 0 { - ans = item - } - } - return ans -} - -// Sum returns the combined total of the provided first and rest Decimals -func Sum(first Decimal, rest ...Decimal) Decimal { - total := first - for _, item := range rest { - total = total.Add(item) - } - - return total -} - -// Avg returns the average value of the provided first and rest Decimals -func Avg(first Decimal, rest ...Decimal) Decimal { - count := New(int64(len(rest)+1), 0) - sum := Sum(first, rest...) - return sum.Div(count) -} - -// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) -func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { - d1.ensureInitialized() - d2.ensureInitialized() - - if d1.exp == d2.exp { - return d1, d2 - } - - baseScale := min(d1.exp, d2.exp) - if baseScale != d1.exp { - return d1.rescale(baseScale), d2 - } - return d1, d2.rescale(baseScale) -} - -func min(x, y int32) int32 { - if x >= y { - return y - } - return x -} - -func unquoteIfQuoted(value interface{}) (string, error) { - var bytes []byte - - switch v := value.(type) { - case string: - bytes = []byte(v) - case []byte: - bytes = v - default: - return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", - value, value) - } - - // If the amount is quoted, strip the quotes - if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { - bytes = bytes[1 : len(bytes)-1] - } - return string(bytes), nil -} - -// NullDecimal represents a nullable decimal with compatibility for -// scanning null values from the database. -type NullDecimal struct { - Decimal Decimal - Valid bool -} - -func NewNullDecimal(d Decimal) NullDecimal { - return NullDecimal{ - Decimal: d, - Valid: true, - } -} - -// Scan implements the sql.Scanner interface for database deserialization. -func (d *NullDecimal) Scan(value interface{}) error { - if value == nil { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.Scan(value) -} - -// Value implements the driver.Valuer interface for database serialization. 
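The Scan/Value pair above is what lets Decimal (and NullDecimal below) plug into database/sql. A hypothetical sketch, where db, the items table, and the price column are illustrative names only (upstream module assumed):

    var price decimal.NullDecimal
    err := db.QueryRow("SELECT price FROM items WHERE id = ?", 1).Scan(&price)
    if err == nil && price.Valid {
        fmt.Println(price.Decimal.StringFixed(2)) // e.g. "19.99"
    }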
-func (d NullDecimal) Value() (driver.Value, error) { - if !d.Valid { - return nil, nil - } - return d.Decimal.Value() -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { - if string(decimalBytes) == "null" { - d.Valid = false - return nil - } - d.Valid = true - return d.Decimal.UnmarshalJSON(decimalBytes) -} - -// MarshalJSON implements the json.Marshaler interface. -func (d NullDecimal) MarshalJSON() ([]byte, error) { - if !d.Valid { - return []byte("null"), nil - } - return d.Decimal.MarshalJSON() -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface for XML -// deserialization -func (d *NullDecimal) UnmarshalText(text []byte) error { - str := string(text) - - // check for empty XML or XML without body e.g., <tag></tag> - if str == "" { - d.Valid = false - return nil - } - if err := d.Decimal.UnmarshalText(text); err != nil { - d.Valid = false - return err - } - d.Valid = true - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface for XML -// serialization. -func (d NullDecimal) MarshalText() (text []byte, err error) { - if !d.Valid { - return []byte{}, nil - } - return d.Decimal.MarshalText() -} - -// Trig functions - -// Atan returns the arctangent, in radians, of x. -func (d Decimal) Atan() Decimal { - if d.Equal(NewFromFloat(0.0)) { - return d - } - if d.GreaterThan(NewFromFloat(0.0)) { - return d.satan() - } - return d.Neg().satan().Neg() -} - -func (d Decimal) xatan() Decimal { - P0 := NewFromFloat(-8.750608600031904122785e-01) - P1 := NewFromFloat(-1.615753718733365076637e+01) - P2 := NewFromFloat(-7.500855792314704667340e+01) - P3 := NewFromFloat(-1.228866684490136173410e+02) - P4 := NewFromFloat(-6.485021904942025371773e+01) - Q0 := NewFromFloat(2.485846490142306297962e+01) - Q1 := NewFromFloat(1.650270098316988542046e+02) - Q2 := NewFromFloat(4.328810604912902668951e+02) - Q3 := NewFromFloat(4.853903996359136964868e+02) - Q4 := NewFromFloat(1.945506571482613964425e+02) - z := d.Mul(d) - b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z) - b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4) - z = b1.Div(b2) - z = d.Mul(z).Add(d) - return z -} - -// satan reduces its argument (known to be positive) -// to the range [0, 0.66] and calls xatan. -func (d Decimal) satan() Decimal { - Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits - Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8) - pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459) - - if d.LessThanOrEqual(NewFromFloat(0.66)) { - return d.xatan() - } - if d.GreaterThan(Tan3pio8) { - return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits) - } - return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits)) -} - -// sin coefficients -var _sin = [...]Decimal{ - NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd - NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d - NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1 - NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03 - NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0 - NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548 -} - -// Sin returns the sine of the radian argument x.
-func (d Decimal) Sin() Decimal { - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } else { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } - if sign { - y = y.Neg() - } - return y -} - -// cos coefficients -var _cos = [...]Decimal{ - NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b - NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05 - NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6 - NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5 - NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91 - NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b -} - -// Cos returns the cosine of the radian argument x. 
-func (d Decimal) Cos() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - // make argument positive - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - j &= 7 // octant modulo 2Pi radians (360 degrees) - // reflect in x axis - if j > 3 { - sign = !sign - j -= 4 - } - if j > 1 { - sign = !sign - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if j == 1 || j == 2 { - y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) - } else { - w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) - y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) - } - if sign { - y = y.Neg() - } - return y -} - -var _tanP = [...]Decimal{ - NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 - NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd - NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 -} -var _tanQ = [...]Decimal{ - NewFromFloat(1.00000000000000000000e+0), - NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 - NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 - NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef - NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 -} - -// Tan returns the tangent of the radian argument x. 
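Sin and Cos above reduce the argument against a three-part Pi/4 split before evaluating a polynomial, so the results are approximations; a sketch (upstream module and stdlib math assumed):

    x := decimal.NewFromFloat(math.Pi / 6)
    fmt.Println(x.Sin().StringFixed(4)) // approximately 0.5000
    fmt.Println(x.Cos().StringFixed(4)) // approximately 0.8660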
-func (d Decimal) Tan() Decimal { - - PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts - PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, - PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, - M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi - - if d.Equal(NewFromFloat(0.0)) { - return d - } - - // make argument positive but save the sign - sign := false - if d.LessThan(NewFromFloat(0.0)) { - d = d.Neg() - sign = true - } - - j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle - y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float - - // map zeros to origin - if j&1 == 1 { - j++ - y = y.Add(NewFromFloat(1.0)) - } - - z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic - zz := z.Mul(z) - - if zz.GreaterThan(NewFromFloat(1e-14)) { - w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) - x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) - y = z.Add(z.Mul(w.Div(x))) - } else { - y = z - } - if j&2 == 2 { - y = NewFromFloat(-1.0).Div(y) - } - if sign { - y = y.Neg() - } - return y -} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go deleted file mode 100644 index d4b0cd00795..00000000000 --- a/vendor/github.com/shopspring/decimal/rounding.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. - -package decimal - -type floatInfo struct { - mantbits uint - expbits uint - bias int -} - -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} - -// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits -// that will let the original floating point value be precisely reconstructed. -func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { - // If mantissa is zero, the number is zero; stop now. - if mant == 0 { - d.nd = 0 - return - } - - // Compute upper and lower such that any decimal number - // between upper and lower (possibly inclusive) - // will round to the original floating point number. - - // We may see at once that the number is already shortest. - // - // Suppose d is not denormal, so that 2^exp <= d < 10^dp. - // The closest shorter number is at least 10^(dp-nd) away. - // The lower/upper bounds computed below are at distance - // at most 2^(exp-mantbits). - // - // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), - // or equivalently log2(10)*(dp-nd) > exp-mantbits. - // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). - minexp := flt.bias + 1 // minimum possible exponent - if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { - // The number is already shortest. - return - } - - // d = mant << (exp - mantbits) - // Next highest floating point number is mant+1 << exp-mantbits. - // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
- upper := new(decimal) - upper.Assign(mant*2 + 1) - upper.Shift(exp - int(flt.mantbits) - 1) - - // d = mant << (exp - mantbits) - // Next lowest floating point number is mant-1 << exp-mantbits, - // unless mant-1 drops the significant bit and exp is not the minimum exp, - // in which case the next lowest is mant*2-1 << exp-mantbits-1. - // Either way, call it mantlo << explo-mantbits. - // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. - var mantlo uint64 - var explo int - if mant > 1<<flt.mantbits || exp == minexp { - mantlo = mant - 1 - explo = exp - } else { - mantlo = mant*2 - 1 - explo = exp - 1 - } - lower := new(decimal) - lower.Assign(mantlo*2 + 1) - lower.Shift(explo - int(flt.mantbits) - 1) - - // The upper and lower bounds are possible outputs only if - // the original mantissa is even, so that IEEE round-to-even - // would round to the original mantissa and not the neighbors. - inclusive := mant%2 == 0 - - // As we walk the digits we want to know whether rounding up would fall - // within the upper bound. This is tracked by upperdelta: - // - // If upperdelta == 0, the digits of d and upper are the same so far. - // - // If upperdelta == 1, we saw a difference of 1 between d and upper on a - // previous digit and subsequently only 9s for d and 0s for upper. - // (Thus rounding d up must fall outside the upper bound.) - // - // If upperdelta == 2, then the difference is greater than 1 - // and rounding up falls outside the upper bound. - var upperdelta uint8 - - // Now we can figure out the minimum number of digits required. - // Walk along until d has distinguished itself from upper and lower. - for ui := 0; ; ui++ { - // lower, d, and upper may have the decimal points at different - // places. In this case upper is the longest, so we iterate from - // ui==0 and start li and mi at (possibly negative) indices. - mi := ui - upper.dp + d.dp - if mi >= d.nd { - break - } - li := ui - upper.dp + lower.dp - l := byte('0') // lower digit - if li >= 0 && li < lower.nd { - l = lower.d[li] - } - m := byte('0') // middle digit - if mi >= 0 { - m = d.d[mi] - } - u := byte('0') // upper digit - if ui < upper.nd { - u = upper.d[ui] - } - - // Okay to round down (truncate) if lower has a different digit - // or if lower is inclusive and is exactly the result of rounding - // down (i.e., and we have reached the final digit of lower). - okdown := l != m || inclusive && li+1 == lower.nd - - switch { - case upperdelta == 0 && m+1 < u: - // Example: - // m = 12345xxx - // u = 12347xxx - upperdelta = 2 - case upperdelta == 0 && m != u: - // Example: - // m = 12345xxx - // u = 12346xxx - upperdelta = 1 - case upperdelta == 1 && (m != '9' || u != '0'): - // Example: - // m = 1234598x - // u = 1234600x - upperdelta = 2 - } - // Okay to round up if upper has a different digit and either upper - // is inclusive or upper is bigger than the result of rounding up. - okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd) - - // If it's okay to do either, then round to the nearest one. - // If it's okay to do only one, do it. - switch { - case okdown && okup: - d.Round(mi + 1) - return - case okdown: - d.RoundDown(mi + 1) - return - case okup: - d.RoundUp(mi + 1) - return - } - } -} diff --git a/vendor/github.com/spf13/cast/.gitignore b/vendor/github.com/spf13/cast/.gitignore deleted file mode 100644 index 53053a8ac59..00000000000 --- a/vendor/github.com/spf13/cast/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.bench diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE deleted file mode 100644 index 4527efb9c06..00000000000 --- a/vendor/github.com/spf13/cast/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile deleted file mode 100644 index f01a5dbb6e3..00000000000 --- a/vendor/github.com/spf13/cast/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -GOVERSION := $(shell go version | cut -d ' ' -f 3 | cut -d '.' -f 2) - -.PHONY: check fmt lint test test-race vet test-cover-html help -.DEFAULT_GOAL := help - -check: test-race fmt vet lint ## Run tests and linters - -test: ## Run tests - go test ./... - -test-race: ## Run tests with race detector - go test -race ./... - -fmt: ## Run gofmt linter -ifeq "$(GOVERSION)" "12" - @for d in `go list` ; do \ - if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \ - echo "^ improperly formatted go files" && echo && exit 1; \ - fi \ - done -endif - -lint: ## Run golint linter - @for d in `go list` ; do \ - if [ "`golint $$d | tee /dev/stderr`" ]; then \ - echo "^ golint errors!" && echo && exit 1; \ - fi \ - done - -vet: ## Run go vet linter - @if [ "`go vet | tee /dev/stderr`" ]; then \ - echo "^ go vet errors!" && echo && exit 1; \ - fi - -test-cover-html: ## Generate test coverage report - go test -coverprofile=coverage.out -covermode=count - go tool cover -func=coverage.out - -help: - @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md deleted file mode 100644 index 120a573426b..00000000000 --- a/vendor/github.com/spf13/cast/README.md +++ /dev/null @@ -1,75 +0,0 @@ -cast -==== -[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) - -Easy and safe casting from one type to another in Go - -Don’t Panic! ... Cast - -## What is Cast? - -Cast is a library to convert between different go types in a consistent and easy way. - -Cast provides simple functions to easily convert a number to a string, an -interface into a bool, etc. Cast does this intelligently when an obvious -conversion is possible. It doesn’t make any attempts to guess what you meant, -for example you can only convert a string to an int when it is a string -representation of an int such as “8”. Cast was developed for use in -[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON -for meta data. - -## Why use Cast? - -When working with dynamic data in Go you often need to cast or convert the data -from one type into another. Cast goes beyond just using type assertion (though -it uses that when possible) to provide a very straightforward and convenient -library. - -If you are working with interfaces to handle things like dynamic content -you’ll need an easy way to convert an interface into a given type. This -is the library for you. - -If you are taking in data from YAML, TOML or JSON or other formats which lack -full types, then Cast is the library for you. 
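To make the conversion rules described above concrete, a brief sketch (assuming the upstream github.com/spf13/cast package):

    fmt.Println(cast.ToInt("8"))    // 8: the string is an int representation
    fmt.Println(cast.ToInt("8.31")) // 0: no guessing, so the zero value is returned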
- -## Usage - -Cast provides a handful of To_____ methods. These methods will always return -the desired type. **If input is provided that will not convert to that type, the -0 or nil value for that type will be returned**. - -Cast also provides identical methods To_____E. These return the same result as -the To_____ methods, plus an additional error which tells you if it successfully -converted. Using these methods you can tell the difference between when the -input matched the zero value or when the conversion failed and the zero value -was returned. - -The following examples are merely a sample of what is available. Please review -the code for a complete set. - -### Example ‘ToString’: - - cast.ToString("mayonegg") // "mayonegg" - cast.ToString(8) // "8" - cast.ToString(8.31) // "8.31" - cast.ToString([]byte("one time")) // "one time" - cast.ToString(nil) // "" - - var foo interface{} = "one more time" - cast.ToString(foo) // "one more time" - - -### Example ‘ToInt’: - - cast.ToInt(8) // 8 - cast.ToInt(8.31) // 8 - cast.ToInt("8") // 8 - cast.ToInt(true) // 1 - cast.ToInt(false) // 0 - - var eight interface{} = 8 - cast.ToInt(eight) // 8 - cast.ToInt(nil) // 0 - diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go deleted file mode 100644 index 0cfe9418de3..00000000000 --- a/vendor/github.com/spf13/cast/cast.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Package cast provides easy and safe casting in Go. -package cast - -import "time" - -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -func ToTimeInDefaultLocation(i interface{}, location *time.Location) time.Time { - v, _ := ToTimeInDefaultLocationE(i, location) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. 
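The zero-value-versus-error distinction described in the Usage section above is the whole point of the E variants; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// ToInt hides failure behind the zero value.
	fmt.Println(cast.ToInt("oops")) // 0, indistinguishable from a genuine zero

	// ToIntE reports whether the conversion actually succeeded.
	if n, err := cast.ToIntE("oops"); err != nil {
		fmt.Println("failed:", err)
	} else {
		fmt.Println("value:", n)
	}
}
```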
-func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. -func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMapInt casts an interface to a map[string]int type. -func ToStringMapInt(i interface{}) map[string]int { - v, _ := ToStringMapIntE(i) - return v -} - -// ToStringMapInt64 casts an interface to a map[string]int64 type. -func ToStringMapInt64(i interface{}) map[string]int64 { - v, _ := ToStringMapInt64E(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) - return v -} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 514d759bfb5..00000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1476 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - return ToTimeInDefaultLocationE(i, time.UTC) -} - -// ToTimeInDefaultLocationE casts an empty interface to time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
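A sketch of the default-location behavior documented just above (the IST zone is an illustrative choice, not something the library assumes):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/cast"
)

func main() {
	ist := time.FixedZone("IST", 5*3600+30*60) // UTC+05:30

	// "2024-08-15" carries no timezone, so it is interpreted in ist.
	fmt.Println(cast.ToTimeInDefaultLocation("2024-08-15", ist))
	// 2024-08-15 00:00:00 +0530 IST

	// An input with an explicit offset keeps its own zone.
	fmt.Println(cast.ToTime("2024-08-15T10:00:00Z"))
	// 2024-08-15 10:00:00 +0000 UTC
}
```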
-func ToTimeInDefaultLocationE(i interface{}, location *time.Location) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDateInDefaultLocation(v, location) - case json.Number: - s, err1 := ToInt64E(v) - if err1 != nil { - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } - return time.Unix(s, 0), nil - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. -func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - case json.Number: - var v float64 - v, err = s.Float64() - d = time.Duration(v) - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - if i.(int) != 0 { - return true, nil - } - return false, nil - case string: - return strconv.ParseBool(i.(string)) - case json.Number: - v, err := ToInt64E(b) - if err == nil { - return v != 0, nil - } - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. -func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float64(intv), nil - } - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: - v, err := s.Float64() - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. 
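One subtlety of the ToDurationE branch above: strings containing a unit letter go through time.ParseDuration, while bare numeric strings and numeric types are taken as nanoseconds. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToDuration("1500ms")) // 1.5s  (has a unit, parsed by time.ParseDuration)
	fmt.Println(cast.ToDuration("1500"))   // 1.5µs (no unit letter, so "ns" is appended)
	fmt.Println(cast.ToDuration(1500))     // 1.5µs (numeric inputs are nanoseconds too)
}
```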
-func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return float32(intv), nil - } - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: - v, err := s.Float64() - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int64(intv), nil - } - - switch s := i.(type) { - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToInt64E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. -func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int32(intv), nil - } - - switch s := i.(type) { - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case json.Number: - return ToInt32E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. 
-func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int16(intv), nil - } - - switch s := i.(type) { - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case json.Number: - return ToInt16E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return int8(intv), nil - } - - switch s := i.(type) { - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case json.Number: - return ToInt8E(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. -func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - return intv, nil - } - - switch s := i.(type) { - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case json.Number: - return ToIntE(string(s)) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. 
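The string branches of the integer casts above parse via strconv.ParseInt(trimZeroDecimal(s), 0, 0), which has two notable consequences: a trailing zero decimal is accepted, and the numeric base is inferred. A sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToInt("8"))    // 8
	fmt.Println(cast.ToInt("8.0"))  // 8  (trimZeroDecimal strips the ".0" first)
	fmt.Println(cast.ToInt("0x10")) // 16 (base 0 lets ParseInt infer hex)
	fmt.Println(cast.ToInt("8.5"))  // 0  (not an integer, so the zero value is returned)
}
```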
-func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - case json.Number: - return ToUintE(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint64(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint64(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - case json.Number: - return ToUint64E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint32(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - case json.Number: - return ToUint32E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint16(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - case json.Number: - return ToUint16E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - intv, ok := toInt(i) - if ok { - if intv < 0 { - return 0, errNegativeNotAllowed - } - return uint8(intv), nil - } - - switch s := i.(type) { - case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) - if err == nil { - if v < 0 { - return 0, errNegativeNotAllowed - } - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - case json.Number: - return ToUint8E(string(s)) - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
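All of the unsigned conversions above reject negative inputs with errNegativeNotAllowed rather than wrapping around; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	if _, err := cast.ToUintE(-1); err != nil {
		fmt.Println(err) // unable to cast negative value
	}
	fmt.Println(cast.ToUint(-1)) // 0 (the non-E variant discards the error)
}
```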
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatUint(uint64(s), 10), nil - case uint64: - return strconv.FormatUint(uint64(s), 10), nil - case uint32: - return strconv.FormatUint(uint64(s), 10), nil - case uint16: - return strconv.FormatUint(uint64(s), 10), nil - case uint8: - return strconv.FormatUint(uint64(s), 10), nil - case json.Number: - return s.String(), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToStringMapIntE casts an interface to a map[string]int{} type. 
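The map conversions above accept either a ready-made map or a JSON object encoded as a string; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// A map with interface{} values, as produced by YAML/JSON decoders.
	in := map[string]interface{}{"a": 1, "b": "2"}
	fmt.Println(cast.ToStringMapString(in)) // map[a:1 b:2]

	// A string input is unmarshalled as JSON.
	fmt.Println(cast.ToStringMapString(`{"c":"3"}`)) // map[c:3]
}
```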
-func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt(val) - } - return m, nil - case map[string]int: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToIntE(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToStringMapInt64E casts an interface to a map[string]int64{} type. -func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} - if i == nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToInt64(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[k] = ToInt64(val) - } - return m, nil - case map[string]int64: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - } - - if reflect.TypeOf(i).Kind() != reflect.Map { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal := reflect.ValueOf(m) - v := reflect.ValueOf(i) - for _, keyVal := range v.MapKeys() { - val, err := ToInt64E(v.MapIndex(keyVal).Interface()) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) - } - mVal.SetMapIndex(keyVal, reflect.ValueOf(val)) - } - return m, nil -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. -func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. 
-func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case []int8: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []int64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float32: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []float64: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case string: - return strings.Fields(v), nil - case []error: - for _, err := range i.([]error) { - a = append(a, err.Error()) - } - return a, nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. -func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, time.UTC, timeFormats) -} - -// StringToDateInDefaultLocation casts an empty interface to a time.Time, -// interpreting inputs without a timezone to be in the given location, -// or the local timezone if nil. 
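The slice conversions above each have fallback paths worth knowing; in particular, a bare string is split on whitespace, and arbitrary slice kinds are handled element-by-element via reflection. A sketch:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	fmt.Println(cast.ToStringSlice("a b c"))              // [a b c] (split by strings.Fields)
	fmt.Println(cast.ToStringSlice([]int{1, 2, 3}))       // [1 2 3]
	fmt.Println(cast.ToIntSlice([]string{"1", "2", "3"})) // [1 2 3] (via reflection)
}
```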
-func StringToDateInDefaultLocation(s string, location *time.Location) (time.Time, error) { - return parseDateWith(s, location, timeFormats) -} - -type timeFormatType int - -const ( - timeFormatNoTimezone timeFormatType = iota - timeFormatNamedTimezone - timeFormatNumericTimezone - timeFormatNumericAndNamedTimezone - timeFormatTimeOnly -) - -type timeFormat struct { - format string - typ timeFormatType -} - -func (f timeFormat) hasTimezone() bool { - // We don't include the formats with only named timezones, see - // https://github.com/golang/go/issues/19694#issuecomment-289103522 - return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone -} - -var ( - timeFormats = []timeFormat{ - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"2006-01-02", timeFormatNoTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) - -func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - - for _, format := range formats { - if d, e = time.Parse(format.format, s); e == nil { - - // Some time formats have a zone name, but no offset, so it gets - // put in that zone name (not the default one passed in to us), but - // without that zone's offset. So set the location manually. - if format.typ <= timeFormatNamedTimezone { - if location == nil { - location = time.Local - } - year, month, day := d.Date() - hour, min, sec := d.Clock() - d = time.Date(year, month, day, hour, min, sec, d.Nanosecond(), location) - } - - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} - -// toInt returns the int value of v if v or v's underlying type -// is an int. -// Note that this will return false for int64 etc. types. 
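parseDateWith above tries the timeFormats list in order and pins formats that carry no timezone information to the supplied location; a sketch of the exported entry point:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// Matches the "2006-01-02 15:04:05" layout; with no timezone in the
	// input, StringToDate interprets it as UTC.
	t, err := cast.StringToDate("2024-08-15 10:30:00")
	fmt.Println(t, err) // 2024-08-15 10:30:00 +0000 UTC <nil>
}
```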
-func toInt(v interface{}) (int, bool) { - switch v := v.(type) { - case int: - return v, true - case time.Weekday: - return int(v), true - case time.Month: - return int(v), true - default: - return 0, false - } -} - -func trimZeroDecimal(s string) string { - var foundZero bool - for i := len(s); i > 0; i-- { - switch s[i-1] { - case '.': - if foundZero { - return s[:i-1] - } - case '0': - foundZero = true - default: - return s - } - } - return s -} diff --git a/vendor/github.com/spf13/cast/timeformattype_string.go b/vendor/github.com/spf13/cast/timeformattype_string.go deleted file mode 100644 index 1524fc82ce7..00000000000 --- a/vendor/github.com/spf13/cast/timeformattype_string.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by "stringer -type timeFormatType"; DO NOT EDIT. - -package cast - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[timeFormatNoTimezone-0] - _ = x[timeFormatNamedTimezone-1] - _ = x[timeFormatNumericTimezone-2] - _ = x[timeFormatNumericAndNamedTimezone-3] - _ = x[timeFormatTimeOnly-4] -} - -const _timeFormatType_name = "timeFormatNoTimezonetimeFormatNamedTimezonetimeFormatNumericTimezonetimeFormatNumericAndNamedTimezonetimeFormatTimeOnly" - -var _timeFormatType_index = [...]uint8{0, 20, 43, 68, 101, 119} - -func (i timeFormatType) String() string { - if i < 0 || i >= timeFormatType(len(_timeFormatType_index)-1) { - return "timeFormatType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _timeFormatType_name[_timeFormatType_index[i]:_timeFormatType_index[i+1]] -} diff --git a/vendor/github.com/valyala/bytebufferpool/.travis.yml b/vendor/github.com/valyala/bytebufferpool/.travis.yml deleted file mode 100644 index 6a6ec2eb069..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.6 - -script: - # build test for supported platforms - - GOOS=linux go build - - GOOS=darwin go build - - GOOS=freebsd go build - - GOOS=windows go build - - GOARCH=386 go build - - # run tests on a standard platform - - go test -v ./... diff --git a/vendor/github.com/valyala/bytebufferpool/LICENSE b/vendor/github.com/valyala/bytebufferpool/LICENSE deleted file mode 100644 index f7c935c201b..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/valyala/bytebufferpool/README.md b/vendor/github.com/valyala/bytebufferpool/README.md deleted file mode 100644 index 061357e833d..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/README.md +++ /dev/null @@ -1,21 +0,0 @@ -[![Build Status](https://travis-ci.org/valyala/bytebufferpool.svg)](https://travis-ci.org/valyala/bytebufferpool) -[![GoDoc](https://godoc.org/github.com/valyala/bytebufferpool?status.svg)](http://godoc.org/github.com/valyala/bytebufferpool) -[![Go Report](http://goreportcard.com/badge/valyala/bytebufferpool)](http://goreportcard.com/report/valyala/bytebufferpool) - -# bytebufferpool - -An implementation of a pool of byte buffers with anti-memory-waste protection. - -The pool may waste limited amount of memory due to fragmentation. -This amount equals to the maximum total size of the byte buffers -in concurrent use. - -# Benchmark results -Currently bytebufferpool is fastest and most effective buffer pool written in Go. - -You can find results [here](https://omgnull.github.io/go-benchmark/buffer/). - -# bytebufferpool users - -* [fasthttp](https://github.com/valyala/fasthttp) -* [quicktemplate](https://github.com/valyala/quicktemplate) diff --git a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go b/vendor/github.com/valyala/bytebufferpool/bytebuffer.go deleted file mode 100644 index 07a055a2df7..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/bytebuffer.go +++ /dev/null @@ -1,111 +0,0 @@ -package bytebufferpool - -import "io" - -// ByteBuffer provides byte buffer, which can be used for minimizing -// memory allocations. -// -// ByteBuffer may be used with functions appending data to the given []byte -// slice. See example code for details. -// -// Use Get for obtaining an empty byte buffer. -type ByteBuffer struct { - - // B is a byte buffer to use in append-like workloads. - // See example code for details. - B []byte -} - -// Len returns the size of the byte buffer. -func (b *ByteBuffer) Len() int { - return len(b.B) -} - -// ReadFrom implements io.ReaderFrom. -// -// The function appends all the data read from r to b. -func (b *ByteBuffer) ReadFrom(r io.Reader) (int64, error) { - p := b.B - nStart := int64(len(p)) - nMax := int64(cap(p)) - n := nStart - if nMax == 0 { - nMax = 64 - p = make([]byte, nMax) - } else { - p = p[:nMax] - } - for { - if n == nMax { - nMax *= 2 - bNew := make([]byte, nMax) - copy(bNew, p) - p = bNew - } - nn, err := r.Read(p[n:]) - n += int64(nn) - if err != nil { - b.B = p[:n] - n -= nStart - if err == io.EOF { - return n, nil - } - return n, err - } - } -} - -// WriteTo implements io.WriterTo. -func (b *ByteBuffer) WriteTo(w io.Writer) (int64, error) { - n, err := w.Write(b.B) - return int64(n), err -} - -// Bytes returns b.B, i.e. all the bytes accumulated in the buffer. -// -// The purpose of this function is bytes.Buffer compatibility. -func (b *ByteBuffer) Bytes() []byte { - return b.B -} - -// Write implements io.Writer - it appends p to ByteBuffer.B -func (b *ByteBuffer) Write(p []byte) (int, error) { - b.B = append(b.B, p...) - return len(p), nil -} - -// WriteByte appends the byte c to the buffer. -// -// The purpose of this function is bytes.Buffer compatibility. -// -// The function always returns nil. 
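For reference, the canonical Get/write/Put cycle for the default pool described above, as a minimal sketch (Put resets the buffer internally, and the buffer must not be touched afterwards):

```go
package main

import (
	"fmt"

	"github.com/valyala/bytebufferpool"
)

func main() {
	bb := bytebufferpool.Get() // empty buffer from the default pool
	bb.WriteString("hello, ")
	bb.Write([]byte("world"))
	fmt.Println(bb.String()) // hello, world

	bytebufferpool.Put(bb) // return for reuse; bb must not be used after this
}
```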
-func (b *ByteBuffer) WriteByte(c byte) error { - b.B = append(b.B, c) - return nil -} - -// WriteString appends s to ByteBuffer.B. -func (b *ByteBuffer) WriteString(s string) (int, error) { - b.B = append(b.B, s...) - return len(s), nil -} - -// Set sets ByteBuffer.B to p. -func (b *ByteBuffer) Set(p []byte) { - b.B = append(b.B[:0], p...) -} - -// SetString sets ByteBuffer.B to s. -func (b *ByteBuffer) SetString(s string) { - b.B = append(b.B[:0], s...) -} - -// String returns string representation of ByteBuffer.B. -func (b *ByteBuffer) String() string { - return string(b.B) -} - -// Reset makes ByteBuffer.B empty. -func (b *ByteBuffer) Reset() { - b.B = b.B[:0] -} diff --git a/vendor/github.com/valyala/bytebufferpool/doc.go b/vendor/github.com/valyala/bytebufferpool/doc.go deleted file mode 100644 index e511b7c5932..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package bytebufferpool implements a pool of byte buffers -// with anti-fragmentation protection. -// -// The pool may waste limited amount of memory due to fragmentation. -// This amount equals to the maximum total size of the byte buffers -// in concurrent use. -package bytebufferpool diff --git a/vendor/github.com/valyala/bytebufferpool/pool.go b/vendor/github.com/valyala/bytebufferpool/pool.go deleted file mode 100644 index 8bb4134dd0d..00000000000 --- a/vendor/github.com/valyala/bytebufferpool/pool.go +++ /dev/null @@ -1,151 +0,0 @@ -package bytebufferpool - -import ( - "sort" - "sync" - "sync/atomic" -) - -const ( - minBitSize = 6 // 2**6=64 is a CPU cache line size - steps = 20 - - minSize = 1 << minBitSize - maxSize = 1 << (minBitSize + steps - 1) - - calibrateCallsThreshold = 42000 - maxPercentile = 0.95 -) - -// Pool represents byte buffer pool. -// -// Distinct pools may be used for distinct types of byte buffers. -// Properly determined byte buffer types with their own pools may help reducing -// memory waste. -type Pool struct { - calls [steps]uint64 - calibrating uint64 - - defaultSize uint64 - maxSize uint64 - - pool sync.Pool -} - -var defaultPool Pool - -// Get returns an empty byte buffer from the pool. -// -// Got byte buffer may be returned to the pool via Put call. -// This reduces the number of memory allocations required for byte buffer -// management. -func Get() *ByteBuffer { return defaultPool.Get() } - -// Get returns new byte buffer with zero length. -// -// The byte buffer may be returned to the pool via Put after the use -// in order to minimize GC overhead. -func (p *Pool) Get() *ByteBuffer { - v := p.pool.Get() - if v != nil { - return v.(*ByteBuffer) - } - return &ByteBuffer{ - B: make([]byte, 0, atomic.LoadUint64(&p.defaultSize)), - } -} - -// Put returns byte buffer to the pool. -// -// ByteBuffer.B mustn't be touched after returning it to the pool. -// Otherwise data races will occur. -func Put(b *ByteBuffer) { defaultPool.Put(b) } - -// Put releases byte buffer obtained via Get to the pool. -// -// The buffer mustn't be accessed after returning to the pool. 
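Since calibration statistics are kept per pool, the Pool doc comment's advice to use distinct pools for distinct buffer types can be followed with a plain package-level Pool value. A sketch under that assumption (the jsonBufPool name and render helper are hypothetical):

```go
package bufs

import "github.com/valyala/bytebufferpool"

// jsonBufPool is calibrated only by JSON rendering, not by other traffic.
var jsonBufPool bytebufferpool.Pool

func render() []byte {
	bb := jsonBufPool.Get()
	defer jsonBufPool.Put(bb)
	bb.B = append(bb.B, `{"ok":true}`...)
	// Copy before Put: the pooled buffer will be reused.
	return append([]byte(nil), bb.B...)
}
```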
-func (p *Pool) Put(b *ByteBuffer) { - idx := index(len(b.B)) - - if atomic.AddUint64(&p.calls[idx], 1) > calibrateCallsThreshold { - p.calibrate() - } - - maxSize := int(atomic.LoadUint64(&p.maxSize)) - if maxSize == 0 || cap(b.B) <= maxSize { - b.Reset() - p.pool.Put(b) - } -} - -func (p *Pool) calibrate() { - if !atomic.CompareAndSwapUint64(&p.calibrating, 0, 1) { - return - } - - a := make(callSizes, 0, steps) - var callsSum uint64 - for i := uint64(0); i < steps; i++ { - calls := atomic.SwapUint64(&p.calls[i], 0) - callsSum += calls - a = append(a, callSize{ - calls: calls, - size: minSize << i, - }) - } - sort.Sort(a) - - defaultSize := a[0].size - maxSize := defaultSize - - maxSum := uint64(float64(callsSum) * maxPercentile) - callsSum = 0 - for i := 0; i < steps; i++ { - if callsSum > maxSum { - break - } - callsSum += a[i].calls - size := a[i].size - if size > maxSize { - maxSize = size - } - } - - atomic.StoreUint64(&p.defaultSize, defaultSize) - atomic.StoreUint64(&p.maxSize, maxSize) - - atomic.StoreUint64(&p.calibrating, 0) -} - -type callSize struct { - calls uint64 - size uint64 -} - -type callSizes []callSize - -func (ci callSizes) Len() int { - return len(ci) -} - -func (ci callSizes) Less(i, j int) bool { - return ci[i].calls > ci[j].calls -} - -func (ci callSizes) Swap(i, j int) { - ci[i], ci[j] = ci[j], ci[i] -} - -func index(n int) int { - n-- - n >>= minBitSize - idx := 0 - for n > 0 { - n >>= 1 - idx++ - } - if idx >= steps { - idx = steps - 1 - } - return idx -} diff --git a/vendor/github.com/valyala/fasttemplate/LICENSE b/vendor/github.com/valyala/fasttemplate/LICENSE deleted file mode 100644 index 7125a63c4cf..00000000000 --- a/vendor/github.com/valyala/fasttemplate/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Aliaksandr Valialkin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/valyala/fasttemplate/README.md b/vendor/github.com/valyala/fasttemplate/README.md deleted file mode 100644 index 2839ed0f7a2..00000000000 --- a/vendor/github.com/valyala/fasttemplate/README.md +++ /dev/null @@ -1,85 +0,0 @@ -fasttemplate -============ - -Simple and fast template engine for Go. - -Fasttemplate performs only a single task - it substitutes template placeholders -with user-defined values. At high speed :) - -Take a look at [quicktemplate](https://github.com/valyala/quicktemplate) if you need fast yet powerful html template engine. 
- -*Please note that fasttemplate doesn't do any escaping on template values -unlike [html/template](http://golang.org/pkg/html/template/) do. So values -must be properly escaped before passing them to fasttemplate.* - -Fasttemplate is faster than [text/template](http://golang.org/pkg/text/template/), -[strings.Replace](http://golang.org/pkg/strings/#Replace), -[strings.Replacer](http://golang.org/pkg/strings/#Replacer) -and [fmt.Fprintf](https://golang.org/pkg/fmt/#Fprintf) on placeholders' substitution. - -Below are benchmark results comparing fasttemplate performance to text/template, -strings.Replace, strings.Replacer and fmt.Fprintf: - -``` -$ go test -bench=. -benchmem -PASS -BenchmarkFmtFprintf-4 2000000 790 ns/op 0 B/op 0 allocs/op -BenchmarkStringsReplace-4 500000 3474 ns/op 2112 B/op 14 allocs/op -BenchmarkStringsReplacer-4 500000 2657 ns/op 2256 B/op 23 allocs/op -BenchmarkTextTemplate-4 500000 3333 ns/op 336 B/op 19 allocs/op -BenchmarkFastTemplateExecuteFunc-4 5000000 349 ns/op 0 B/op 0 allocs/op -BenchmarkFastTemplateExecute-4 3000000 383 ns/op 0 B/op 0 allocs/op -BenchmarkFastTemplateExecuteFuncString-4 3000000 549 ns/op 144 B/op 1 allocs/op -BenchmarkFastTemplateExecuteString-4 3000000 572 ns/op 144 B/op 1 allocs/op -BenchmarkFastTemplateExecuteTagFunc-4 2000000 743 ns/op 144 B/op 3 allocs/op -``` - - -Docs -==== - -See http://godoc.org/github.com/valyala/fasttemplate . - - -Usage -===== - -```go - template := "http://{{host}}/?q={{query}}&foo={{bar}}{{bar}}" - t := fasttemplate.New(template, "{{", "}}") - s := t.ExecuteString(map[string]interface{}{ - "host": "google.com", - "query": url.QueryEscape("hello=world"), - "bar": "foobar", - }) - fmt.Printf("%s", s) - - // Output: - // http://google.com/?q=hello%3Dworld&foo=foobarfoobar -``` - - -Advanced usage -============== - -```go - template := "Hello, [user]! You won [prize]!!! [foobar]" - t, err := fasttemplate.NewTemplate(template, "[", "]") - if err != nil { - log.Fatalf("unexpected error when parsing template: %s", err) - } - s := t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { - switch tag { - case "user": - return w.Write([]byte("John")) - case "prize": - return w.Write([]byte("$100500")) - default: - return w.Write([]byte(fmt.Sprintf("[unknown tag %q]", tag))) - } - }) - fmt.Printf("%s", s) - - // Output: - // Hello, John! You won $100500!!! [unknown tag "foobar"] -``` diff --git a/vendor/github.com/valyala/fasttemplate/template.go b/vendor/github.com/valyala/fasttemplate/template.go deleted file mode 100644 index f2d3261f8b6..00000000000 --- a/vendor/github.com/valyala/fasttemplate/template.go +++ /dev/null @@ -1,436 +0,0 @@ -// Package fasttemplate implements simple and fast template library. -// -// Fasttemplate is faster than text/template, strings.Replace -// and strings.Replacer. -// -// Fasttemplate ideally fits for fast and simple placeholders' substitutions. -package fasttemplate - -import ( - "bytes" - "fmt" - "io" - - "github.com/valyala/bytebufferpool" -) - -// ExecuteFunc calls f on each template tag (placeholder) occurrence. -// -// Returns the number of bytes written to w. -// -// This function is optimized for constantly changing templates. -// Use Template.ExecuteFunc for frozen templates. 
-func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int64, error) { - s := unsafeString2Bytes(template) - a := unsafeString2Bytes(startTag) - b := unsafeString2Bytes(endTag) - - var nn int64 - var ni int - var err error - for { - n := bytes.Index(s, a) - if n < 0 { - break - } - ni, err = w.Write(s[:n]) - nn += int64(ni) - if err != nil { - return nn, err - } - - s = s[n+len(a):] - n = bytes.Index(s, b) - if n < 0 { - // cannot find end tag - just write it to the output. - ni, _ = w.Write(a) - nn += int64(ni) - break - } - - ni, err = f(w, unsafeBytes2String(s[:n])) - nn += int64(ni) - if err != nil { - return nn, err - } - s = s[n+len(b):] - } - ni, err = w.Write(s) - nn += int64(ni) - - return nn, err -} - -// Execute substitutes template tags (placeholders) with the corresponding -// values from the map m and writes the result to the given writer w. -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// Returns the number of bytes written to w. -// -// This function is optimized for constantly changing templates. -// Use Template.Execute for frozen templates. -func Execute(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) { - return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) -} - -// ExecuteStd works the same way as Execute, but keeps the unknown placeholders. -// This can be used as a drop-in replacement for strings.Replacer -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// Returns the number of bytes written to w. -// -// This function is optimized for constantly changing templates. -// Use Template.ExecuteStd for frozen templates. -func ExecuteStd(template, startTag, endTag string, w io.Writer, m map[string]interface{}) (int64, error) { - return ExecuteFunc(template, startTag, endTag, w, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, startTag, endTag, tag, m) }) -} - -// ExecuteFuncString calls f on each template tag (placeholder) occurrence -// and substitutes it with the data written to TagFunc's w. -// -// Returns the resulting string. -// -// This function is optimized for constantly changing templates. -// Use Template.ExecuteFuncString for frozen templates. 
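A sketch of the streaming entry point above: Execute substitutes tags from the map and writes straight to an io.Writer, so no intermediate string is built.

```go
package main

import (
	"os"

	"github.com/valyala/fasttemplate"
)

func main() {
	_, err := fasttemplate.Execute("Hello, {{name}}!\n", "{{", "}}",
		os.Stdout, map[string]interface{}{"name": "world"})
	if err != nil {
		panic(err)
	}
	// Output: Hello, world!
}
```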
-func ExecuteFuncString(template, startTag, endTag string, f TagFunc) string { - s, err := ExecuteFuncStringWithErr(template, startTag, endTag, f) - if err != nil { - panic(fmt.Sprintf("unexpected error: %s", err)) - } - return s -} - -// ExecuteFuncStringWithErr is nearly the same as ExecuteFuncString -// but when f returns an error, ExecuteFuncStringWithErr won't panic like ExecuteFuncString -// it just returns an empty string and the error f returned -func ExecuteFuncStringWithErr(template, startTag, endTag string, f TagFunc) (string, error) { - if n := bytes.Index(unsafeString2Bytes(template), unsafeString2Bytes(startTag)); n < 0 { - return template, nil - } - - bb := byteBufferPool.Get() - if _, err := ExecuteFunc(template, startTag, endTag, bb, f); err != nil { - bb.Reset() - byteBufferPool.Put(bb) - return "", err - } - s := string(bb.B) - bb.Reset() - byteBufferPool.Put(bb) - return s, nil -} - -var byteBufferPool bytebufferpool.Pool - -// ExecuteString substitutes template tags (placeholders) with the corresponding -// values from the map m and returns the result. -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// This function is optimized for constantly changing templates. -// Use Template.ExecuteString for frozen templates. -func ExecuteString(template, startTag, endTag string, m map[string]interface{}) string { - return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) -} - -// ExecuteStringStd works the same way as ExecuteString, but keeps the unknown placeholders. -// This can be used as a drop-in replacement for strings.Replacer -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// This function is optimized for constantly changing templates. -// Use Template.ExecuteStringStd for frozen templates. -func ExecuteStringStd(template, startTag, endTag string, m map[string]interface{}) string { - return ExecuteFuncString(template, startTag, endTag, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, startTag, endTag, tag, m) }) -} - -// Template implements simple template engine, which can be used for fast -// tags' (aka placeholders) substitution. -type Template struct { - template string - startTag string - endTag string - - texts [][]byte - tags []string - byteBufferPool bytebufferpool.Pool -} - -// New parses the given template using the given startTag and endTag -// as tag start and tag end. -// -// The returned template can be executed by concurrently running goroutines -// using Execute* methods. -// -// New panics if the given template cannot be parsed. Use NewTemplate instead -// if template may contain errors. -func New(template, startTag, endTag string) *Template { - t, err := NewTemplate(template, startTag, endTag) - if err != nil { - panic(err) - } - return t -} - -// NewTemplate parses the given template using the given startTag and endTag -// as tag start and tag end. -// -// The returned template can be executed by concurrently running goroutines -// using Execute* methods. 
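A short sketch of the non-panicking variant documented above: ExecuteFuncStringWithErr propagates a TagFunc error instead of panicking as ExecuteFuncString does.

```go
package main

import (
	"fmt"
	"io"

	"github.com/valyala/fasttemplate"
)

func main() {
	s, err := fasttemplate.ExecuteFuncStringWithErr(
		"user={{user}}", "{{", "}}",
		func(w io.Writer, tag string) (int, error) {
			if tag != "user" {
				// Returning an error aborts execution; ExecuteFuncString
				// would panic here instead.
				return 0, fmt.Errorf("unknown tag %q", tag)
			}
			return w.Write([]byte("jane"))
		})
	if err != nil {
		fmt.Println("tag error:", err)
		return
	}
	fmt.Println(s)
}
```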
-func NewTemplate(template, startTag, endTag string) (*Template, error) { - var t Template - err := t.Reset(template, startTag, endTag) - if err != nil { - return nil, err - } - return &t, nil -} - -// TagFunc can be used as a substitution value in the map passed to Execute*. -// Execute* functions pass tag (placeholder) name in 'tag' argument. -// -// TagFunc must be safe to call from concurrently running goroutines. -// -// TagFunc must write contents to w and return the number of bytes written. -type TagFunc func(w io.Writer, tag string) (int, error) - -// Reset resets the template t to new one defined by -// template, startTag and endTag. -// -// Reset allows Template object re-use. -// -// Reset may be called only if no other goroutines call t methods at the moment. -func (t *Template) Reset(template, startTag, endTag string) error { - // Keep these vars in t, so GC won't collect them and won't break - // vars derived via unsafe* - t.template = template - t.startTag = startTag - t.endTag = endTag - t.texts = t.texts[:0] - t.tags = t.tags[:0] - - if len(startTag) == 0 { - panic("startTag cannot be empty") - } - if len(endTag) == 0 { - panic("endTag cannot be empty") - } - - s := unsafeString2Bytes(template) - a := unsafeString2Bytes(startTag) - b := unsafeString2Bytes(endTag) - - tagsCount := bytes.Count(s, a) - if tagsCount == 0 { - return nil - } - - if tagsCount+1 > cap(t.texts) { - t.texts = make([][]byte, 0, tagsCount+1) - } - if tagsCount > cap(t.tags) { - t.tags = make([]string, 0, tagsCount) - } - - for { - n := bytes.Index(s, a) - if n < 0 { - t.texts = append(t.texts, s) - break - } - t.texts = append(t.texts, s[:n]) - - s = s[n+len(a):] - n = bytes.Index(s, b) - if n < 0 { - return fmt.Errorf("Cannot find end tag=%q in the template=%q starting from %q", endTag, template, s) - } - - t.tags = append(t.tags, unsafeBytes2String(s[:n])) - s = s[n+len(b):] - } - - return nil -} - -// ExecuteFunc calls f on each template tag (placeholder) occurrence. -// -// Returns the number of bytes written to w. -// -// This function is optimized for frozen templates. -// Use ExecuteFunc for constantly changing templates. -func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) { - var nn int64 - - n := len(t.texts) - 1 - if n == -1 { - ni, err := w.Write(unsafeString2Bytes(t.template)) - return int64(ni), err - } - - for i := 0; i < n; i++ { - ni, err := w.Write(t.texts[i]) - nn += int64(ni) - if err != nil { - return nn, err - } - - ni, err = f(w, t.tags[i]) - nn += int64(ni) - if err != nil { - return nn, err - } - } - ni, err := w.Write(t.texts[n]) - nn += int64(ni) - return nn, err -} - -// Execute substitutes template tags (placeholders) with the corresponding -// values from the map m and writes the result to the given writer w. -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// Returns the number of bytes written to w. -func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error) { - return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) -} - -// ExecuteStd works the same way as Execute, but keeps the unknown placeholders. 
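The doc comments above note that a parsed Template may be executed by concurrently running goroutines; a minimal illustration of the parse-once, execute-many pattern:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/valyala/fasttemplate"
)

func main() {
	// Parse once; the Execute* methods are safe for concurrent use.
	t := fasttemplate.New("Hello, {{name}}!", "{{", "}}")

	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fmt.Println(t.ExecuteString(map[string]interface{}{"name": name}))
		}(name)
	}
	wg.Wait()
}
```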
-// This can be used as a drop-in replacement for strings.Replacer -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// Returns the number of bytes written to w. -func (t *Template) ExecuteStd(w io.Writer, m map[string]interface{}) (int64, error) { - return t.ExecuteFunc(w, func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, t.startTag, t.endTag, tag, m) }) -} - -// ExecuteFuncString calls f on each template tag (placeholder) occurrence -// and substitutes it with the data written to TagFunc's w. -// -// Returns the resulting string. -// -// This function is optimized for frozen templates. -// Use ExecuteFuncString for constantly changing templates. -func (t *Template) ExecuteFuncString(f TagFunc) string { - s, err := t.ExecuteFuncStringWithErr(f) - if err != nil { - panic(fmt.Sprintf("unexpected error: %s", err)) - } - return s -} - -// ExecuteFuncStringWithErr calls f on each template tag (placeholder) occurrence -// and substitutes it with the data written to TagFunc's w. -// -// Returns the resulting string. -// -// This function is optimized for frozen templates. -// Use ExecuteFuncString for constantly changing templates. -func (t *Template) ExecuteFuncStringWithErr(f TagFunc) (string, error) { - bb := t.byteBufferPool.Get() - if _, err := t.ExecuteFunc(bb, f); err != nil { - bb.Reset() - t.byteBufferPool.Put(bb) - return "", err - } - s := string(bb.Bytes()) - bb.Reset() - t.byteBufferPool.Put(bb) - return s, nil -} - -// ExecuteString substitutes template tags (placeholders) with the corresponding -// values from the map m and returns the result. -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// This function is optimized for frozen templates. -// Use ExecuteString for constantly changing templates. -func (t *Template) ExecuteString(m map[string]interface{}) string { - return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return stdTagFunc(w, tag, m) }) -} - -// ExecuteStringStd works the same way as ExecuteString, but keeps the unknown placeholders. -// This can be used as a drop-in replacement for strings.Replacer -// -// Substitution map m may contain values with the following types: -// * []byte - the fastest value type -// * string - convenient value type -// * TagFunc - flexible value type -// -// This function is optimized for frozen templates. -// Use ExecuteStringStd for constantly changing templates. -func (t *Template) ExecuteStringStd(m map[string]interface{}) string { - return t.ExecuteFuncString(func(w io.Writer, tag string) (int, error) { return keepUnknownTagFunc(w, t.startTag, t.endTag, tag, m) }) -} - -func stdTagFunc(w io.Writer, tag string, m map[string]interface{}) (int, error) { - v := m[tag] - if v == nil { - return 0, nil - } - switch value := v.(type) { - case []byte: - return w.Write(value) - case string: - return w.Write([]byte(value)) - case TagFunc: - return value(w, tag) - default: - panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. 
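stdTagFunc above accepts []byte, string, and TagFunc values; a sketch using a TagFunc map value so a substitution is computed lazily at execution time (tag names are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"time"

	"github.com/valyala/fasttemplate"
)

func main() {
	t := fasttemplate.New("static={{s}} dynamic={{now}}", "{{", "}}")
	s := t.ExecuteString(map[string]interface{}{
		"s": []byte("fast"), // []byte is documented above as the fastest value type
		// A TagFunc value is invoked at execution time, so each
		// execution can produce a different substitution.
		"now": fasttemplate.TagFunc(func(w io.Writer, tag string) (int, error) {
			return w.Write([]byte(time.Now().Format(time.RFC3339)))
		}),
	})
	fmt.Println(s)
}
```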
Expected []byte, string or TagFunc", tag, v)) - } -} - -func keepUnknownTagFunc(w io.Writer, startTag, endTag, tag string, m map[string]interface{}) (int, error) { - v, ok := m[tag] - if !ok { - if _, err := w.Write(unsafeString2Bytes(startTag)); err != nil { - return 0, err - } - if _, err := w.Write(unsafeString2Bytes(tag)); err != nil { - return 0, err - } - if _, err := w.Write(unsafeString2Bytes(endTag)); err != nil { - return 0, err - } - return len(startTag) + len(tag) + len(endTag), nil - } - if v == nil { - return 0, nil - } - switch value := v.(type) { - case []byte: - return w.Write(value) - case string: - return w.Write([]byte(value)) - case TagFunc: - return value(w, tag) - default: - panic(fmt.Sprintf("tag=%q contains unexpected value type=%#v. Expected []byte, string or TagFunc", tag, v)) - } -} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe.go b/vendor/github.com/valyala/fasttemplate/unsafe.go deleted file mode 100644 index 1020ca3874b..00000000000 --- a/vendor/github.com/valyala/fasttemplate/unsafe.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !appengine - -package fasttemplate - -import ( - "reflect" - "unsafe" -) - -func unsafeBytes2String(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -func unsafeString2Bytes(s string) (b []byte) { - sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = sh.Data - bh.Cap = sh.Len - bh.Len = sh.Len - return b -} diff --git a/vendor/github.com/valyala/fasttemplate/unsafe_gae.go b/vendor/github.com/valyala/fasttemplate/unsafe_gae.go deleted file mode 100644 index cc4ce151694..00000000000 --- a/vendor/github.com/valyala/fasttemplate/unsafe_gae.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package fasttemplate - -func unsafeBytes2String(b []byte) string { - return string(b) -} - -func unsafeString2Bytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/golang.org/x/crypto/md4/md4.go b/vendor/golang.org/x/crypto/md4/md4.go deleted file mode 100644 index 7d9281e0259..00000000000 --- a/vendor/golang.org/x/crypto/md4/md4.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package md4 implements the MD4 hash algorithm as defined in RFC 1320. -// -// Deprecated: MD4 is cryptographically broken and should only be used -// where compatibility with legacy systems, not security, is the goal. Instead, -// use a secure hash like SHA-256 (from crypto/sha256). -package md4 - -import ( - "crypto" - "hash" -) - -func init() { - crypto.RegisterHash(crypto.MD4, New) -} - -// The size of an MD4 checksum in bytes. -const Size = 16 - -// The blocksize of MD4 in bytes. -const BlockSize = 64 - -const ( - _Chunk = 64 - _Init0 = 0x67452301 - _Init1 = 0xEFCDAB89 - _Init2 = 0x98BADCFE - _Init3 = 0x10325476 -) - -// digest represents the partial evaluation of a checksum. -type digest struct { - s [4]uint32 - x [_Chunk]byte - nx int - len uint64 -} - -func (d *digest) Reset() { - d.s[0] = _Init0 - d.s[1] = _Init1 - d.s[2] = _Init2 - d.s[3] = _Init3 - d.nx = 0 - d.len = 0 -} - -// New returns a new hash.Hash computing the MD4 checksum. 
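The deleted unsafe.go relies on the old reflect.StringHeader/SliceHeader trick. On Go 1.20+ the same zero-copy conversions are usually written with unsafe.String and unsafe.Slice; this is a sketch of the modern equivalent, not part of this patch, and the usual caveat applies: the byte slice must never be mutated while the string is alive.

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytes2String and string2Bytes mirror the deleted helpers using the
// unsafe.String/unsafe.Slice APIs (Go 1.20+/1.17+) instead of
// reflect.StringHeader, which is now deprecated.
func bytes2String(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func string2Bytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	fmt.Println(bytes2String([]byte("hi")), string2Bytes("there"))
}
```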
-func New() hash.Hash { - d := new(digest) - d.Reset() - return d -} - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Write(p []byte) (nn int, err error) { - nn = len(p) - d.len += uint64(nn) - if d.nx > 0 { - n := len(p) - if n > _Chunk-d.nx { - n = _Chunk - d.nx - } - for i := 0; i < n; i++ { - d.x[d.nx+i] = p[i] - } - d.nx += n - if d.nx == _Chunk { - _Block(d, d.x[0:]) - d.nx = 0 - } - p = p[n:] - } - n := _Block(d, p) - p = p[n:] - if len(p) > 0 { - d.nx = copy(d.x[:], p) - } - return -} - -func (d0 *digest) Sum(in []byte) []byte { - // Make a copy of d0, so that caller can keep writing and summing. - d := new(digest) - *d = *d0 - - // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. - len := d.len - var tmp [64]byte - tmp[0] = 0x80 - if len%64 < 56 { - d.Write(tmp[0 : 56-len%64]) - } else { - d.Write(tmp[0 : 64+56-len%64]) - } - - // Length in bits. - len <<= 3 - for i := uint(0); i < 8; i++ { - tmp[i] = byte(len >> (8 * i)) - } - d.Write(tmp[0:8]) - - if d.nx != 0 { - panic("d.nx != 0") - } - - for _, s := range d.s { - in = append(in, byte(s>>0)) - in = append(in, byte(s>>8)) - in = append(in, byte(s>>16)) - in = append(in, byte(s>>24)) - } - return in -} diff --git a/vendor/golang.org/x/crypto/md4/md4block.go b/vendor/golang.org/x/crypto/md4/md4block.go deleted file mode 100644 index 5ea1ba966ea..00000000000 --- a/vendor/golang.org/x/crypto/md4/md4block.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// MD4 block step. -// In its own file so that a faster assembly or C version -// can be substituted easily. - -package md4 - -import "math/bits" - -var shift1 = []int{3, 7, 11, 19} -var shift2 = []int{3, 5, 9, 13} -var shift3 = []int{3, 9, 11, 15} - -var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15} -var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15} - -func _Block(dig *digest, p []byte) int { - a := dig.s[0] - b := dig.s[1] - c := dig.s[2] - d := dig.s[3] - n := 0 - var X [16]uint32 - for len(p) >= _Chunk { - aa, bb, cc, dd := a, b, c, d - - j := 0 - for i := 0; i < 16; i++ { - X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 - j += 4 - } - - // If this needs to be made faster in the future, - // the usual trick is to unroll each of these - // loops by a factor of 4; that lets you replace - // the shift[] lookups with constants and, - // with suitable variable renaming in each - // unrolled body, delete the a, b, c, d = d, a, b, c - // (or you can let the optimizer do the renaming). - // - // The index variables are uint so that % by a power - // of two can be optimized easily by a compiler. - - // Round 1. - for i := uint(0); i < 16; i++ { - x := i - s := shift1[i%4] - f := ((c ^ d) & b) ^ d - a += f + X[x] - a = bits.RotateLeft32(a, s) - a, b, c, d = d, a, b, c - } - - // Round 2. - for i := uint(0); i < 16; i++ { - x := xIndex2[i] - s := shift2[i%4] - g := (b & c) | (b & d) | (c & d) - a += g + X[x] + 0x5a827999 - a = bits.RotateLeft32(a, s) - a, b, c, d = d, a, b, c - } - - // Round 3. 
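For context, minimal usage of the vendored md4 package being removed here; as its package comment says, MD4 is cryptographically broken and suitable only for legacy compatibility.

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/md4" // Deprecated: legacy compatibility only
)

func main() {
	h := md4.New()
	h.Write([]byte("hello"))
	// Sum appends the 16-byte digest to its argument; pass nil to
	// get just the digest.
	fmt.Printf("%x\n", h.Sum(nil))
}
```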
- for i := uint(0); i < 16; i++ { - x := xIndex3[i] - s := shift3[i%4] - h := b ^ c ^ d - a += h + X[x] + 0x6ed9eba1 - a = bits.RotateLeft32(a, s) - a, b, c, d = d, a, b, c - } - - a += aa - b += bb - c += cc - d += dd - - p = p[_Chunk:] - n += _Chunk - } - - dig.s[0] = a - dig.s[1] = b - dig.s[2] = c - dig.s[3] = d - return n -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3cb1d..8f6c7f493f8 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36e8..00000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc29852085..289693613cc 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c60..5ccddd9990d 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. 
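The rate.go hunk above only adds a doc comment stating that Limiter is safe for simultaneous use by multiple goroutines; a small illustration of that contract with one shared limiter:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One Limiter shared by many goroutines: 5 events/sec, burst of 1.
	lim := rate.NewLimiter(rate.Every(200*time.Millisecond), 1)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			// Wait blocks until a token is available or ctx is canceled.
			if err := lim.Wait(context.Background()); err != nil {
				return
			}
			fmt.Println("worker", id, "proceeding")
		}(i)
	}
	wg.Wait()
}
```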
@@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674f2..35ba9c89676 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad3b..6e1d041cd95 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). 
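The appengine.go hunk above exports a new Middleware variable; a hedged sketch of how a caller might use it when running its own HTTP server instead of calling appengine.Main (handler wiring is illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// Middleware injects the GAE API context into each request so
	// handlers can make App Engine API calls; relevant when serving
	// with your own listener on non-classic runtimes.
	http.ListenAndServe(":8080", appengine.Middleware(mux))
}
```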
func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f3619..1202fc1a531 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a1..0569f5dd43e 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. 
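Per the deprecation above, BackgroundContext now simply returns context.Background(), so callers can migrate mechanically:

```go
package main

import "context"

func main() {
	// Before: ctx := appengine.BackgroundContext()
	// After the change above, the two are equivalent:
	ctx := context.Background()
	_ = ctx // pass to App Engine APIs for work outside a request
}
```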
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. 
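The middleware above normalizes RemoteAddr into the "IP:port" form that net/http documents; the same idea as a standalone sketch (the proxy header name here is an illustrative assumption, not the header the vendored code reads):

```go
package main

import (
	"net"
	"net/http"
)

// normalizeRemoteAddr trusts a proxy header if present, then ensures
// RemoteAddr carries a port, mirroring the logic in the hunk above.
func normalizeRemoteAddr(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if addr := r.Header.Get("X-Forwarded-For"); addr != "" {
			r.RemoteAddr = addr
		}
		if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
			// Bare host (e.g. "2001:db8::1"): add a default port.
			r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	http.ListenAndServe(":8080", normalizeRemoteAddr(http.DefaultServeMux))
}
```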
+ <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. 
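executeRequestSafelyMiddleware above converts a handler panic into a 500 response; the generic net/http shape of that safety net, as a simplified sketch (it ignores the partially-written-response case):

```go
package main

import (
	"log"
	"net/http"
)

// recoverMiddleware: a deferred recover turns a handler panic into a
// logged 500 instead of tearing down the connection unannounced.
func recoverMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if x := recover(); x != nil {
				log.Printf("panic: %v", x)
				http.Error(w, "internal server error", http.StatusInternalServerError)
			}
		}()
		next.ServeHTTP(w, r)
	})
}

func main() {
	http.HandleFunc("/boom", func(w http.ResponseWriter, r *http.Request) { panic("boom") })
	http.ListenAndServe(":8080", recoverMiddleware(http.DefaultServeMux))
}
```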
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. 
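The context plumbing above keys values off the address of a package-level string variable; an equivalent, collision-proof pattern uses an unexported key type (a sketch, not this package's API):

```go
package main

import (
	"context"
	"fmt"
)

// ctxKey is unexported, so no other package can construct the same key;
// this gives the same collision safety as keying on &contextKey above.
type ctxKey struct{}

func withValue(ctx context.Context, v string) context.Context {
	return context.WithValue(ctx, ctxKey{}, v)
}

func fromCtx(ctx context.Context) string {
	v, _ := ctx.Value(ctxKey{}).(string) // comma-ok: zero value if absent
	return v
}

func main() {
	ctx := withValue(context.Background(), "hello")
	fmt.Println(fromCtx(ctx))
}
```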
Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
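For reference, the bodyAllowedForStatus helper referenced above (copied from the standard library, per its comment) implements the rule that 1xx, 204, and 304 responses must not carry a body:

```go
package main

import "fmt"

// bodyAllowedForStatus, as in the vendored code above: informational
// (1xx), 204 No Content and 304 Not Modified responses have no body.
func bodyAllowedForStatus(status int) bool {
	switch {
	case status >= 100 && status <= 199:
		return false
	case status == 204:
		return false
	case status == 304:
		return false
	}
	return true
}

func main() {
	for _, s := range []int{200, 204, 304, 100} {
		fmt.Println(s, bodyAllowedForStatus(s))
	}
}
```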
- if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35c..87c33c798e8 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b72..5b95c13d926 100644 --- 
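These hunks mechanically swap golang.org/x/net/context for the standard library. That is safe because, on Go 1.9 and later, x/net/context declares its Context as a type alias of context.Context; a small demonstration, assuming that alias:

```go
package main

import (
	"context"
	"fmt"

	netcontext "golang.org/x/net/context"
)

func main() {
	// With the type alias in place, values flow freely between the two
	// import paths, so the migration needs no conversions.
	var a context.Context = context.Background()
	var b netcontext.Context = a // identical type, no conversion
	fmt.Println(b.Err())
}
```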
a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func 
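WithCallOverride above copies the override slice before appending, so a context produced by the pop in callOverrideFromContext can never share (and clobber) a backing array with a later push; the same push/pop discipline in a generic sketch:

```go
package main

import (
	"context"
	"fmt"
)

type overrideKey struct{}

// push copies the existing stack before appending, exactly like
// WithCallOverride: a previously "popped" slice may still be reachable
// through another context, so appending in place would be unsafe.
func push(ctx context.Context, f func()) context.Context {
	var fs []func()
	if prev, ok := ctx.Value(overrideKey{}).([]func()); ok {
		fs = append(fs, prev...)
	}
	fs = append(fs, f)
	return context.WithValue(ctx, overrideKey{}, fs)
}

// pop returns the top function and a context that hides it.
func pop(ctx context.Context) (func(), context.Context, bool) {
	fs, _ := ctx.Value(overrideKey{}).([]func())
	if len(fs) == 0 {
		return nil, nil, false
	}
	return fs[len(fs)-1], context.WithValue(ctx, overrideKey{}, fs[:len(fs)-1]), true
}

func main() {
	ctx := push(context.Background(), func() { fmt.Println("top") })
	if f, _, ok := pop(ctx); ok {
		f()
	}
}
```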
NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e4257..0f95aa91d5b 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e34..5ad3548bf74 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3f..4201b6b585a 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d806726355..18ddda3a423 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd1..afd0ae84fdf 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a33387..86a8caf06f3 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
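The build-constraint hunks throughout this patch all follow the same dual syntax: the //go:build line is read by Go 1.17+ toolchains, the // +build line by older ones, and gofmt keeps the pair consistent. The shape of such a file header:

```go
// The //go:build form (Go 1.17+) and the legacy // +build form must
// agree; gofmt synchronizes them automatically.

//go:build !appengine
// +build !appengine

package internal
```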
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae65380..2ae8ab9fa42 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca0822..6f169be487d 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go index 0ad50e2d36d..20e5940527e 100644 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ b/vendor/google.golang.org/appengine/socket/socket_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package socket import ( + "context" "fmt" "io" "net" @@ -14,7 +16,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/socket" diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go index c804169a1c0..fa0ec83861b 100644 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -2,15 +2,15 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package socket import ( + "context" "net" "time" - - "golang.org/x/net/context" ) // Dial connects to the address addr on the network protocol. diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a992a3..fcf3ad0a58f 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e8..00000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b6..00000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d901..6c0d72418d8 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. 
This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 seconds is used. func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore b/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore deleted file mode 100644 index a1338d68517..00000000000 --- a/vendor/gopkg.in/jcmturner/aescts.v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/README.md b/vendor/gopkg.in/jcmturner/aescts.v1/README.md deleted file mode 100644 index d1fddf3a281..00000000000 --- a/vendor/gopkg.in/jcmturner/aescts.v1/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# AES CBC Ciphertext Stealing -[![GoDoc](https://godoc.org/gopkg.in/jcmturner/aescts.v1?status.svg)](https://godoc.org/gopkg.in/jcmturner/aescts.v1) [![Go Report Card](https://goreportcard.com/badge/gopkg.in/jcmturner/aescts.v1)](https://goreportcard.com/report/gopkg.in/jcmturner/aescts.v1) - -Encrypt and decrypt data using AES CBC Ciphertext stealing mode. - -Reference: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing - -To get the package, execute: -``` -go get gopkg.in/jcmturner/aescts.v1 -``` -To import this package, add the following line to your code: -```go -import "gopkg.in/jcmturner/aescts.v1" - -``` \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go b/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go deleted file mode 100644 index 278713ea5b4..00000000000 --- a/vendor/gopkg.in/jcmturner/aescts.v1/aescts.go +++ /dev/null @@ -1,186 +0,0 @@ -// Package aescts provides AES CBC CipherText Stealing encryption and decryption methods -package aescts - -import ( - "crypto/aes" - "crypto/cipher" - "errors" - "fmt" -) - -// Encrypt the message with the key and the initial vector. -// Returns: next iv, ciphertext bytes, error -func Encrypt(key, iv, plaintext []byte) ([]byte, []byte, error) { - l := len(plaintext) - - block, err := aes.NewCipher(key) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("Error creating cipher: %v", err) - } - mode := cipher.NewCBCEncrypter(block, iv) - - m := make([]byte, len(plaintext)) - copy(m, plaintext) - - /*For consistency, ciphertext stealing is always used for the last two - blocks of the data to be encrypted, as in [RC5].
If the data length - is a multiple of the block size, this is equivalent to plain CBC mode - with the last two ciphertext blocks swapped.*/ - /*The initial vector carried out from one encryption for use in a - subsequent encryption is the next-to-last block of the encryption - output; this is the encrypted form of the last plaintext block.*/ - if l <= aes.BlockSize { - m, _ = zeroPad(m, aes.BlockSize) - mode.CryptBlocks(m, m) - return m, m, nil - } - if l%aes.BlockSize == 0 { - mode.CryptBlocks(m, m) - iv = m[len(m)-aes.BlockSize:] - rb, _ := swapLastTwoBlocks(m, aes.BlockSize) - return iv, rb, nil - } - m, _ = zeroPad(m, aes.BlockSize) - rb, pb, lb, err := tailBlocks(m, aes.BlockSize) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("Error tailing blocks: %v", err) - } - var ct []byte - if rb != nil { - // Encrypt all but the last 2 blocks and update the rolling iv - mode.CryptBlocks(rb, rb) - iv = rb[len(rb)-aes.BlockSize:] - mode = cipher.NewCBCEncrypter(block, iv) - ct = append(ct, rb...) - } - mode.CryptBlocks(pb, pb) - mode = cipher.NewCBCEncrypter(block, pb) - mode.CryptBlocks(lb, lb) - // Cipher Text Stealing (CTS) - Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing - // Swap the last two cipher blocks - // Truncate the ciphertext to the length of the original plaintext - ct = append(ct, lb...) - ct = append(ct, pb...) - return lb, ct[:l], nil -} - -// Decrypt the ciphertext with the key and the initial vector. -func Decrypt(key, iv, ciphertext []byte) ([]byte, error) { - // Copy the cipher text, because golang slices, even when passed by value to this method, can result in the backing arrays of the calling code value being updated. - ct := make([]byte, len(ciphertext)) - copy(ct, ciphertext) - if len(ct) < aes.BlockSize { - return []byte{}, fmt.Errorf("Ciphertext is not large enough. It is less than one block size. Blocksize:%v; Ciphertext:%v", aes.BlockSize, len(ct)) - } - // Configure the CBC - block, err := aes.NewCipher(key) - if err != nil { - return nil, fmt.Errorf("Error creating cipher: %v", err) - } - var mode cipher.BlockMode - - //If ciphertext is multiple of blocksize we just need to swap back the last two blocks and then do CBC - //If the ciphertext is just one block we can't swap so we just decrypt - if len(ct)%aes.BlockSize == 0 { - if len(ct) > aes.BlockSize { - ct, _ = swapLastTwoBlocks(ct, aes.BlockSize) - } - mode = cipher.NewCBCDecrypter(block, iv) - message := make([]byte, len(ct)) - mode.CryptBlocks(message, ct) - return message[:len(ct)], nil - } - - // Cipher Text Stealing (CTS) using CBC interface. Ref: https://en.wikipedia.org/wiki/Ciphertext_stealing#CBC_ciphertext_stealing - // Get ciphertext of the 2nd to last (penultimate) block (cpb), the last block (clb) and the rest (crb) - crb, cpb, clb, _ := tailBlocks(ct, aes.BlockSize) - v := make([]byte, len(iv), len(iv)) - copy(v, iv) - var message []byte - if crb != nil { - //If there is more than just the last and the penultimate block we decrypt it and the last block of this becomes the iv for later - rb := make([]byte, len(crb)) - mode = cipher.NewCBCDecrypter(block, v) - v = crb[len(crb)-aes.BlockSize:] - mode.CryptBlocks(rb, crb) - message = append(message, rb...)
- } - - // We need to modify the cipher text - // Decrypt the 2nd to last (penultimate) block with the original iv - pb := make([]byte, aes.BlockSize) - mode = cipher.NewCBCDecrypter(block, iv) - mode.CryptBlocks(pb, cpb) - // number of bytes needed to pad - npb := aes.BlockSize - len(ct)%aes.BlockSize - //pad last block using the number of bytes needed from the tail of the plaintext 2nd to last (penultimate) block - clb = append(clb, pb[len(pb)-npb:]...) - - // Now decrypt the last block in the penultimate position (iv will be from the crb, if there is no crb it's zeros) - // iv for the penultimate block decrypted in the last position becomes the modified last block - lb := make([]byte, aes.BlockSize) - mode = cipher.NewCBCDecrypter(block, v) - v = clb - mode.CryptBlocks(lb, clb) - message = append(message, lb...) - - // Now decrypt the penultimate block in the last position (iv will be from the modified last block) - mode = cipher.NewCBCDecrypter(block, v) - mode.CryptBlocks(cpb, cpb) - message = append(message, cpb...) - - // Truncate to the size of the original cipher text - return message[:len(ct)], nil -} - -func tailBlocks(b []byte, c int) ([]byte, []byte, []byte, error) { - if len(b) <= c { - return []byte{}, []byte{}, []byte{}, errors.New("bytes slice is not larger than one block so cannot tail") - } - // Get size of last block - var lbs int - if l := len(b) % aes.BlockSize; l == 0 { - lbs = aes.BlockSize - } else { - lbs = l - } - // Get last block - lb := b[len(b)-lbs:] - // Get 2nd to last (penultimate) block - pb := b[len(b)-lbs-c : len(b)-lbs] - if len(b) > 2*c { - rb := b[:len(b)-lbs-c] - return rb, pb, lb, nil - } - return nil, pb, lb, nil -} - -func swapLastTwoBlocks(b []byte, c int) ([]byte, error) { - rb, pb, lb, err := tailBlocks(b, c) - if err != nil { - return nil, err - } - var out []byte - if rb != nil { - out = append(out, rb...) - } - out = append(out, lb...) - out = append(out, pb...) - return out, nil -} - -// zeroPad pads bytes with zeros to nearest multiple of message size m. -func zeroPad(b []byte, m int) ([]byte, error) { - if m <= 0 { - return nil, errors.New("Invalid message block size when padding") - } - if b == nil || len(b) == 0 { - return nil, errors.New("Data not valid to pad: Zero size") - } - if l := len(b) % m; l != 0 { - n := m - l - z := make([]byte, n) - b = append(b, z...)
- } - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore b/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore deleted file mode 100644 index a1338d68517..00000000000 --- a/vendor/gopkg.in/jcmturner/dnsutils.v1/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml b/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml deleted file mode 100644 index cab4f7b861e..00000000000 --- a/vendor/gopkg.in/jcmturner/dnsutils.v1/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -go: - - 1.7.x - - 1.8.x - - 1.9.x - - master - -gobuild_args: -tags=integration -race - -sudo: required - -services: - - docker - -before_install: - - docker pull jcmturner/gokrb5:dns - - docker run -d -h kdc.test.gokrb5 -v /etc/localtime:/etc/localtime:ro -e "TEST_KDC_ADDR=127.0.0.1" -p 53:53 -p 53:53/udp --name dns jcmturner/gokrb5:dns - -before_script: - - sudo sed -i 's/nameserver .*/nameserver 127.0.0.1/g' /etc/resolv.conf - -env: - - DNSUTILS_OVERRIDE_NS="127.0.0.1:53" \ No newline at end of file diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE b/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/vendor/gopkg.in/jcmturner/dnsutils.v1/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go b/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go deleted file mode 100644 index 15ea912d100..00000000000 --- a/vendor/gopkg.in/jcmturner/dnsutils.v1/srv.go +++ /dev/null @@ -1,95 +0,0 @@ -package dnsutils - -import ( - "math/rand" - "net" - "sort" -) - -// OrderedSRV returns a count of the results and a map keyed on the order they should be used. -// This is based on the records' priority and randomised selection based on their relative weighting. -// The function's inputs are the same as those for net.LookupSRV -// To use in the correct order: -// -// count, orderedSRV, err := OrderedSRV(service, proto, name) -// i := 1 -// for i <= count { -// srv := orderedSRV[i] -// // Do something such as dial this SRV. If it fails move on to the next or break if it succeeds.
-// i += 1 -// } -func OrderedSRV(service, proto, name string) (int, map[int]*net.SRV, error) { - _, addrs, err := net.LookupSRV(service, proto, name) - if err != nil { - return 0, make(map[int]*net.SRV), err - } - index, osrv := orderSRV(addrs) - return index, osrv, nil -} - -func orderSRV(addrs []*net.SRV) (int, map[int]*net.SRV) { - // Initialise the ordered map - var o int - osrv := make(map[int]*net.SRV) - - prioMap := make(map[int][]*net.SRV, 0) - for _, srv := range addrs { - prioMap[int(srv.Priority)] = append(prioMap[int(srv.Priority)], srv) - } - - priorities := make([]int, 0) - for p := range prioMap { - priorities = append(priorities, p) - } - - var count int - sort.Ints(priorities) - for _, p := range priorities { - tos := weightedOrder(prioMap[p]) - for i, s := range tos { - count += 1 - osrv[o+i] = s - } - o += len(tos) - } - return count, osrv -} - -func weightedOrder(srvs []*net.SRV) map[int]*net.SRV { - // Get the total weight - var tw int - for _, s := range srvs { - tw += int(s.Weight) - } - - // Initialise the ordered map - o := 1 - osrv := make(map[int]*net.SRV) - - // Whilst there are still entries to be ordered - l := len(srvs) - for l > 0 { - i := rand.Intn(l) - s := srvs[i] - var rw int - if tw > 0 { - // Greater the weight the more likely this will be zero or less - rw = rand.Intn(tw) - int(s.Weight) - } - if rw <= 0 { - // Put entry in position - osrv[o] = s - if len(srvs) > 1 { - // Remove the entry from the source slice by swapping with the last entry and truncating - srvs[len(srvs)-1], srvs[i] = srvs[i], srvs[len(srvs)-1] - srvs = srvs[:len(srvs)-1] - l = len(srvs) - } else { - l = 0 - } - o += 1 - tw = tw - int(s.Weight) - } - } - return osrv -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/LICENSE b/vendor/gopkg.in/jcmturner/gokrb5.v5/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/asn1tools/tools.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/asn1tools/tools.go deleted file mode 100644 index f27740b9bd7..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/asn1tools/tools.go +++ /dev/null @@ -1,86 +0,0 @@ -// Package asn1tools provides tools for managing ASN1 marshaled data. -package asn1tools - -import ( - "github.com/jcmturner/gofork/encoding/asn1" -) - -// MarshalLengthBytes returns the ASN1 encoded bytes for the length 'l' -// -// There are two forms: short (for lengths between 0 and 127), and long definite (for lengths between 0 and 2^1008 -1). -// -// Short form: One octet. Bit 8 has value "0" and bits 7-1 give the length. -// -// Long form: Two to 127 octets. Bit 8 of first octet has value "1" and bits 7-1 give the number of additional length octets. Second and following octets give the length, base 256, most significant digit first. 
-func MarshalLengthBytes(l int) []byte { - if l <= 127 { - return []byte{byte(l)} - } - var b []byte - p := 1 - for i := 1; i < 127; { - b = append([]byte{byte((l % (p * 256)) / p)}, b...) - p = p * 256 - l = l - l%p - if l <= 0 { - break - } - } - return append([]byte{byte(128 + len(b))}, b...) -} - -// GetLengthFromASN returns the length of a slice of ASN1 encoded bytes from the ASN1 length header it contains. -func GetLengthFromASN(b []byte) int { - if int(b[1]) <= 127 { - return int(b[1]) - } - // The bytes that indicate the length - lb := b[2 : 2+int(b[1])-128] - base := 1 - l := 0 - for i := len(lb) - 1; i >= 0; i-- { - l += int(lb[i]) * base - base = base * 256 - } - return l -} - -// GetNumberBytesInLengthHeader returns the number of bytes in the ASn1 header that indicate the length. -func GetNumberBytesInLengthHeader(b []byte) int { - if int(b[1]) <= 127 { - return 1 - } - // The bytes that indicate the length - return 1 + int(b[1]) - 128 -} - -// AddASNAppTag adds an ASN1 encoding application tag value to the raw bytes provided. -func AddASNAppTag(b []byte, tag int) []byte { - r := asn1.RawValue{ - Class: asn1.ClassApplication, - IsCompound: true, - Tag: tag, - Bytes: b, - } - ab, _ := asn1.Marshal(r) - return ab -} - -/* -// The Marshal method of golang's asn1 package does not enable you to define wrapping the output in an application tag. -// This method adds that wrapping tag. -func AddASNAppTag(b []byte, tag int) []byte { - // The ASN1 wrapping consists of 2 bytes: - // 1st byte -> Identifier Octet - Application Tag - // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here. - // Application Tag: - //| Bit: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | - //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | - //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | - // Therefore the value of the byte is an integer = ( Application tag value + 96 ) - //b = append(MarshalLengthBytes(int(b[1])+2), b...) - b = append(MarshalLengthBytes(len(b)), b...) - b = append([]byte{byte(96 + tag)}, b...) - return b -} -*/ diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/ASExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/ASExchange.go deleted file mode 100644 index 894337602ad..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/ASExchange.go +++ /dev/null @@ -1,159 +0,0 @@ -package client - -import ( - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" - "gopkg.in/jcmturner/gokrb5.v5/iana/errorcode" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/patype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// ASExchange performs an AS exchange for the client to retrieve a TGT. 
-func (cl *Client) ASExchange(realm string, ASReq messages.ASReq, referral int) (messages.ASRep, error) { - if ok, err := cl.IsConfigured(); !ok { - return messages.ASRep{}, krberror.Errorf(err, krberror.ConfigError, "AS Exchange cannot be performed") - } - - b, err := ASReq.Marshal() - if err != nil { - return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ") - } - var ASRep messages.ASRep - - rb, err := cl.SendToKDC(b, realm) - if err != nil { - if e, ok := err.(messages.KRBError); ok { - switch e.ErrorCode { - case errorcode.KDC_ERR_PREAUTH_REQUIRED: - // From now on assume this client will need to do this pre-auth and set the PAData - cl.GoKrb5Conf.AssumePAEncTimestampRequired = true - err = setPAData(cl, e, &ASReq) - if err != nil { - return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: failed setting AS_REQ PAData for pre-authentication required") - } - b, err := ASReq.Marshal() - if err != nil { - return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed marshaling AS_REQ with PAData") - } - rb, err = cl.SendToKDC(b, realm) - if err != nil { - if _, ok := err.(messages.KRBError); ok { - return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") - } - return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") - } - case errorcode.KDC_ERR_WRONG_REALM: - // Client referral https://tools.ietf.org/html/rfc6806.html#section-7 - if referral > 5 { - return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of client referrals exceeded") - } - referral++ - return cl.ASExchange(e.CRealm, ASReq, referral) - default: - return messages.ASRep{}, krberror.Errorf(err, krberror.KDCError, "AS Exchange Error: kerberos error response from KDC") - } - } else { - return messages.ASRep{}, krberror.Errorf(err, krberror.NetworkingError, "AS Exchange Error: failed sending AS_REQ to KDC") - } - } - err = ASRep.Unmarshal(rb) - if err != nil { - return messages.ASRep{}, krberror.Errorf(err, krberror.EncodingError, "AS Exchange Error: failed to process the AS_REP") - } - if ok, err := ASRep.IsValid(cl.Config, cl.Credentials, ASReq); !ok { - return messages.ASRep{}, krberror.Errorf(err, krberror.KRBMsgError, "AS Exchange Error: AS_REP is not valid or client password/keytab incorrect") - } - return ASRep, nil -} - -func setPAData(cl *Client, krberr messages.KRBError, ASReq *messages.ASReq) error { - if !cl.GoKrb5Conf.DisablePAFXFast { - pa := types.PAData{PADataType: patype.PA_REQ_ENC_PA_REP} - ASReq.PAData = append(ASReq.PAData, pa) - } - if cl.GoKrb5Conf.AssumePAEncTimestampRequired { - paTSb, err := types.GetPAEncTSEncAsnMarshalled() - if err != nil { - return krberror.Errorf(err, krberror.KRBMsgError, "error creating PAEncTSEnc for Pre-Authentication") - } - var et etype.EType - if krberr.ErrorCode == 0 { - // This is not in response to an error from the KDC.
It is preemptive - et, err = crypto.GetEtype(ASReq.ReqBody.EType[0]) // Take the first as preference - if err != nil { - return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") - } - } else { - // Get the etype to use from the PA data in the KRBError e-data - et, err = preAuthEType(krberr) - if err != nil { - return krberror.Errorf(err, krberror.EncryptingError, "error getting etype for pre-auth encryption") - } - } - key, err := cl.Key(et, krberr) - if err != nil { - return krberror.Errorf(err, krberror.EncryptingError, "error getting key from credentials") - } - paEncTS, err := crypto.GetEncryptedData(paTSb, key, keyusage.AS_REQ_PA_ENC_TIMESTAMP, 1) - if err != nil { - return krberror.Errorf(err, krberror.EncryptingError, "error encrypting pre-authentication timestamp") - } - pb, err := paEncTS.Marshal() - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error marshaling the PAEncTSEnc encrypted data") - } - pa := types.PAData{ - PADataType: patype.PA_ENC_TIMESTAMP, - PADataValue: pb, - } - ASReq.PAData = append(ASReq.PAData, pa) - } - return nil -} - -func preAuthEType(krberr messages.KRBError) (etype etype.EType, err error) { - //The preferred ordering of the "hint" pre-authentication data that - //affect client key selection is: ETYPE-INFO2, followed by ETYPE-INFO, - //followed by PW-SALT. - //A KDC SHOULD NOT send PA-PW-SALT when issuing a KRB-ERROR message - //that requests additional pre-authentication. Implementation note: - //Some KDC implementations issue an erroneous PA-PW-SALT when issuing a - //KRB-ERROR message that requests additional pre-authentication. - //Therefore, clients SHOULD ignore a PA-PW-SALT accompanying a - //KRB-ERROR message that requests additional pre-authentication. - var etypeID int32 - var pas types.PADataSequence - e := pas.Unmarshal(krberr.EData) - if e != nil { - err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling KRBError data") - return - } - for _, pa := range pas { - switch pa.PADataType { - case patype.PA_ETYPE_INFO2: - info, e := pa.GetETypeInfo2() - if e != nil { - err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling ETYPE-INFO2 data") - return - } - etypeID = info[0].EType - break - case patype.PA_ETYPE_INFO: - info, e := pa.GetETypeInfo() - if e != nil { - err = krberror.Errorf(e, krberror.EncodingError, "error unmarshalling ETYPE-INFO data") - return - } - etypeID = info[0].EType - } - } - etype, e = crypto.GetEtype(etypeID) - if e != nil { - err = krberror.Errorf(e, krberror.EncryptingError, "error creating etype") - return - } - return etype, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/TGSExchange.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/TGSExchange.go deleted file mode 100644 index f7c5c38902a..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/TGSExchange.go +++ /dev/null @@ -1,105 +0,0 @@ -package client - -import ( - "time" - - "gopkg.in/jcmturner/gokrb5.v5/iana/nametype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// TGSExchange performs a TGS exchange to retrieve a ticket to the specified SPN. -// The ticket retrieved is added to the client's cache. -func (cl *Client) TGSExchange(spn types.PrincipalName, kdcRealm string, tkt messages.Ticket, sessionKey types.EncryptionKey, renewal bool, referral int) (tgsReq messages.TGSReq, tgsRep messages.TGSRep, err error) { - //// Check what sessions we have for this SPN.
- //// Will get the session to the default realm if one does not exist for requested SPN - //sess, err := cl.GetSessionFromPrincipalName(spn) - //if err != nil { - // return tgsReq, tgsRep, err - //} - tgsReq, err = messages.NewTGSReq(cl.Credentials.CName, kdcRealm, cl.Config, tkt, sessionKey, spn, renewal) - if err != nil { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "TGS Exchange Error: failed to generate a new TGS_REQ") - } - b, err := tgsReq.Marshal() - if err != nil { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to generate a new TGS_REQ") - } - r, err := cl.SendToKDC(b, kdcRealm) - if err != nil { - if _, ok := err.(messages.KRBError); ok { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.KDCError, "TGS Exchange Error: kerberos error response from KDC") - } - return tgsReq, tgsRep, krberror.Errorf(err, krberror.NetworkingError, "TGS Exchange Error: issue sending TGS_REQ to KDC") - } - err = tgsRep.Unmarshal(r) - if err != nil { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") - } - err = tgsRep.DecryptEncPart(sessionKey) - if err != nil { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: failed to process the TGS_REP") - } - // TODO should this check the first element is krbtgt rather than the nametype? - if tgsRep.Ticket.SName.NameType == nametype.KRB_NT_SRV_INST && !tgsRep.Ticket.SName.Equal(spn) { - if referral > 5 { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.KRBMsgError, "maximum number of referrals exceeded") - } - // Server referral https://tools.ietf.org/html/rfc6806.html#section-8 - // The TGS Rep contains a TGT for another domain as the service resides in that domain. - if ok, err := tgsRep.IsValid(cl.Config, tgsReq); !ok { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid") - } - cl.AddSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) - realm := tgsRep.Ticket.SName.NameString[1] - referral++ - return cl.TGSExchange(spn, realm, tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, false, referral) - } - if ok, err := tgsRep.IsValid(cl.Config, tgsReq); !ok { - return tgsReq, tgsRep, krberror.Errorf(err, krberror.EncodingError, "TGS Exchange Error: TGS_REP is not valid") - } - return tgsReq, tgsRep, nil -} - -// GetServiceTicket makes a request to get a service ticket for the SPN specified -// SPN format: <SERVICE>/<FQDN> Eg.
HTTP/www.example.com -// The ticket will be added to the client's ticket cache -func (cl *Client) GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error) { - var tkt messages.Ticket - var skey types.EncryptionKey - if tkt, skey, ok := cl.GetCachedTicket(spn); ok { - // Already a valid ticket in the cache - return tkt, skey, nil - } - princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) - sess, err := cl.GetSessionFromPrincipalName(princ) - if err != nil { - return tkt, skey, err - } - // Ensure TGT still valid - if time.Now().UTC().After(sess.EndTime) { - _, err := cl.updateSession(sess) - if err != nil { - return tkt, skey, err - } - // Get the session again as it could have been replaced by the update - sess, err = cl.GetSessionFromPrincipalName(princ) - if err != nil { - return tkt, skey, err - } - } - _, tgsRep, err := cl.TGSExchange(princ, sess.Realm, sess.TGT, sess.SessionKey, false, 0) - if err != nil { - return tkt, skey, err - } - cl.Cache.addEntry( - tgsRep.Ticket, - tgsRep.DecryptedEncPart.AuthTime, - tgsRep.DecryptedEncPart.StartTime, - tgsRep.DecryptedEncPart.EndTime, - tgsRep.DecryptedEncPart.RenewTill, - tgsRep.DecryptedEncPart.Key, - ) - return tgsRep.Ticket, tgsRep.DecryptedEncPart.Key, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/cache.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/cache.go deleted file mode 100644 index 9d3e1040a9c..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/cache.go +++ /dev/null @@ -1,111 +0,0 @@ -package client - -import ( - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" - "strings" - "sync" - "time" -) - -// Cache for client tickets. -type Cache struct { - Entries map[string]CacheEntry - mux sync.RWMutex -} - -// CacheEntry holds details for a client cache entry. -type CacheEntry struct { - Ticket messages.Ticket - AuthTime time.Time - StartTime time.Time - EndTime time.Time - RenewTill time.Time - SessionKey types.EncryptionKey -} - -// NewCache creates a new client ticket cache instance. -func NewCache() *Cache { - return &Cache{ - Entries: map[string]CacheEntry{}, - } -} - -// GetEntry returns a cache entry that matches the SPN. -func (c *Cache) getEntry(spn string) (CacheEntry, bool) { - c.mux.RLock() - defer c.mux.RUnlock() - e, ok := (*c).Entries[spn] - return e, ok -} - -// AddEntry adds a ticket to the cache. -func (c *Cache) addEntry(tkt messages.Ticket, authTime, startTime, endTime, renewTill time.Time, sessionKey types.EncryptionKey) CacheEntry { - spn := strings.Join(tkt.SName.NameString, "/") - c.mux.Lock() - defer c.mux.Unlock() - (*c).Entries[spn] = CacheEntry{ - Ticket: tkt, - AuthTime: authTime, - StartTime: startTime, - EndTime: endTime, - RenewTill: renewTill, - SessionKey: sessionKey, - } - return c.Entries[spn] -} - -// Clear deletes all the cache entries -func (c *Cache) clear() { - c.mux.Lock() - defer c.mux.Unlock() - for k := range c.Entries { - delete(c.Entries, k) - } -} - -// RemoveEntry removes the cache entry for the defined SPN. -func (c *Cache) RemoveEntry(spn string) { - c.mux.Lock() - defer c.mux.Unlock() - delete(c.Entries, spn) -} - -// GetCachedTicket returns a ticket from the cache for the SPN. -// Only a ticket that is currently valid will be returned. 
-func (cl *Client) GetCachedTicket(spn string) (messages.Ticket, types.EncryptionKey, bool) { - if e, ok := cl.Cache.getEntry(spn); ok { - //If within time window of ticket return it - if time.Now().UTC().After(e.StartTime) && time.Now().UTC().Before(e.EndTime) { - return e.Ticket, e.SessionKey, true - } else if time.Now().UTC().Before(e.RenewTill) { - e, err := cl.renewTicket(e) - if err != nil { - return e.Ticket, e.SessionKey, false - } - return e.Ticket, e.SessionKey, true - } - } - var tkt messages.Ticket - var key types.EncryptionKey - return tkt, key, false -} - -// renewTicket renews a cache entry ticket. -// To renew from outside the client package use GetCachedTicket -func (cl *Client) renewTicket(e CacheEntry) (CacheEntry, error) { - spn := e.Ticket.SName - _, tgsRep, err := cl.TGSExchange(spn, e.Ticket.Realm, e.Ticket, e.SessionKey, true, 0) - if err != nil { - return e, err - } - e = cl.Cache.addEntry( - tgsRep.Ticket, - tgsRep.DecryptedEncPart.AuthTime, - tgsRep.DecryptedEncPart.StartTime, - tgsRep.DecryptedEncPart.EndTime, - tgsRep.DecryptedEncPart.RenewTill, - tgsRep.DecryptedEncPart.Key, - ) - return e, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/client.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/client.go deleted file mode 100644 index c583ada9255..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/client.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package client provides a client library and methods for Kerberos 5 authentication. -package client - -import ( - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/config" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" - "gopkg.in/jcmturner/gokrb5.v5/iana/errorcode" - "gopkg.in/jcmturner/gokrb5.v5/iana/nametype" - "gopkg.in/jcmturner/gokrb5.v5/keytab" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// Client side configuration and state. -type Client struct { - Credentials *credentials.Credentials - Config *config.Config - GoKrb5Conf *Config - sessions *sessions - Cache *Cache -} - -// Config struct holds GoKRB5 specific client configurations. -// Set Disable_PA_FX_FAST to true to force this behaviour off. -// Set Assume_PA_ENC_TIMESTAMP_Required to send the PA_ENC_TIMESTAMP pro-actively rather than waiting for a KRB_ERROR response from the KDC indicating it is required. -type Config struct { - DisablePAFXFast bool - AssumePAEncTimestampRequired bool -} - -// NewClientWithPassword creates a new client from a password credential. -// Set the realm to empty string to use the default realm from config. -func NewClientWithPassword(username, realm, password string) Client { - creds := credentials.NewCredentials(username, realm) - return Client{ - Credentials: creds.WithPassword(password), - Config: config.NewConfig(), - GoKrb5Conf: &Config{}, - sessions: &sessions{ - Entries: make(map[string]*session), - }, - Cache: NewCache(), - } -} - -// NewClientWithKeytab creates a new client from a keytab credential. -func NewClientWithKeytab(username, realm string, kt keytab.Keytab) Client { - creds := credentials.NewCredentials(username, realm) - return Client{ - Credentials: creds.WithKeytab(kt), - Config: config.NewConfig(), - GoKrb5Conf: &Config{}, - sessions: &sessions{ - Entries: make(map[string]*session), - }, - Cache: NewCache(), - } -} - -// NewClientFromCCache create a client from a populated client cache. 
-// -// WARNING: A client created from CCache does not automatically renew TGTs and a failure will occur after the TGT expires. -func NewClientFromCCache(c credentials.CCache) (Client, error) { - cl := Client{ - Credentials: c.GetClientCredentials(), - Config: config.NewConfig(), - GoKrb5Conf: &Config{}, - sessions: &sessions{ - Entries: make(map[string]*session), - }, - Cache: NewCache(), - } - spn := types.PrincipalName{ - NameType: nametype.KRB_NT_SRV_INST, - NameString: []string{"krbtgt", c.DefaultPrincipal.Realm}, - } - cred, ok := c.GetEntry(spn) - if !ok { - return cl, errors.New("TGT not found in CCache") - } - var tgt messages.Ticket - err := tgt.Unmarshal(cred.Ticket) - if err != nil { - return cl, fmt.Errorf("TGT bytes in cache are not valid: %v", err) - } - cl.sessions.Entries[c.DefaultPrincipal.Realm] = &session{ - Realm: c.DefaultPrincipal.Realm, - AuthTime: cred.AuthTime, - EndTime: cred.EndTime, - RenewTill: cred.RenewTill, - TGT: tgt, - SessionKey: cred.Key, - } - for _, cred := range c.GetEntries() { - var tkt messages.Ticket - err = tkt.Unmarshal(cred.Ticket) - if err != nil { - return cl, fmt.Errorf("cache entry ticket bytes are not valid: %v", err) - } - cl.Cache.addEntry( - tkt, - cred.AuthTime, - cred.StartTime, - cred.EndTime, - cred.RenewTill, - cred.Key, - ) - } - return cl, nil -} - -// WithConfig sets the Kerberos configuration for the client. -func (cl *Client) WithConfig(cfg *config.Config) *Client { - cl.Config = cfg - return cl -} - -// WithKeytab adds a keytab to the client -func (cl *Client) WithKeytab(kt keytab.Keytab) *Client { - cl.Credentials.WithKeytab(kt) - return cl -} - -// WithPassword adds a password to the client -func (cl *Client) WithPassword(password string) *Client { - cl.Credentials.WithPassword(password) - return cl -} - -// Key returns a key for the client. Preferably from a keytab and then generated from the password. -// The KRBError would have been returned from the KDC and must be of type KDC_ERR_PREAUTH_REQUIRED. -// If a KRBError is not available pass messages.KRBError{} and a key will be returned from the credentials keytab. -func (cl *Client) Key(etype etype.EType, krberr messages.KRBError) (types.EncryptionKey, error) { - if cl.Credentials.HasKeytab() && etype != nil { - return cl.Credentials.Keytab.GetEncryptionKey(cl.Credentials.CName.NameString, cl.Credentials.Realm, 0, etype.GetETypeID()) - } else if cl.Credentials.HasPassword() { - if krberr.ErrorCode == errorcode.KDC_ERR_PREAUTH_REQUIRED { - var pas types.PADataSequence - err := pas.Unmarshal(krberr.EData) - if err != nil { - return types.EncryptionKey{}, fmt.Errorf("could not get PAData from KRBError to generate key from password: %v", err) - } - key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password, krberr.CName, krberr.CRealm, etype.GetETypeID(), pas) - return key, err - } - key, _, err := crypto.GetKeyFromPassword(cl.Credentials.Password, cl.Credentials.CName, cl.Credentials.Realm, etype.GetETypeID(), types.PADataSequence{}) - return key, err - } - return types.EncryptionKey{}, errors.New("credential has neither keytab or password to generate key") -} - -// LoadConfig loads the Kerberos configuration for the client from file path specified. -func (cl *Client) LoadConfig(cfgPath string) (*Client, error) { - cfg, err := config.Load(cfgPath) - if err != nil { - return cl, err - } - cl.Config = cfg - return cl, nil -} - -// IsConfigured indicates if the client has the values required set. 
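The constructors, With* setters, and LoadConfig removed above compose in the obvious way; a minimal usage sketch of the v5 API as it stood before this deletion (the krb5.conf path, principal, realm, and SPN are placeholders):

package main

import (
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/client"
	"gopkg.in/jcmturner/gokrb5.v5/config"
)

func main() {
	// Placeholder path; any valid krb5.conf will do.
	cfg, err := config.Load("/etc/krb5.conf")
	if err != nil {
		log.Fatal(err)
	}
	cl := client.NewClientWithPassword("user", "EXAMPLE.COM", "password")
	cl.WithConfig(cfg)
	// Login performs the AS exchange and stores the TGT session.
	if err := cl.Login(); err != nil {
		log.Fatal(err)
	}
	// GetServiceTicket performs the TGS exchange and caches the result.
	tkt, key, err := cl.GetServiceTicket("HTTP/www.example.com")
	if err != nil {
		log.Fatal(err)
	}
	_, _ = tkt, key
}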
-func (cl *Client) IsConfigured() (bool, error) { - if cl.Credentials.Username == "" { - return false, errors.New("client does not have a username") - } - if cl.Credentials.Realm == "" { - return false, errors.New("client does not have a define realm") - } - // Client needs to have either a password, keytab or a session already (later when loading from CCache) - if !cl.Credentials.HasPassword() && !cl.Credentials.HasKeytab() { - sess, err := cl.GetSessionFromRealm(cl.Credentials.Realm) - if err != nil || sess.AuthTime.IsZero() { - return false, errors.New("client has neither a keytab nor a password set and no session") - } - } - if !cl.Config.LibDefaults.DNSLookupKDC { - for _, r := range cl.Config.Realms { - if r.Realm == cl.Credentials.Realm { - if len(r.KDC) > 0 { - return true, nil - } - return false, errors.New("client krb5 config does not have any defined KDCs for the default realm") - } - } - } - return true, nil -} - -// Login the client with the KDC via an AS exchange. -func (cl *Client) Login() error { - if ok, err := cl.IsConfigured(); !ok { - return err - } - ASReq, err := messages.NewASReqForTGT(cl.Credentials.Realm, cl.Config, cl.Credentials.CName) - if err != nil { - return krberror.Errorf(err, krberror.KRBMsgError, "error generating new AS_REQ") - } - err = setPAData(cl, messages.KRBError{}, &ASReq) - if err != nil { - return krberror.Errorf(err, krberror.KRBMsgError, "failed setting AS_REQ PAData") - } - ASRep, err := cl.ASExchange(cl.Credentials.Realm, ASReq, 0) - if err != nil { - return err - } - cl.AddSession(ASRep.Ticket, ASRep.DecryptedEncPart) - return nil -} - -// Destroy stops the auto-renewal of all sessions and removes the sessions and cache entries from the client. -func (cl *Client) Destroy() { - creds := credentials.NewCredentials("", "") - cl.sessions.destroy() - cl.Cache.clear() - cl.Credentials = &creds -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/http.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/http.go deleted file mode 100644 index c13afbeb61d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/http.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/base64" - "fmt" - "net/http" - "strings" - - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/gssapi" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// SetSPNEGOHeader gets the service ticket and sets it as the SPNEGO authorization header on HTTP request object. -// To auto generate the SPN from the request object pass a null string "". -func (cl *Client) SetSPNEGOHeader(r *http.Request, spn string) error { - if spn == "" { - spn = "HTTP/" + strings.SplitN(r.Host, ":", 2)[0] - } - tkt, skey, err := cl.GetServiceTicket(spn) - if err != nil { - return fmt.Errorf("could not get service ticket: %v", err) - } - err = SetSPNEGOHeader(*cl.Credentials, tkt, skey, r) - if err != nil { - return err - } - return nil -} - -// SetSPNEGOHeader sets the provided ticket as the SPNEGO authorization header on HTTP request object. 
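Both SetSPNEGOHeader variants end with the same wire detail: the marshalled SPNEGO token is base64-encoded into an Authorization: Negotiate header. A standalone sketch of that final step; setNegotiateHeader is a hypothetical helper and the token bytes are placeholders, not a real token:

package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
)

// setNegotiateHeader applies the same "Negotiate " + base64(token) form
// the deleted SetSPNEGOHeader used.
func setNegotiateHeader(r *http.Request, tokenBytes []byte) {
	r.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(tokenBytes))
}

func main() {
	r, _ := http.NewRequest("GET", "http://www.example.com/", nil)
	setNegotiateHeader(r, []byte{0x60, 0x82}) // placeholder bytes only
	fmt.Println(r.Header.Get("Authorization"))
}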
-func SetSPNEGOHeader(creds credentials.Credentials, tkt messages.Ticket, sessionKey types.EncryptionKey, r *http.Request) error { - SPNEGOToken, err := gssapi.GetSPNEGOKrbNegTokenInit(creds, tkt, sessionKey) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "could not generate SPNEGO negotiation token") - } - nb, err := SPNEGOToken.Marshal() - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "could not marshal SPNEGO") - } - hs := "Negotiate " + base64.StdEncoding.EncodeToString(nb) - r.Header.Set("Authorization", hs) - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/network.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/network.go deleted file mode 100644 index c330aeb24e3..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/network.go +++ /dev/null @@ -1,213 +0,0 @@ -package client - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "net" - "time" - - "gopkg.in/jcmturner/gokrb5.v5/iana/errorcode" - "gopkg.in/jcmturner/gokrb5.v5/messages" -) - -// SendToKDC performs network actions to send data to the KDC. -func (cl *Client) SendToKDC(b []byte, realm string) ([]byte, error) { - var rb []byte - if cl.Config.LibDefaults.UDPPreferenceLimit == 1 { - //1 means we should always use TCP - rb, errtcp := cl.sendKDCTCP(realm, b) - if errtcp != nil { - if e, ok := errtcp.(messages.KRBError); ok { - return rb, e - } - return rb, fmt.Errorf("communication error with KDC via TCP: %v", errtcp) - } - return rb, nil - } - if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { - //Try UDP first, TCP second - rb, errudp := cl.sendKDCUDP(realm, b) - if errudp != nil { - if e, ok := errudp.(messages.KRBError); ok && e.ErrorCode != errorcode.KRB_ERR_RESPONSE_TOO_BIG { - // Got a KRBError from KDC - // If this is not a KRB_ERR_RESPONSE_TOO_BIG we will return immediately otherwise will try TCP. - return rb, e - } - // Try TCP - r, errtcp := cl.sendKDCTCP(realm, b) - if errtcp != nil { - if e, ok := errtcp.(messages.KRBError); ok { - // Got a KRBError - return r, e - } - return r, fmt.Errorf("failed to communicate with KDC. Attempts made with UDP (%v) and then TCP (%v)", errudp, errtcp) - } - rb = r - } - return rb, nil - } - //Try TCP first, UDP second - rb, errtcp := cl.sendKDCTCP(realm, b) - if errtcp != nil { - if e, ok := errtcp.(messages.KRBError); ok { - // Got a KRBError from KDC so returning and not trying UDP. - return rb, e - } - rb, errudp := cl.sendKDCUDP(realm, b) - if errudp != nil { - if e, ok := errudp.(messages.KRBError); ok { - // Got a KRBError - return rb, e - } - return rb, fmt.Errorf("failed to communicate with KDC. 
Attempts made with TCP (%v) and then UDP (%v)", errtcp, errudp) - } - } - return rb, nil -} - -func dialKDCUDP(count int, kdcs map[int]string) (conn *net.UDPConn, err error) { - i := 1 - for i <= count { - udpAddr, e := net.ResolveUDPAddr("udp", kdcs[i]) - if e != nil { - err = fmt.Errorf("error resolving KDC address: %v", e) - return - } - conn, err = net.DialUDP("udp", nil, udpAddr) - if err == nil { - conn.SetDeadline(time.Now().Add(time.Duration(5 * time.Second))) - return - } - i++ - } - err = errors.New("error in getting a UDP connection to any of the KDCs") - return -} - -func dialKDCTCP(count int, kdcs map[int]string) (conn *net.TCPConn, err error) { - i := 1 - for i <= count { - tcpAddr, e := net.ResolveTCPAddr("tcp", kdcs[i]) - if e != nil { - err = fmt.Errorf("error resolving KDC address: %v", e) - return - } - conn, err = net.DialTCP("tcp", nil, tcpAddr) - if err == nil { - conn.SetDeadline(time.Now().Add(time.Duration(5 * time.Second))) - return - } - i++ - } - err = errors.New("error in getting a TCP connection to any of the KDCs") - return -} - -// Send the bytes to the KDC over UDP. -func (cl *Client) sendKDCUDP(realm string, b []byte) ([]byte, error) { - var r []byte - count, kdcs, err := cl.Config.GetKDCs(realm, false) - if err != nil { - return r, err - } - conn, err := dialKDCUDP(count, kdcs) - if err != nil { - return r, err - } - r, err = cl.sendUDP(conn, b) - if err != nil { - return r, err - } - return checkForKRBError(r) -} - -func (cl *Client) sendKDCTCP(realm string, b []byte) ([]byte, error) { - var r []byte - count, kdcs, err := cl.Config.GetKDCs(realm, true) - if err != nil { - return r, err - } - conn, err := dialKDCTCP(count, kdcs) - if err != nil { - return r, err - } - rb, err := cl.sendTCP(conn, b) - if err != nil { - return r, err - } - return checkForKRBError(rb) -} - -// Send the bytes over UDP. -func (cl *Client) sendUDP(conn *net.UDPConn, b []byte) ([]byte, error) { - var r []byte - defer conn.Close() - _, err := conn.Write(b) - if err != nil { - return r, fmt.Errorf("error sending to (%s): %v", conn.RemoteAddr().String(), err) - } - udpbuf := make([]byte, 4096) - n, _, err := conn.ReadFrom(udpbuf) - r = udpbuf[:n] - if err != nil { - return r, fmt.Errorf("sending over UDP failed to %s: %v", conn.RemoteAddr().String(), err) - } - if len(r) < 1 { - return r, fmt.Errorf("no response data from %s", conn.RemoteAddr().String()) - } - return r, nil -} - -// Send the bytes over TCP. -func (cl *Client) sendTCP(conn *net.TCPConn, b []byte) ([]byte, error) { - defer conn.Close() - var r []byte - /* - RFC https://tools.ietf.org/html/rfc4120#section-7.2.2 - Each request (KRB_KDC_REQ) and response (KRB_KDC_REP or KRB_ERROR) - sent over the TCP stream is preceded by the length of the request as - 4 octets in network byte order. The high bit of the length is - reserved for future expansion and MUST currently be set to zero. If - a KDC that does not understand how to interpret a set high bit of the - length encoding receives a request with the high order bit of the - length set, it MUST return a KRB-ERROR message with the error - KRB_ERR_FIELD_TOOLONG and MUST close the TCP stream. - NB: network byte order == big endian - */ - var buf bytes.Buffer - binary.Write(&buf, binary.BigEndian, uint32(len(b))) - b = append(buf.Bytes(), b...) 
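// The append above completes the RFC 4120 section 7.2.2 framing quoted in the
// comment: the message is prefixed with its length as a 4-octet big-endian
// integer. A standalone sketch of writing and reading that framing, assuming
// conn is any connected reader/writer (imports: bytes, encoding/binary, io);
// io.ReadFull is used for the response header, which is slightly stricter
// than a single Read:

func writeFramed(conn io.Writer, msg []byte) error {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(len(msg))) // 4-octet length header
	buf.Write(msg)
	_, err := conn.Write(buf.Bytes())
	return err
}

func readFramed(conn io.Reader) ([]byte, error) {
	sh := make([]byte, 4)
	if _, err := io.ReadFull(conn, sh); err != nil { // read the size header fully
		return nil, err
	}
	msg := make([]byte, binary.BigEndian.Uint32(sh))
	_, err := io.ReadFull(conn, msg) // then exactly that many bytes
	return msg, err
}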
- - _, err := conn.Write(b) - if err != nil { - return r, fmt.Errorf("error sending to KDC (%s): %v", conn.RemoteAddr().String(), err) - } - - sh := make([]byte, 4, 4) - _, err = conn.Read(sh) - if err != nil { - return r, fmt.Errorf("error reading response size header: %v", err) - } - s := binary.BigEndian.Uint32(sh) - - rb := make([]byte, s, s) - _, err = io.ReadFull(conn, rb) - if err != nil { - return r, fmt.Errorf("error reading response: %v", err) - } - if len(rb) < 1 { - return r, fmt.Errorf("no response data from KDC %s", conn.RemoteAddr().String()) - } - return rb, nil -} - -func checkForKRBError(b []byte) ([]byte, error) { - var KRBErr messages.KRBError - if err := KRBErr.Unmarshal(b); err == nil { - return b, KRBErr - } - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/passwd.go deleted file mode 100644 index 7733ab3cc6a..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/passwd.go +++ /dev/null @@ -1,94 +0,0 @@ -package client - -import ( - "fmt" - "net" - - "gopkg.in/jcmturner/gokrb5.v5/kadmin" - "gopkg.in/jcmturner/gokrb5.v5/messages" -) - -// Kpasswd server response codes. -const ( - KRB5_KPASSWD_SUCCESS = 0 - KRB5_KPASSWD_MALFORMED = 1 - KRB5_KPASSWD_HARDERROR = 2 - KRB5_KPASSWD_AUTHERROR = 3 - KRB5_KPASSWD_SOFTERROR = 4 - KRB5_KPASSWD_ACCESSDENIED = 5 - KRB5_KPASSWD_BAD_VERSION = 6 - KRB5_KPASSWD_INITIAL_FLAG_NEEDED = 7 -) - -// ChangePasswd changes the password of the client to the value provided. -func (cl *Client) ChangePasswd(newPasswd string) (bool, error) { - ASReq, err := messages.NewASReqForChgPasswd(cl.Credentials.Realm, cl.Config, cl.Credentials.CName) - if err != nil { - return false, err - } - ASRep, err := cl.ASExchange(cl.Credentials.Realm, ASReq, 0) - if err != nil { - return false, err - } - - msg, key, err := kadmin.ChangePasswdMsg(cl.Credentials.CName, cl.Credentials.Realm, newPasswd, ASRep.Ticket, ASRep.DecryptedEncPart.Key) - if err != nil { - return false, err - } - r, err := cl.sendToKPasswd(msg) - if err != nil { - return false, err - } - err = r.Decrypt(key) - if err != nil { - return false, err - } - if r.ResultCode != KRB5_KPASSWD_SUCCESS { - return false, fmt.Errorf("error response from kdamin: %s", r.Result) - } - return true, nil -} - -func (cl *Client) sendToKPasswd(msg kadmin.Request) (r kadmin.Reply, err error) { - _, kps, err := cl.Config.GetKpasswdServers(cl.Credentials.Realm, true) - if err != nil { - return - } - addr := kps[1] - b, err := msg.Marshal() - if err != nil { - return - } - if len(b) <= cl.Config.LibDefaults.UDPPreferenceLimit { - return cl.sendKPasswdUDP(b, addr) - } - return cl.sendKPasswdTCP(b, addr) -} - -func (cl *Client) sendKPasswdTCP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) { - tcpAddr, err := net.ResolveTCPAddr("tcp", kadmindAddr) - if err != nil { - return - } - conn, err := net.DialTCP("tcp", nil, tcpAddr) - if err != nil { - return - } - rb, err := cl.sendTCP(conn, b) - err = r.Unmarshal(rb) - return -} - -func (cl *Client) sendKPasswdUDP(b []byte, kadmindAddr string) (r kadmin.Reply, err error) { - udpAddr, err := net.ResolveUDPAddr("udp", kadmindAddr) - if err != nil { - return - } - conn, err := net.DialUDP("udp", nil, udpAddr) - if err != nil { - return - } - rb, err := cl.sendUDP(conn, b) - err = r.Unmarshal(rb) - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/session.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/client/session.go deleted file mode 100644 index 06675dec673..00000000000 --- 
a/vendor/gopkg.in/jcmturner/gokrb5.v5/client/session.go +++ /dev/null @@ -1,191 +0,0 @@ -package client - -import ( - "fmt" - "sync" - "time" - - "gopkg.in/jcmturner/gokrb5.v5/iana/nametype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// Sessions keyed on the realm name -type sessions struct { - Entries map[string]*session - mux sync.RWMutex -} - -func (s *sessions) destroy() { - s.mux.Lock() - defer s.mux.Unlock() - for k, e := range s.Entries { - e.destroy() - delete(s.Entries, k) - } -} - -// Client session struct. -type session struct { - Realm string - AuthTime time.Time - EndTime time.Time - RenewTill time.Time - TGT messages.Ticket - SessionKey types.EncryptionKey - SessionKeyExpiration time.Time - cancel chan bool - mux sync.RWMutex -} - -func (s *session) update(tkt messages.Ticket, dep messages.EncKDCRepPart) { - s.mux.Lock() - defer s.mux.Unlock() - s.AuthTime = dep.AuthTime - s.EndTime = dep.EndTime - s.RenewTill = dep.RenewTill - s.TGT = tkt - s.SessionKey = dep.Key - s.SessionKeyExpiration = dep.KeyExpiration -} - -func (s *session) destroy() { - s.mux.Lock() - defer s.mux.Unlock() - s.cancel <- true - s.EndTime = time.Now().UTC() - s.RenewTill = s.EndTime - s.SessionKeyExpiration = s.EndTime -} - -// AddSession adds a session for a realm with a TGT to the client's session cache. -// A goroutine is started to automatically renew the TGT before expiry. -func (cl *Client) AddSession(tkt messages.Ticket, dep messages.EncKDCRepPart) { - cl.sessions.mux.Lock() - defer cl.sessions.mux.Unlock() - s := &session{ - Realm: tkt.SName.NameString[1], - AuthTime: dep.AuthTime, - EndTime: dep.EndTime, - RenewTill: dep.RenewTill, - TGT: tkt, - SessionKey: dep.Key, - SessionKeyExpiration: dep.KeyExpiration, - cancel: make(chan bool, 1), - } - // if a session already exists for this, cancel its auto renew. - if i, ok := cl.sessions.Entries[tkt.SName.NameString[1]]; ok { - i.cancel <- true - } - cl.sessions.Entries[tkt.SName.NameString[1]] = s - cl.enableAutoSessionRenewal(s) -} - -// enableAutoSessionRenewal turns on the automatic renewal for the client's TGT session. -func (cl *Client) enableAutoSessionRenewal(s *session) { - var timer *time.Timer - go func(s *session) { - for { - s.mux.RLock() - w := (s.EndTime.Sub(time.Now().UTC()) * 5) / 6 - s.mux.RUnlock() - if w < 0 { - return - } - timer = time.NewTimer(w) - select { - case <-timer.C: - renewal, err := cl.updateSession(s) - if !renewal && err == nil { - // end this goroutine as there will have been a new login and new auto renewal goroutine created. - return - } - case <-s.cancel: - // cancel has been called. Stop the timer and exit. - timer.Stop() - return - } - } - }(s) -} - -// RenewTGT renews the client's TGT session. -func (cl *Client) renewTGT(s *session) error { - spn := types.PrincipalName{ - NameType: nametype.KRB_NT_SRV_INST, - NameString: []string{"krbtgt", s.Realm}, - } - _, tgsRep, err := cl.TGSExchange(spn, s.TGT.Realm, s.TGT, s.SessionKey, true, 0) - if err != nil { - return krberror.Errorf(err, krberror.KRBMsgError, "error renewing TGT") - } - s.update(tgsRep.Ticket, tgsRep.DecryptedEncPart) - return nil -} - -// updateSession updates either through renewal or creating a new login. -// The boolean indicates if the update was a renewal. 
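The enableAutoSessionRenewal goroutine above re-arms a timer at five sixths of the remaining ticket lifetime and races it against a cancel channel. A standalone sketch of that timer-plus-cancel pattern; endTime and renew are placeholder callbacks, not the package's own:

package main

import (
	"fmt"
	"time"
)

// autoRenew re-arms a timer at 5/6 of the remaining lifetime, as the deleted
// session code did, and exits when cancelled or when the lifetime has passed.
func autoRenew(endTime func() time.Time, renew func() error, cancel <-chan bool) {
	for {
		w := (time.Until(endTime()) * 5) / 6
		if w < 0 {
			return
		}
		timer := time.NewTimer(w)
		select {
		case <-timer.C:
			if err := renew(); err != nil {
				return
			}
		case <-cancel:
			timer.Stop()
			return
		}
	}
}

func main() {
	end := time.Now().Add(60 * time.Millisecond)
	cancel := make(chan bool, 1)
	go autoRenew(func() time.Time { return end },
		func() error { fmt.Println("renewing"); return nil }, cancel)
	// Prints a renewal or two, then the goroutine exits as the lifetime passes.
	time.Sleep(100 * time.Millisecond)
	cancel <- true
}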
-func (cl *Client) updateSession(s *session) (bool, error) { - if time.Now().UTC().Before(s.RenewTill) { - err := cl.renewTGT(s) - return true, err - } - err := cl.Login() - return false, err -} - -func (cl *Client) getSessionFromRemoteRealm(realm string) (*session, error) { - cl.sessions.mux.RLock() - sess, ok := cl.sessions.Entries[cl.Credentials.Realm] - cl.sessions.mux.RUnlock() - if !ok { - return nil, fmt.Errorf("client does not have a session for realm %s, login first", cl.Credentials.Realm) - } - - spn := types.PrincipalName{ - NameType: nametype.KRB_NT_SRV_INST, - NameString: []string{"krbtgt", realm}, - } - - _, tgsRep, err := cl.TGSExchange(spn, cl.Credentials.Realm, sess.TGT, sess.SessionKey, false, 0) - if err != nil { - return nil, err - } - cl.AddSession(tgsRep.Ticket, tgsRep.DecryptedEncPart) - - cl.sessions.mux.RLock() - defer cl.sessions.mux.RUnlock() - return cl.sessions.Entries[realm], nil -} - -// GetSessionFromRealm returns the session for the realm provided. -func (cl *Client) GetSessionFromRealm(realm string) (sess *session, err error) { - cl.sessions.mux.RLock() - s, ok := cl.sessions.Entries[realm] - cl.sessions.mux.RUnlock() - if !ok { - // Try to request TGT from trusted remote Realm - s, err = cl.getSessionFromRemoteRealm(realm) - if err != nil { - return - } - } - // Create another session to return to prevent race condition. - sess = &session{ - Realm: s.Realm, - AuthTime: s.AuthTime, - EndTime: s.EndTime, - RenewTill: s.RenewTill, - TGT: s.TGT, - SessionKey: s.SessionKey, - SessionKeyExpiration: s.SessionKeyExpiration, - } - return -} - -// GetSessionFromPrincipalName returns the session for the realm of the principal provided. -func (cl *Client) GetSessionFromPrincipalName(spn types.PrincipalName) (*session, error) { - realm := cl.Config.ResolveRealm(spn.NameString[len(spn.NameString)-1]) - return cl.GetSessionFromRealm(realm) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/config/hosts.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/config/hosts.go deleted file mode 100644 index a58c234178d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/config/hosts.go +++ /dev/null @@ -1,137 +0,0 @@ -package config - -import ( - "fmt" - "math/rand" - "net" - "strconv" - "strings" - - "gopkg.in/jcmturner/dnsutils.v1" -) - -// GetKDCs returns the count of KDCs available and a map of KDC host names keyed on preference order. -func (c *Config) GetKDCs(realm string, tcp bool) (int, map[int]string, error) { - if realm == "" { - realm = c.LibDefaults.DefaultRealm - } - kdcs := make(map[int]string) - var count int - - // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. - if c.LibDefaults.DNSLookupKDC { - proto := "udp" - if tcp { - proto = "tcp" - } - c, addrs, err := dnsutils.OrderedSRV("kerberos", proto, realm) - if err != nil { - return count, kdcs, err - } - if len(addrs) < 1 { - return count, kdcs, fmt.Errorf("no KDC SRV records found for realm %s", realm) - } - count = c - for k, v := range addrs { - kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) - } - } else { - // Get the KDCs from the krb5.conf an order them randomly for preference. 
- var ks []string - for _, r := range c.Realms { - if r.Realm == realm { - ks = r.KDC - break - } - } - count = len(ks) - if count < 1 { - return count, kdcs, fmt.Errorf("no KDCs defined in configuration for realm %s", realm) - } - kdcs = randServOrder(ks) - } - return count, kdcs, nil -} - -// GetKpasswdServers returns the count of kpasswd servers available and a map of kpasswd host names keyed on preference order. -// https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html#realms - see kpasswd_server section -func (c *Config) GetKpasswdServers(realm string, tcp bool) (int, map[int]string, error) { - kdcs := make(map[int]string) - var count int - - // Use DNS to resolve kerberos SRV records if configured to do so in krb5.conf. - if c.LibDefaults.DNSLookupKDC { - proto := "udp" - if tcp { - proto = "tcp" - } - c, addrs, err := dnsutils.OrderedSRV("kpasswd", proto, realm) - if err != nil { - return count, kdcs, err - } - if c < 1 { - c, addrs, err = dnsutils.OrderedSRV("kerberos-adm", proto, realm) - if err != nil { - return count, kdcs, err - } - } - if len(addrs) < 1 { - return count, kdcs, fmt.Errorf("no kpasswd or kadmin SRV records found for realm %s", realm) - } - count = c - for k, v := range addrs { - kdcs[k] = strings.TrimRight(v.Target, ".") + ":" + strconv.Itoa(int(v.Port)) - } - } else { - // Get the KDCs from the krb5.conf an order them randomly for preference. - var ks []string - var ka []string - for _, r := range c.Realms { - if r.Realm == realm { - ks = r.KPasswdServer - ka = r.AdminServer - break - } - } - if len(ks) < 1 { - for _, k := range ka { - h, _, err := net.SplitHostPort(k) - if err != nil { - continue - } - ks = append(ks, h+":464") - } - } - count = len(ks) - if count < 1 { - return count, kdcs, fmt.Errorf("no kpasswd or kadmin defined in configuration for realm %s", realm) - } - kdcs = randServOrder(ks) - } - return count, kdcs, nil -} - -func randServOrder(ks []string) map[int]string { - kdcs := make(map[int]string) - count := len(ks) - i := 1 - if count > 1 { - l := len(ks) - for l > 0 { - ri := rand.Intn(l) - kdcs[i] = ks[ri] - if l > 1 { - // Remove the entry from the source slice by swapping with the last entry and truncating - ks[len(ks)-1], ks[ri] = ks[ri], ks[len(ks)-1] - ks = ks[:len(ks)-1] - l = len(ks) - } else { - l = 0 - } - i++ - } - } else { - kdcs[i] = ks[0] - } - return kdcs -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/config/krb5conf.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/config/krb5conf.go deleted file mode 100644 index aa829816f98..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/config/krb5conf.go +++ /dev/null @@ -1,665 +0,0 @@ -// Package config implements KRB5 client and service configuration as described at https://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html -package config - -import ( - "bufio" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "os/user" - "regexp" - "strconv" - "strings" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// Config represents the KRB5 configuration. -type Config struct { - LibDefaults *LibDefaults - Realms []Realm - DomainRealm DomainRealm - //CaPaths - //AppDefaults - //Plugins -} - -// WeakETypeList is a list of encryption types that have been deemed weak. -const WeakETypeList = "des-cbc-crc des-cbc-md4 des-cbc-md5 des-cbc-raw des3-cbc-raw des-hmac-sha1 arcfour-hmac-exp rc4-hmac-exp arcfour-hmac-md5-exp des" - -// NewConfig creates a new config struct instance. 
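GetKDCs above resolves _kerberos SRV records when dns_lookup_kdc is set, trimming the trailing dot from each target and appending the port. A standalone sketch of the same resolution using the standard library resolver in place of the dnsutils dependency (the realm is a placeholder):

package main

import (
	"fmt"
	"log"
	"net"
	"strconv"
	"strings"
)

func main() {
	// LookupSRV queries _kerberos._tcp.EXAMPLE.COM and returns the records
	// sorted by priority and randomized by weight.
	_, addrs, err := net.LookupSRV("kerberos", "tcp", "EXAMPLE.COM")
	if err != nil {
		log.Fatal(err)
	}
	for i, a := range addrs {
		kdc := strings.TrimRight(a.Target, ".") + ":" + strconv.Itoa(int(a.Port))
		fmt.Printf("%d: %s\n", i+1, kdc)
	}
}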
-func NewConfig() *Config { - d := make(DomainRealm) - return &Config{ - LibDefaults: newLibDefaults(), - DomainRealm: d, - } -} - -// LibDefaults represents the [libdefaults] section of the configuration. -type LibDefaults struct { - AllowWeakCrypto bool //default false - // ap_req_checksum_type int //unlikely to support this - Canonicalize bool //default false - CCacheType int //default is 4. unlikely to implement older - Clockskew time.Duration //max allowed skew in seconds, default 300 - //Default_ccache_name string // default /tmp/krb5cc_%{uid} //Not implementing as will hold in memory - DefaultClientKeytabName string //default /usr/local/var/krb5/user/%{euid}/client.keytab - DefaultKeytabName string //default /etc/krb5.keytab - DefaultRealm string - DefaultTGSEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 - DefaultTktEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 - DefaultTGSEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 - DefaultTktEnctypeIDs []int32 //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 - DNSCanonicalizeHostname bool //default true - DNSLookupKDC bool //default false - DNSLookupRealm bool - ExtraAddresses []net.IP //Not implementing yet - Forwardable bool //default false - IgnoreAcceptorHostname bool //default false - K5LoginAuthoritative bool //default false - K5LoginDirectory string //default user's home directory. Must be owned by the user or root - KDCDefaultOptions asn1.BitString //default 0x00000010 (KDC_OPT_RENEWABLE_OK) - KDCTimeSync int //default 1 - //kdc_req_checksum_type int //unlikely to implement as for very old KDCs - NoAddresses bool //default true - PermittedEnctypes []string //default aes256-cts-hmac-sha1-96 aes128-cts-hmac-sha1-96 des3-cbc-sha1 arcfour-hmac-md5 camellia256-cts-cmac camellia128-cts-cmac des-cbc-crc des-cbc-md5 des-cbc-md4 - PermittedEnctypeIDs []int32 - //plugin_base_dir string //not supporting plugins - PreferredPreauthTypes []int //default “17, 16, 15, 14”, which forces libkrb5 to attempt to use PKINIT if it is supported - Proxiable bool //default false - RDNS bool //default true - RealmTryDomains int //default -1 - RenewLifetime time.Duration //default 0 - SafeChecksumType int //default 8 - TicketLifetime time.Duration //default 1 day - UDPPreferenceLimit int // 1 means to always use tcp. MIT krb5 has a default value of 1465, and it prevents user setting more than 32700. - VerifyAPReqNofail bool //default false -} - -// Create a new LibDefaults struct. 
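The KDCDefaultOptions default of 0x00000010 set in the constructor below is the KDC_OPT_RENEWABLE_OK flag (bit 27 of the options string). A small sketch of the same construction using the standard library's encoding/asn1 BitString in place of the gofork variant:

package main

import (
	"encoding/asn1"
	"encoding/hex"
	"fmt"
)

func main() {
	b, err := hex.DecodeString("00000010") // KDC_OPT_RENEWABLE_OK
	if err != nil {
		panic(err)
	}
	opts := asn1.BitString{Bytes: b, BitLength: len(b) * 8}
	// Bit 27 (RENEWABLE-OK) is the only bit set; At counts from the MSB.
	fmt.Println(opts.At(27)) // 1
}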
-func newLibDefaults() *LibDefaults { - uid := "0" - var hdir string - usr, _ := user.Current() - if usr != nil { - uid = usr.Uid - hdir = usr.HomeDir - } - opts := asn1.BitString{} - opts.Bytes, _ = hex.DecodeString("00000010") - opts.BitLength = len(opts.Bytes) * 8 - return &LibDefaults{ - CCacheType: 4, - Clockskew: time.Duration(300) * time.Second, - DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid), - DefaultKeytabName: "/etc/krb5.keytab", - DefaultTGSEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, - DefaultTktEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, - DNSCanonicalizeHostname: true, - K5LoginDirectory: hdir, - KDCDefaultOptions: opts, - KDCTimeSync: 1, - NoAddresses: true, - PermittedEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96", "des3-cbc-sha1", "arcfour-hmac-md5", "camellia256-cts-cmac", "camellia128-cts-cmac", "des-cbc-crc", "des-cbc-md5", "des-cbc-md4"}, - RDNS: true, - RealmTryDomains: -1, - SafeChecksumType: 8, - TicketLifetime: time.Duration(24) * time.Hour, - UDPPreferenceLimit: 1465, - PreferredPreauthTypes: []int{17, 16, 15, 14}, - } -} - -// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct. -func (l *LibDefaults) parseLines(lines []string) error { - for _, line := range lines { - line = strings.TrimSpace(line) - if line == "" { - continue - } - if strings.Contains(line, "v4_") { - return errors.New("v4 configurations are not supported in Realms section") - } - if !strings.Contains(line, "=") { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - - p := strings.Split(line, "=") - key := strings.TrimSpace(strings.ToLower(p[0])) - switch key { - case "allow_weak_crypto": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.AllowWeakCrypto = v - case "canonicalize": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.Canonicalize = v - case "ccache_type": - p[1] = strings.TrimSpace(p[1]) - v, err := strconv.ParseUint(p[1], 10, 32) - if err != nil || v < 0 || v > 4 { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.CCacheType = int(v) - case "clockskew": - d, err := parseDuration(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.Clockskew = d - case "default_client_keytab_name": - l.DefaultClientKeytabName = strings.TrimSpace(p[1]) - case "default_keytab_name": - l.DefaultKeytabName = strings.TrimSpace(p[1]) - case "default_realm": - l.DefaultRealm = strings.TrimSpace(p[1]) - case "default_tgs_enctypes": - l.DefaultTGSEnctypes = strings.Fields(p[1]) - case "default_tkt_enctypes": - l.DefaultTktEnctypes = strings.Fields(p[1]) - case "dns_canonicalize_hostname": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.DNSCanonicalizeHostname = v - case "dns_lookup_kdc": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. 
%v: %s", err, line) - } - l.DNSLookupKDC = v - case "dns_lookup_realm": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.DNSLookupRealm = v - case "extra_addresses": - ipStr := strings.TrimSpace(p[1]) - for _, ip := range strings.Split(ipStr, ",") { - if eip := net.ParseIP(ip); eip != nil { - l.ExtraAddresses = append(l.ExtraAddresses, eip) - } - } - case "forwardable": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.Forwardable = v - case "ignore_acceptor_hostname": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.IgnoreAcceptorHostname = v - case "k5login_authoritative": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.K5LoginAuthoritative = v - case "k5login_directory": - l.K5LoginDirectory = strings.TrimSpace(p[1]) - case "kdc_default_options": - v := strings.TrimSpace(p[1]) - v = strings.Replace(v, "0x", "", -1) - b, err := hex.DecodeString(v) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.KDCDefaultOptions.Bytes = b - l.KDCDefaultOptions.BitLength = len(b) * 8 - case "kdc_timesync": - p[1] = strings.TrimSpace(p[1]) - v, err := strconv.ParseInt(p[1], 10, 32) - if err != nil || v < 0 { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.KDCTimeSync = int(v) - case "noaddresses": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.NoAddresses = v - case "permitted_enctypes": - l.PermittedEnctypes = strings.Fields(p[1]) - case "preferred_preauth_types": - p[1] = strings.TrimSpace(p[1]) - t := strings.Split(p[1], ",") - var v []int - for _, s := range t { - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - v = append(v, int(i)) - } - l.PreferredPreauthTypes = v - case "proxiable": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.Proxiable = v - case "rdns": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.RDNS = v - case "realm_try_domains": - p[1] = strings.TrimSpace(p[1]) - v, err := strconv.ParseInt(p[1], 10, 32) - if err != nil || v < -1 { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.RealmTryDomains = int(v) - case "renew_lifetime": - d, err := parseDuration(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.RenewLifetime = d - case "safe_checksum_type": - p[1] = strings.TrimSpace(p[1]) - v, err := strconv.ParseInt(p[1], 10, 32) - if err != nil || v < 0 { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.SafeChecksumType = int(v) - case "ticket_lifetime": - d, err := parseDuration(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. 
%v: %s", err, line) - } - l.TicketLifetime = d - case "udp_preference_limit": - p[1] = strings.TrimSpace(p[1]) - v, err := strconv.ParseUint(p[1], 10, 32) - if err != nil || v > 32700 { - return fmt.Errorf("libdefaults configuration line invalid: %s", line) - } - l.UDPPreferenceLimit = int(v) - case "verify_ap_req_nofail": - v, err := parseBoolean(p[1]) - if err != nil { - return fmt.Errorf("libdefaults configuration line invalid. %v: %s", err, line) - } - l.VerifyAPReqNofail = v - default: - //Ignore the line - continue - } - } - l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto) - l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto) - l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto) - return nil -} - -// Realm represents an entry in the [realms] section of the configuration. -type Realm struct { - Realm string - AdminServer []string - //auth_to_local //Not implementing for now - //auth_to_local_names //Not implementing for now - DefaultDomain string - KDC []string - KPasswdServer []string //default admin_server:464 - MasterKDC []string -} - -// Parse the lines of a [realms] entry into the Realm struct. -func (r *Realm) parseLines(name string, lines []string) error { - r.Realm = name - var adminServerFinal bool - var KDCFinal bool - var kpasswdServerFinal bool - var masterKDCFinal bool - for _, line := range lines { - if strings.TrimSpace(line) == "" { - continue - } - if !strings.Contains(line, "=") { - return fmt.Errorf("realm configuration line invalid: %s", line) - } - - p := strings.Split(line, "=") - key := strings.TrimSpace(strings.ToLower(p[0])) - v := strings.TrimSpace(p[1]) - switch key { - case "admin_server": - appendUntilFinal(&r.AdminServer, v, &adminServerFinal) - case "default_domain": - r.DefaultDomain = v - case "kdc": - if !strings.Contains(v, ":") { - // No port number specified default to 88 - if strings.HasSuffix(v, `*`) { - v = strings.TrimSpace(strings.TrimSuffix(v, `*`)) + ":88*" - } else { - v = strings.TrimSpace(v) + ":88" - } - } - appendUntilFinal(&r.KDC, v, &KDCFinal) - case "kpasswd_server": - appendUntilFinal(&r.KPasswdServer, v, &kpasswdServerFinal) - case "master_kdc": - appendUntilFinal(&r.MasterKDC, v, &masterKDCFinal) - default: - //Ignore the line - continue - } - } - //default for Kpasswd_server = admin_server:464 - if len(r.KPasswdServer) < 1 { - for _, a := range r.AdminServer { - s := strings.Split(a, ":") - r.KPasswdServer = append(r.KPasswdServer, s[0]+":464") - } - } - return nil -} - -// Parse the lines of the [realms] section of the configuration into an slice of Realm structs. -func parseRealms(lines []string) ([]Realm, error) { - var realms []Realm - start := -1 - var name string - for i, l := range lines { - if strings.TrimSpace(l) == "" { - continue - } - if strings.Contains(l, "v4_") { - return nil, errors.New("v4 configurations are not supported in Realms section") - } - if strings.Contains(l, "{") { - if start >= 0 { - // already started a block!!! - return nil, errors.New("invalid Realms section in configuration") - } - start = i - if !strings.Contains(l, "=") { - return nil, fmt.Errorf("realm configuration line invalid: %s", l) - } - p := strings.Split(l, "=") - name = strings.TrimSpace(p[0]) - } - if strings.Contains(l, "}") { - if start < 0 { - // but not started a block!!! 
- return nil, errors.New("invalid Realms section in configuration") - } - var r Realm - r.parseLines(name, lines[start+1:i]) - realms = append(realms, r) - start = -1 - } - } - return realms, nil -} - -// DomainRealm maps the domains to realms representing the [domain_realm] section of the configuration. -type DomainRealm map[string]string - -// Parse the lines of the [domain_realm] section of the configuration and add to the mapping. -func (d *DomainRealm) parseLines(lines []string) error { - for _, line := range lines { - if strings.TrimSpace(line) == "" { - continue - } - if !strings.Contains(line, "=") { - return fmt.Errorf("realm configuration line invalid: %s", line) - } - p := strings.Split(line, "=") - domain := strings.TrimSpace(strings.ToLower(p[0])) - realm := strings.TrimSpace(strings.ToUpper(p[1])) - d.addMapping(domain, realm) - } - return nil -} - -// Add a domain to realm mapping. -func (d *DomainRealm) addMapping(domain, realm string) { - (*d)[domain] = realm -} - -// Delete a domain to realm mapping. -func (d *DomainRealm) deleteMapping(domain, realm string) { - delete(*d, domain) -} - -// ResolveRealm resolves the kerberos realm for the specified domain name from the domain to realm mapping. -// The most specific mapping is returned. -func (c *Config) ResolveRealm(domainName string) string { - domainName = strings.TrimSuffix(domainName, ".") - - // Try to match the entire hostname first - if r, ok := c.DomainRealm[domainName]; ok { - return r - } - - // Try to match all DNS domain parts - periods := strings.Count(domainName, ".") + 1 - for i := 2; i <= periods; i++ { - z := strings.SplitN(domainName, ".", i) - if r, ok := c.DomainRealm["."+z[len(z)-1]]; ok { - return r - } - } - return c.LibDefaults.DefaultRealm -} - -// Load the KRB5 configuration from the specified file path. -func Load(cfgPath string) (*Config, error) { - fh, err := os.Open(cfgPath) - if err != nil { - return nil, errors.New("configuration file could not be opened: " + cfgPath + " " + err.Error()) - } - defer fh.Close() - scanner := bufio.NewScanner(fh) - return NewConfigFromScanner(scanner) -} - -// NewConfigFromString creates a new Config struct from a string. -func NewConfigFromString(s string) (*Config, error) { - reader := strings.NewReader(s) - return NewConfigFromReader(reader) -} - -// NewConfigFromReader creates a new Config struct from an io.Reader. -func NewConfigFromReader(r io.Reader) (*Config, error) { - scanner := bufio.NewScanner(r) - return NewConfigFromScanner(scanner) -} - -// NewConfigFromScanner creates a new Config struct from a bufio.Scanner. 
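ResolveRealm above checks the full hostname first and then each dot-prefixed DNS suffix, so the most specific [domain_realm] mapping wins. A standalone sketch of that lookup order with placeholder mappings:

package main

import (
	"fmt"
	"strings"
)

// resolveRealm mirrors the most-specific-first lookup the deleted code used.
func resolveRealm(domainRealm map[string]string, defaultRealm, host string) string {
	host = strings.TrimSuffix(host, ".")
	if r, ok := domainRealm[host]; ok { // exact hostname match first
		return r
	}
	// Then each DNS suffix: ".sub.example.com", ".example.com", ".com".
	parts := strings.Count(host, ".") + 1
	for i := 2; i <= parts; i++ {
		z := strings.SplitN(host, ".", i)
		if r, ok := domainRealm["."+z[len(z)-1]]; ok {
			return r
		}
	}
	return defaultRealm
}

func main() {
	m := map[string]string{".example.com": "EXAMPLE.COM"}
	fmt.Println(resolveRealm(m, "DEFAULT.REALM", "host.sub.example.com")) // EXAMPLE.COM
}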
-func NewConfigFromScanner(scanner *bufio.Scanner) (*Config, error) { - c := NewConfig() - sections := make(map[int]string) - var sectionLineNum []int - var lines []string - for scanner.Scan() { - // Skip comments and blank lines - if matched, _ := regexp.MatchString(`^\s*(#|;|\n)`, scanner.Text()); matched { - continue - } - if matched, _ := regexp.MatchString(`^\s*\[libdefaults\]\s*`, scanner.Text()); matched { - sections[len(lines)] = "libdefaults" - sectionLineNum = append(sectionLineNum, len(lines)) - continue - } - if matched, _ := regexp.MatchString(`^\s*\[realms\]\s*`, scanner.Text()); matched { - sections[len(lines)] = "realms" - sectionLineNum = append(sectionLineNum, len(lines)) - continue - } - if matched, _ := regexp.MatchString(`^\s*\[domain_realm\]\s*`, scanner.Text()); matched { - sections[len(lines)] = "domain_realm" - sectionLineNum = append(sectionLineNum, len(lines)) - continue - } - if matched, _ := regexp.MatchString(`^\s*\[.*\]\s*`, scanner.Text()); matched { - sections[len(lines)] = "unknown_section" - sectionLineNum = append(sectionLineNum, len(lines)) - continue - } - lines = append(lines, scanner.Text()) - } - for i, start := range sectionLineNum { - var end int - if i+1 >= len(sectionLineNum) { - end = len(lines) - } else { - end = sectionLineNum[i+1] - } - switch section := sections[start]; section { - case "libdefaults": - err := c.LibDefaults.parseLines(lines[start:end]) - if err != nil { - return nil, fmt.Errorf("error processing libdefaults section: %v", err) - } - case "realms": - realms, err := parseRealms(lines[start:end]) - if err != nil { - return nil, fmt.Errorf("error processing realms section: %v", err) - } - c.Realms = realms - case "domain_realm": - err := c.DomainRealm.parseLines(lines[start:end]) - if err != nil { - return nil, fmt.Errorf("error processing domaain_realm section: %v", err) - } - default: - continue - } - } - return c, nil -} - -// Parse a space delimited list of ETypes into a list of EType numbers optionally filtering out weak ETypes. -func parseETypes(s []string, w bool) []int32 { - var eti []int32 - for _, et := range s { - if !w { - var weak bool - for _, wet := range strings.Fields(WeakETypeList) { - if et == wet { - weak = true - break - } - } - if weak { - continue - } - } - i := etypeID.EtypeSupported(et) - if i != 0 { - eti = append(eti, i) - } - } - return eti -} - -// Parse a time duration string in the configuration to a golang time.Duration. 
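parseDuration below accepts the krb5 duration forms: Nd with an optional remainder, a plain Go duration, a bare number of seconds, and h:m[:s]. A condensed standalone sketch of the h:m[:s] branch, the one form the standard library's time.ParseDuration does not cover:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// parseColonDuration handles the krb5 h:m[:s] form, e.g. "36:00" or "8:00:30".
func parseColonDuration(s string) (time.Duration, error) {
	t := strings.Split(s, ":")
	if len(t) < 2 || len(t) > 3 {
		return 0, errors.New("invalid time duration value")
	}
	var n [3]int // seconds default to 0 when only h:m is given
	for i, f := range t {
		v, err := strconv.Atoi(f)
		if err != nil {
			return 0, errors.New("invalid time duration value")
		}
		n[i] = v
	}
	return time.Duration(n[0])*time.Hour +
		time.Duration(n[1])*time.Minute +
		time.Duration(n[2])*time.Second, nil
}

func main() {
	d, err := parseColonDuration("8:00:30")
	fmt.Println(d, err) // 8h0m30s <nil>
}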
-func parseDuration(s string) (time.Duration, error) { - s = strings.Replace(strings.TrimSpace(s), " ", "", -1) - - // handle Nd[NmNs] - if strings.Contains(s, "d") { - ds := strings.SplitN(s, "d", 2) - dn, err := strconv.ParseUint(ds[0], 10, 32) - if err != nil { - return time.Duration(0), errors.New("invalid time duration") - } - d := time.Duration(dn*24) * time.Hour - if ds[1] != "" { - dp, err := time.ParseDuration(ds[1]) - if err != nil { - return time.Duration(0), errors.New("invalid time duration") - } - d = d + dp - } - return d, nil - } - - // handle Nm[Ns] - d, err := time.ParseDuration(s) - if err == nil { - return d, nil - } - - // handle N - v, err := strconv.ParseUint(s, 10, 32) - if err == nil && v > 0 { - return time.Duration(v) * time.Second, nil - } - - // handle h:m[:s] - if strings.Contains(s, ":") { - t := strings.Split(s, ":") - if 2 > len(t) || len(t) > 3 { - return time.Duration(0), errors.New("invalid time duration value") - } - var i []int - for _, n := range t { - j, err := strconv.ParseInt(n, 10, 16) - if err != nil { - return time.Duration(0), errors.New("invalid time duration value") - } - i = append(i, int(j)) - } - d := time.Duration(i[0])*time.Hour + time.Duration(i[1])*time.Minute - if len(i) == 3 { - d = d + time.Duration(i[2])*time.Second - } - return d, nil - } - return time.Duration(0), errors.New("invalid time duration value") -} - -// Parse possible boolean values to golang bool. -func parseBoolean(s string) (bool, error) { - s = strings.TrimSpace(s) - v, err := strconv.ParseBool(s) - if err == nil { - return v, nil - } - switch strings.ToLower(s) { - case "yes": - return true, nil - case "y": - return true, nil - case "no": - return false, nil - case "n": - return false, nil - } - return false, errors.New("invalid boolean value") -} - -// Parse array of strings but stop if an asterisk is placed at the end of a line. -func appendUntilFinal(s *[]string, value string, final *bool) { - if *final { - return - } - if last := len(value) - 1; last >= 0 && value[last] == '*' { - *final = true - value = value[:len(value)-1] - } - *s = append(*s, value) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/ccache.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/ccache.go deleted file mode 100644 index 3ef5758e4e0..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/ccache.go +++ /dev/null @@ -1,351 +0,0 @@ -package credentials - -import ( - "bytes" - "encoding/binary" - "errors" - "io/ioutil" - "strings" - "time" - "unsafe" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -const ( - headerFieldTagKDCOffset = 1 -) - -// The first byte of the file always has the value 5. -// The value of the second byte contains the version number (1 through 4) -// Versions 1 and 2 of the file format use native byte order for integer representations. 
-// Versions 3 and 4 always use big-endian byte order -// After the two-byte version indicator, the file has three parts: -// 1) the header (in version 4 only) -// 2) the default principal name -// 3) a sequence of credentials - -// CCache is the file credentials cache as define here: https://web.mit.edu/kerberos/krb5-latest/doc/formats/ccache_file_format.html -type CCache struct { - Version uint8 - Header header - DefaultPrincipal principal - Credentials []credential - Path string -} - -type header struct { - length uint16 - fields []headerField -} - -type headerField struct { - tag uint16 - length uint16 - value []byte -} - -// Credential cache entry principal struct. -type principal struct { - Realm string - PrincipalName types.PrincipalName -} - -type credential struct { - Client principal - Server principal - Key types.EncryptionKey - AuthTime time.Time - StartTime time.Time - EndTime time.Time - RenewTill time.Time - IsSKey bool - TicketFlags asn1.BitString - Addresses []types.HostAddress - AuthData []types.AuthorizationDataEntry - Ticket []byte - SecondTicket []byte -} - -// LoadCCache loads a credential cache file into a CCache type. -func LoadCCache(cpath string) (CCache, error) { - k, err := ioutil.ReadFile(cpath) - if err != nil { - return CCache{}, err - } - c, err := ParseCCache(k) - c.Path = cpath - return c, err -} - -// ParseCCache byte slice of credential cache data into CCache type. -func ParseCCache(b []byte) (c CCache, err error) { - p := 0 - //The first byte of the file always has the value 5 - if int8(b[p]) != 5 { - err = errors.New("Invalid credential cache data. First byte does not equal 5") - return - } - p++ - //Get credential cache version - //The second byte contains the version number (1 to 4) - c.Version = uint8(b[p]) - if c.Version < 1 || c.Version > 4 { - err = errors.New("Invalid credential cache data. Keytab version is not within 1 to 4") - if err != nil { - return - } - } - p++ - //Version 1 or 2 of the file format uses native byte order for integer representations. Versions 3 & 4 always uses big-endian byte order - var endian binary.ByteOrder - endian = binary.BigEndian - if (c.Version == 1 || c.Version == 2) && isNativeEndianLittle() { - endian = binary.LittleEndian - } - if c.Version == 4 { - err = parseHeader(b, &p, &c, &endian) - if err != nil { - return - } - } - c.DefaultPrincipal = parsePrincipal(b, &p, &c, &endian) - for p < len(b) { - cred, e := parseCredential(b, &p, &c, &endian) - if e != nil { - err = e - return - } - c.Credentials = append(c.Credentials, cred) - } - return -} - -func parseHeader(b []byte, p *int, c *CCache, e *binary.ByteOrder) error { - if c.Version != 4 { - return errors.New("Credentials cache version is not 4 so there is no header to parse.") - } - h := header{} - h.length = uint16(readInt16(b, p, e)) - for *p <= int(h.length) { - f := headerField{} - f.tag = uint16(readInt16(b, p, e)) - f.length = uint16(readInt16(b, p, e)) - f.value = b[*p : *p+int(f.length)] - *p += int(f.length) - if !f.valid() { - return errors.New("Invalid credential cache header found") - } - h.fields = append(h.fields, f) - } - c.Header = h - return nil -} - -// Parse the Keytab bytes of a principal into a Keytab entry's principal. 
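LoadCCache above pairs with the NewClientFromCCache constructor removed earlier; together they reuse an existing kinit session. A minimal usage sketch of that v5 flow (the cache path and SPN are placeholders, and per the WARNING above such a client does not renew its TGT):

package main

import (
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/client"
	"gopkg.in/jcmturner/gokrb5.v5/credentials"
)

func main() {
	ccache, err := credentials.LoadCCache("/tmp/krb5cc_1000") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	cl, err := client.NewClientFromCCache(ccache)
	if err != nil {
		log.Fatal(err)
	}
	// A real setup would also load krb5.conf via WithConfig, as in the
	// password sketch earlier. The TGT and any cached service tickets are
	// now available on cl.
	if _, _, err = cl.GetServiceTicket("HTTP/www.example.com"); err != nil {
		log.Fatal(err)
	}
}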
-func parsePrincipal(b []byte, p *int, c *CCache, e *binary.ByteOrder) (princ principal) { - if c.Version != 1 { - //Name Type is omitted in version 1 - princ.PrincipalName.NameType = int32(readInt32(b, p, e)) - } - nc := int(readInt32(b, p, e)) - if c.Version == 1 { - //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 - nc-- - } - lenRealm := readInt32(b, p, e) - princ.Realm = string(readBytes(b, p, int(lenRealm), e)) - for i := 0; i < int(nc); i++ { - l := readInt32(b, p, e) - princ.PrincipalName.NameString = append(princ.PrincipalName.NameString, string(readBytes(b, p, int(l), e))) - } - return princ -} - -func parseCredential(b []byte, p *int, c *CCache, e *binary.ByteOrder) (cred credential, err error) { - cred.Client = parsePrincipal(b, p, c, e) - cred.Server = parsePrincipal(b, p, c, e) - key := types.EncryptionKey{} - key.KeyType = int32(readInt16(b, p, e)) - if c.Version == 3 { - //repeated twice in version 3 - key.KeyType = int32(readInt16(b, p, e)) - } - key.KeyValue = readData(b, p, e) - cred.Key = key - cred.AuthTime = readTimestamp(b, p, e) - cred.StartTime = readTimestamp(b, p, e) - cred.EndTime = readTimestamp(b, p, e) - cred.RenewTill = readTimestamp(b, p, e) - if ik := readInt8(b, p, e); ik == 0 { - cred.IsSKey = false - } else { - cred.IsSKey = true - } - cred.TicketFlags = types.NewKrbFlags() - cred.TicketFlags.Bytes = readBytes(b, p, 4, e) - l := int(readInt32(b, p, e)) - cred.Addresses = make([]types.HostAddress, l, l) - for i := range cred.Addresses { - cred.Addresses[i] = readAddress(b, p, e) - } - l = int(readInt32(b, p, e)) - cred.AuthData = make([]types.AuthorizationDataEntry, l, l) - for i := range cred.AuthData { - cred.AuthData[i] = readAuthDataEntry(b, p, e) - } - cred.Ticket = readData(b, p, e) - cred.SecondTicket = readData(b, p, e) - return -} - -// GetClientPrincipalName returns a PrincipalName type for the client the credentials cache is for. -func (c *CCache) GetClientPrincipalName() types.PrincipalName { - return c.DefaultPrincipal.PrincipalName -} - -// GetClientRealm returns the reals of the client the credentials cache is for. -func (c *CCache) GetClientRealm() string { - return c.DefaultPrincipal.Realm -} - -// GetClientCredentials returns a Credentials object representing the client of the credentials cache. -func (c *CCache) GetClientCredentials() *Credentials { - return &Credentials{ - Username: c.DefaultPrincipal.PrincipalName.GetPrincipalNameString(), - Realm: c.GetClientRealm(), - CName: c.DefaultPrincipal.PrincipalName, - } -} - -// Contains tests if the cache contains a credential for the provided server PrincipalName -func (c *CCache) Contains(p types.PrincipalName) bool { - for _, cred := range c.Credentials { - if cred.Server.PrincipalName.Equal(p) { - return true - } - } - return false -} - -// GetEntry returns a specific credential for the PrincipalName provided. -func (c *CCache) GetEntry(p types.PrincipalName) (credential, bool) { - var cred credential - var found bool - for i := range c.Credentials { - if c.Credentials[i].Server.PrincipalName.Equal(p) { - cred = c.Credentials[i] - found = true - break - } - } - if !found { - return cred, false - } - return cred, true -} - -// GetEntries filters out configuration entries an returns a slice of credentials. 
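parseCredential above and the read helpers further below walk the raw cache bytes with a shared offset pointer, with the byte order chosen once from the file version. A standalone sketch of that read-at-offset pattern using encoding/binary directly:

package main

import (
	"encoding/binary"
	"fmt"
)

// readUint32 reads a 32-bit integer at *p in the given byte order and
// advances the offset, mirroring the readInt32 helper below.
func readUint32(b []byte, p *int, order binary.ByteOrder) uint32 {
	v := order.Uint32(b[*p : *p+4])
	*p += 4
	return v
}

// readData reads a length-prefixed byte field, the shape the ccache format
// uses for realm names, principal components, and tickets.
func readData(b []byte, p *int, order binary.ByteOrder) []byte {
	l := int(readUint32(b, p, order))
	v := b[*p : *p+l]
	*p += l
	return v
}

func main() {
	// A 4-byte big-endian length (5) followed by the field bytes.
	buf := []byte{0, 0, 0, 5, 'H', 'T', 'T', 'P', '/'}
	p := 0
	fmt.Printf("%s\n", readData(buf, &p, binary.BigEndian)) // HTTP/
}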
-func (c *CCache) GetEntries() []credential { - var creds []credential - for _, cred := range c.Credentials { - // Filter out configuration entries - if strings.HasPrefix(cred.Server.Realm, "X-CACHECONF") { - continue - } - creds = append(creds, cred) - } - return creds -} - -func (h *headerField) valid() bool { - // At this time there is only one defined header field. - // Its tag value is 1, its length is always 8. - // Its contents are two 32-bit integers giving the seconds and microseconds - // of the time offset of the KDC relative to the client. - // Adding this offset to the current time on the client should give the current time on the KDC, if that offset has not changed since the initial authentication. - - // Done as a switch in case other tag values are added in the future. - switch h.tag { - case headerFieldTagKDCOffset: - if h.length != 8 || len(h.value) != 8 { - return false - } - return true - } - return false -} - -func readData(b []byte, p *int, e *binary.ByteOrder) []byte { - l := readInt32(b, p, e) - return readBytes(b, p, int(l), e) -} - -func readAddress(b []byte, p *int, e *binary.ByteOrder) types.HostAddress { - a := types.HostAddress{} - a.AddrType = int32(readInt16(b, p, e)) - a.Address = readData(b, p, e) - return a -} - -func readAuthDataEntry(b []byte, p *int, e *binary.ByteOrder) types.AuthorizationDataEntry { - a := types.AuthorizationDataEntry{} - a.ADType = int32(readInt16(b, p, e)) - a.ADData = readData(b, p, e) - return a -} - -// Read bytes representing a timestamp. -func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { - return time.Unix(int64(readInt32(b, p, e)), 0) -} - -// Read bytes representing an eight bit integer. -func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { - buf := bytes.NewBuffer(b[*p : *p+1]) - binary.Read(buf, *e, &i) - *p++ - return -} - -// Read bytes representing a sixteen bit integer. -func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { - buf := bytes.NewBuffer(b[*p : *p+2]) - binary.Read(buf, *e, &i) - *p += 2 - return -} - -// Read bytes representing a thirty two bit integer. -func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { - buf := bytes.NewBuffer(b[*p : *p+4]) - binary.Read(buf, *e, &i) - *p += 4 - return -} - -func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { - buf := bytes.NewBuffer(b[*p : *p+s]) - r := make([]byte, s) - binary.Read(buf, *e, &r) - *p += s - return r -} - -func isNativeEndianLittle() bool { - var x = 0x012345678 - var p = unsafe.Pointer(&x) - var bp = (*[4]byte)(p) - - var endian bool - if 0x01 == bp[0] { - endian = false - } else if (0x78 & 0xff) == (bp[0] & 0xff) { - endian = true - } else { - // Default to big endian - endian = false - } - return endian -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/credentials.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/credentials.go deleted file mode 100644 index b1b47552c2d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/credentials/credentials.go +++ /dev/null @@ -1,257 +0,0 @@ -// Package credentials provides credentials management for Kerberos 5 authentication. -package credentials - -import ( - "time" - - "github.com/hashicorp/go-uuid" - "gopkg.in/jcmturner/gokrb5.v5/iana/nametype" - "gopkg.in/jcmturner/gokrb5.v5/keytab" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -const ( - // AttributeKeyADCredentials assigned number for AD credentials. - AttributeKeyADCredentials = 1 -) - -// Credentials struct for a user. 
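As a usage sketch of the v5 ccache API deleted above (the cache path and output formatting are illustrative assumptions, not taken from this repository):

package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/credentials"
)

func main() {
	// Load and parse a credential cache file; the path is hypothetical.
	cc, err := credentials.LoadCCache("/tmp/krb5cc_1000")
	if err != nil {
		log.Fatalf("could not load ccache: %v", err)
	}
	// The default principal identifies the client the cache belongs to.
	fmt.Printf("client: %s@%s\n",
		cc.GetClientPrincipalName().GetPrincipalNameString(),
		cc.GetClientRealm())
	// GetEntries filters out the X-CACHECONF configuration entries.
	for _, cred := range cc.GetEntries() {
		fmt.Printf("ticket for %s, expires %v\n",
			cred.Server.PrincipalName.GetPrincipalNameString(),
			cred.EndTime)
	}
}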
-// Contains either a keytab, password or both.
-// Keytabs are used over passwords if both are defined.
-type Credentials struct {
-	Username    string
-	displayName string
-	Realm       string
-	CName       types.PrincipalName
-	Keytab      keytab.Keytab
-	Password    string
-	Attributes  map[int]interface{}
-	ValidUntil  time.Time
-
-	authenticated   bool
-	human           bool
-	authTime        time.Time
-	groupMembership map[string]bool
-	sessionID       string
-}
-
-// ADCredentials contains information obtained from the PAC.
-type ADCredentials struct {
-	EffectiveName       string
-	FullName            string
-	UserID              int
-	PrimaryGroupID      int
-	LogOnTime           time.Time
-	LogOffTime          time.Time
-	PasswordLastSet     time.Time
-	GroupMembershipSIDs []string
-	LogonDomainName     string
-	LogonDomainID       string
-	LogonServer         string
-}
-
-// NewCredentials creates a new Credentials instance.
-func NewCredentials(username string, realm string) Credentials {
-	uid, err := uuid.GenerateUUID()
-	if err != nil {
-		uid = "00unique-sess-ions-uuid-unavailable0"
-	}
-	return Credentials{
-		Username:    username,
-		displayName: username,
-		Realm:       realm,
-		CName:       types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, username),
-		Keytab:      keytab.NewKeytab(),
-		Attributes:  make(map[int]interface{}),
-		// initialised so AddAuthzAttribute does not write to a nil map
-		groupMembership: make(map[string]bool),
-		sessionID:       uid,
-	}
-}
-
-// NewCredentialsFromPrincipal creates a new Credentials instance with the user details provided as a PrincipalName type.
-func NewCredentialsFromPrincipal(cname types.PrincipalName, realm string) Credentials {
-	uid, err := uuid.GenerateUUID()
-	if err != nil {
-		uid = "00unique-sess-ions-uuid-unavailable0"
-	}
-	return Credentials{
-		Username:        cname.GetPrincipalNameString(),
-		displayName:     cname.GetPrincipalNameString(),
-		Realm:           realm,
-		CName:           cname,
-		Keytab:          keytab.NewKeytab(),
-		Attributes:      make(map[int]interface{}),
-		groupMembership: make(map[string]bool),
-		sessionID:       uid,
-	}
-}
-
-// WithKeytab sets the Keytab in the Credentials struct.
-func (c *Credentials) WithKeytab(kt keytab.Keytab) *Credentials {
-	c.Keytab = kt
-	return c
-}
-
-// WithPassword sets the password in the Credentials struct.
-func (c *Credentials) WithPassword(password string) *Credentials {
-	c.Password = password
-	return c
-}
-
-// HasKeytab queries if the Credentials has a keytab defined.
-func (c *Credentials) HasKeytab() bool {
-	return len(c.Keytab.Entries) > 0
-}
-
-// SetValidUntil sets the expiry time of the credentials
-func (c *Credentials) SetValidUntil(t time.Time) {
-	c.ValidUntil = t
-}
-
-// HasPassword queries if the Credentials has a password defined.
-func (c *Credentials) HasPassword() bool {
-	return c.Password != ""
-}
-
-// SetADCredentials adds ADCredentials attributes to the credentials
-func (c *Credentials) SetADCredentials(a ADCredentials) {
-	c.Attributes[AttributeKeyADCredentials] = a
-	if a.FullName != "" {
-		c.SetDisplayName(a.FullName)
-	}
-	if a.EffectiveName != "" {
-		c.SetUserName(a.EffectiveName)
-	}
-	if a.LogonDomainName != "" {
-		c.SetDomain(a.LogonDomainName)
-	}
-	for i := range a.GroupMembershipSIDs {
-		c.AddAuthzAttribute(a.GroupMembershipSIDs[i])
-	}
-}
-
-// Methods to implement goidentity.Identity interface
-
-// UserName returns the credential's username.
-func (c *Credentials) UserName() string {
-	return c.Username
-}
-
-// SetUserName sets the username value on the credential.
-func (c *Credentials) SetUserName(s string) {
-	c.Username = s
-}
-
-// Domain returns the credential's domain.
-func (c *Credentials) Domain() string { - return c.Realm -} - -// SetDomain sets the domain value on the credential. -func (c *Credentials) SetDomain(s string) { - c.Realm = s -} - -// DisplayName returns the credential's display name. -func (c *Credentials) DisplayName() string { - return c.displayName -} - -// SetDisplayName sets the display name value on the credential. -func (c *Credentials) SetDisplayName(s string) { - c.displayName = s -} - -// Human returns if the credential represents a human or not. -func (c *Credentials) Human() bool { - return c.human -} - -// SetHuman sets the credential as human. -func (c *Credentials) SetHuman(b bool) { - c.human = b -} - -// AuthTime returns the time the credential was authenticated. -func (c *Credentials) AuthTime() time.Time { - return c.authTime -} - -// SetAuthTime sets the time the credential was authenticated. -func (c *Credentials) SetAuthTime(t time.Time) { - c.authTime = t -} - -// AuthzAttributes returns the credentials authorizing attributes. -func (c *Credentials) AuthzAttributes() []string { - s := make([]string, len(c.groupMembership)) - i := 0 - for a := range c.groupMembership { - s[i] = a - i++ - } - return s -} - -// Authenticated indicates if the credential has been successfully authenticated or not. -func (c *Credentials) Authenticated() bool { - return c.authenticated -} - -// SetAuthenticated sets the credential as having been successfully authenticated. -func (c *Credentials) SetAuthenticated(b bool) { - c.authenticated = b -} - -// AddAuthzAttribute adds an authorization attribute to the credential. -func (c *Credentials) AddAuthzAttribute(a string) { - c.groupMembership[a] = true -} - -// RemoveAuthzAttribute removes an authorization attribute from the credential. -func (c *Credentials) RemoveAuthzAttribute(a string) { - if _, ok := c.groupMembership[a]; !ok { - return - } - delete(c.groupMembership, a) -} - -// EnableAuthzAttribute toggles an authorization attribute to an enabled state on the credential. -func (c *Credentials) EnableAuthzAttribute(a string) { - if enabled, ok := c.groupMembership[a]; ok && !enabled { - c.groupMembership[a] = true - } -} - -// DisableAuthzAttribute toggles an authorization attribute to a disabled state on the credential. -func (c *Credentials) DisableAuthzAttribute(a string) { - if enabled, ok := c.groupMembership[a]; ok && enabled { - c.groupMembership[a] = false - } -} - -// Authorized indicates if the credential has the specified authorizing attribute. -func (c *Credentials) Authorized(a string) bool { - if enabled, ok := c.groupMembership[a]; ok && enabled { - return true - } - return false -} - -// SessionID returns the credential's session ID. -func (c *Credentials) SessionID() string { - return c.sessionID -} - -// Expired indicates if the credential has expired. 
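The Credentials type above is a builder-style implementation of a goidentity-like interface. A minimal sketch of how it was typically driven (principal, realm, password and group names are illustrative); the principal-based constructor is used because it initialises the internal group-membership map:

package main

import (
	"fmt"
	"time"

	"gopkg.in/jcmturner/gokrb5.v5/credentials"
	"gopkg.in/jcmturner/gokrb5.v5/iana/nametype"
	"gopkg.in/jcmturner/gokrb5.v5/types"
)

func main() {
	cname := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, "alice")
	c := credentials.NewCredentialsFromPrincipal(cname, "EXAMPLE.COM")
	c.WithPassword("hunter2") // a keytab, if also set, would take precedence

	// Identity state and authorization attributes.
	c.SetAuthenticated(true)
	c.SetHuman(true)
	c.SetValidUntil(time.Now().UTC().Add(10 * time.Hour))
	c.AddAuthzAttribute("admins")

	fmt.Println(c.UserName(), c.Domain(), c.Authorized("admins"), c.Expired())
}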
-func (c *Credentials) Expired() bool { - if !c.ValidUntil.IsZero() && time.Now().UTC().After(c.ValidUntil) { - return true - } - return false -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha1-96.go deleted file mode 100644 index 53c4db1db98..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha1-96.go +++ /dev/null @@ -1,173 +0,0 @@ -package crypto - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/sha1" - "hash" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// RFC 3962 -//+--------------------------------------------------------------------+ -//| protocol key format 128- or 256-bit string | -//| | -//| string-to-key function PBKDF2+DK with variable | -//| iteration count (see | -//| above) | -//| | -//| default string-to-key parameters 00 00 10 00 | -//| | -//| key-generation seed length key size | -//| | -//| random-to-key function identity function | -//| | -//| hash function, H SHA-1 | -//| | -//| HMAC output size, h 12 octets (96 bits) | -//| | -//| message block size, m 1 octet | -//| | -//| encryption/decryption functions, AES in CBC-CTS mode | -//| E and D (cipher block size 16 | -//| octets), with next-to- | -//| last block (last block | -//| if only one) as CBC-style | -//| ivec | -//+--------------------------------------------------------------------+ -// -//+--------------------------------------------------------------------+ -//| encryption types | -//+--------------------------------------------------------------------+ -//| type name etype value key size | -//+--------------------------------------------------------------------+ -//| aes128-cts-hmac-sha1-96 17 128 | -//| aes256-cts-hmac-sha1-96 18 256 | -//+--------------------------------------------------------------------+ -// -//+--------------------------------------------------------------------+ -//| checksum types | -//+--------------------------------------------------------------------+ -//| type name sumtype value length | -//+--------------------------------------------------------------------+ -//| hmac-sha1-96-aes128 15 96 | -//| hmac-sha1-96-aes256 16 96 | -//+--------------------------------------------------------------------+ - -// Aes128CtsHmacSha96 implements Kerberos encryption type aes128-cts-hmac-sha1-96 -type Aes128CtsHmacSha96 struct { -} - -// GetETypeID returns the EType ID number. -func (e Aes128CtsHmacSha96) GetETypeID() int32 { - return etypeID.AES128_CTS_HMAC_SHA1_96 -} - -// GetHashID returns the checksum type ID number. -func (e Aes128CtsHmacSha96) GetHashID() int32 { - return chksumtype.HMAC_SHA1_96_AES128 -} - -// GetKeyByteSize returns the number of bytes for key of this etype. -func (e Aes128CtsHmacSha96) GetKeyByteSize() int { - return 128 / 8 -} - -// GetKeySeedBitLength returns the number of bits for the seed for key generation. -func (e Aes128CtsHmacSha96) GetKeySeedBitLength() int { - return e.GetKeyByteSize() * 8 -} - -// GetHashFunc returns the hash function for this etype. -func (e Aes128CtsHmacSha96) GetHashFunc() func() hash.Hash { - return sha1.New -} - -// GetMessageBlockByteSize returns the block size for the etype's messages. 
-func (e Aes128CtsHmacSha96) GetMessageBlockByteSize() int { - return 1 -} - -// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. -func (e Aes128CtsHmacSha96) GetDefaultStringToKeyParams() string { - return "00001000" -} - -// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. -func (e Aes128CtsHmacSha96) GetConfounderByteSize() int { - return aes.BlockSize -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e Aes128CtsHmacSha96) GetHMACBitLength() int { - return 96 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. -func (e Aes128CtsHmacSha96) GetCypherBlockBitLength() int { - return aes.BlockSize * 8 -} - -// StringToKey returns a key derived from the string provided. -func (e Aes128CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - return rfc3962.StringToKey(secret, salt, s2kparams, e) -} - -// RandomToKey returns a key from the bytes provided. -func (e Aes128CtsHmacSha96) RandomToKey(b []byte) []byte { - return rfc3961.RandomToKey(b) -} - -// EncryptData encrypts the data provided. -func (e Aes128CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { - return rfc3962.EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. -func (e Aes128CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { - return rfc3962.EncryptMessage(key, message, usage, e) -} - -// DecryptData decrypts the data provided. -func (e Aes128CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { - return rfc3962.DecryptData(key, data, e) -} - -// DecryptMessage decrypts the message provided and verifies the integrity of the message. -func (e Aes128CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { - return rfc3962.DecryptMessage(key, ciphertext, usage, e) -} - -// DeriveKey derives a key from the protocol key based on the usage value. -func (e Aes128CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { - return rfc3961.DeriveKey(protocolKey, usage, e) -} - -// DeriveRandom generates data needed for key generation. -func (e Aes128CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { - return rfc3961.DeriveRandom(protocolKey, usage, e) -} - -// VerifyIntegrity checks the integrity of the plaintext message. -func (e Aes128CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { - return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) -} - -// GetChecksumHash returns a keyed checksum hash of the bytes provided. -func (e Aes128CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { - return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) -} - -// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
-func (e Aes128CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { - c, err := e.GetChecksumHash(protocolKey, data, usage) - if err != nil { - return false - } - return hmac.Equal(chksum, c) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha256-128.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha256-128.go deleted file mode 100644 index a95ee99e779..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes128-cts-hmac-sha256-128.go +++ /dev/null @@ -1,135 +0,0 @@ -package crypto - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/sha256" - "hash" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// RFC https://tools.ietf.org/html/rfc8009 - -// Aes128CtsHmacSha256128 implements Kerberos encryption type aes128-cts-hmac-sha256-128 -type Aes128CtsHmacSha256128 struct { -} - -// GetETypeID returns the EType ID number. -func (e Aes128CtsHmacSha256128) GetETypeID() int32 { - return etypeID.AES128_CTS_HMAC_SHA256_128 -} - -// GetHashID returns the checksum type ID number. -func (e Aes128CtsHmacSha256128) GetHashID() int32 { - return chksumtype.HMAC_SHA256_128_AES128 -} - -// GetKeyByteSize returns the number of bytes for key of this etype. -func (e Aes128CtsHmacSha256128) GetKeyByteSize() int { - return 128 / 8 -} - -// GetKeySeedBitLength returns the number of bits for the seed for key generation. -func (e Aes128CtsHmacSha256128) GetKeySeedBitLength() int { - return e.GetKeyByteSize() * 8 -} - -// GetHashFunc returns the hash function for this etype. -func (e Aes128CtsHmacSha256128) GetHashFunc() func() hash.Hash { - return sha256.New -} - -// GetMessageBlockByteSize returns the block size for the etype's messages. -func (e Aes128CtsHmacSha256128) GetMessageBlockByteSize() int { - return 1 -} - -// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. -func (e Aes128CtsHmacSha256128) GetDefaultStringToKeyParams() string { - return "00008000" -} - -// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. -func (e Aes128CtsHmacSha256128) GetConfounderByteSize() int { - return aes.BlockSize -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e Aes128CtsHmacSha256128) GetHMACBitLength() int { - return 128 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. -func (e Aes128CtsHmacSha256128) GetCypherBlockBitLength() int { - return aes.BlockSize * 8 -} - -// StringToKey returns a key derived from the string provided. -func (e Aes128CtsHmacSha256128) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - saltp := rfc8009.GetSaltP(salt, "aes128-cts-hmac-sha256-128") - return rfc8009.StringToKey(secret, saltp, s2kparams, e) -} - -// RandomToKey returns a key from the bytes provided. -func (e Aes128CtsHmacSha256128) RandomToKey(b []byte) []byte { - return rfc8009.RandomToKey(b) -} - -// EncryptData encrypts the data provided. -func (e Aes128CtsHmacSha256128) EncryptData(key, data []byte) ([]byte, []byte, error) { - return rfc8009.EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. 
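The aes128-cts-hmac-sha1-96 type above wires the RFC 3962 primitives into a complete string-to-key derivation and encrypt/decrypt round trip. A hedged sketch of that flow; the salt (normally realm concatenated with principal) and the key-usage number are illustrative:

package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/crypto"
)

func main() {
	e := crypto.Aes128CtsHmacSha96{}

	// Derive a protocol key from a password and salt (RFC 3962 PBKDF2+DK).
	key, err := e.StringToKey("password", "EXAMPLE.COMalice", e.GetDefaultStringToKeyParams())
	if err != nil {
		log.Fatal(err)
	}

	const usage = 2 // illustrative key-usage number
	_, ct, err := e.EncryptMessage(key, []byte("secret message"), usage)
	if err != nil {
		log.Fatal(err)
	}
	pt, err := e.DecryptMessage(key, ct, usage) // verifies integrity, strips confounder
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", pt)
}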
-func (e Aes128CtsHmacSha256128) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc8009.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes128CtsHmacSha256128) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc8009.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes128CtsHmacSha256128) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes128CtsHmacSha256128) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveKey(protocolKey, usage, e), nil
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes128CtsHmacSha256128) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the ciphertext message.
-// The HMAC is calculated over the cipher state concatenated with the
-// AES output, instead of being calculated over the confounder and
-// plaintext. This allows the message receiver to verify the
-// integrity of the message before decrypting the message.
-// Therefore the pt value passed to this interface method is not used. Pass any []byte.
-func (e Aes128CtsHmacSha256128) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	// pt is ignored; it is only present to satisfy the etype interface.
-	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes128CtsHmacSha256128) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum checks that the checksum of the message bytes is the same as the checksum provided.
-func (e Aes128CtsHmacSha256128) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { - c, err := e.GetChecksumHash(protocolKey, data, usage) - if err != nil { - return false - } - return hmac.Equal(chksum, c) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha1-96.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha1-96.go deleted file mode 100644 index 14a1ce61b51..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha1-96.go +++ /dev/null @@ -1,173 +0,0 @@ -package crypto - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/sha1" - "hash" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// RFC 3962 -//+--------------------------------------------------------------------+ -//| protocol key format 128- or 256-bit string | -//| | -//| string-to-key function PBKDF2+DK with variable | -//| iteration count (see | -//| above) | -//| | -//| default string-to-key parameters 00 00 10 00 | -//| | -//| key-generation seed length key size | -//| | -//| random-to-key function identity function | -//| | -//| hash function, H SHA-1 | -//| | -//| HMAC output size, h 12 octets (96 bits) | -//| | -//| message block size, m 1 octet | -//| | -//| encryption/decryption functions, AES in CBC-CTS mode | -//| E and D (cipher block size 16 | -//| octets), with next-to- | -//| last block (last block | -//| if only one) as CBC-style | -//| ivec | -//+--------------------------------------------------------------------+ -// -//+--------------------------------------------------------------------+ -//| encryption types | -//+--------------------------------------------------------------------+ -//| type name etype value key size | -//+--------------------------------------------------------------------+ -//| aes128-cts-hmac-sha1-96 17 128 | -//| aes256-cts-hmac-sha1-96 18 256 | -//+--------------------------------------------------------------------+ -// -//+--------------------------------------------------------------------+ -//| checksum types | -//+--------------------------------------------------------------------+ -//| type name sumtype value length | -//+--------------------------------------------------------------------+ -//| hmac-sha1-96-aes128 15 96 | -//| hmac-sha1-96-aes256 16 96 | -//+--------------------------------------------------------------------+ - -// Aes256CtsHmacSha96 implements Kerberos encryption type aes256-cts-hmac-sha1-96 -type Aes256CtsHmacSha96 struct { -} - -// GetETypeID returns the EType ID number. -func (e Aes256CtsHmacSha96) GetETypeID() int32 { - return etypeID.AES256_CTS_HMAC_SHA1_96 -} - -// GetHashID returns the checksum type ID number. -func (e Aes256CtsHmacSha96) GetHashID() int32 { - return chksumtype.HMAC_SHA1_96_AES256 -} - -// GetKeyByteSize returns the number of bytes for key of this etype. -func (e Aes256CtsHmacSha96) GetKeyByteSize() int { - return 256 / 8 -} - -// GetKeySeedBitLength returns the number of bits for the seed for key generation. -func (e Aes256CtsHmacSha96) GetKeySeedBitLength() int { - return e.GetKeyByteSize() * 8 -} - -// GetHashFunc returns the hash function for this etype. -func (e Aes256CtsHmacSha96) GetHashFunc() func() hash.Hash { - return sha1.New -} - -// GetMessageBlockByteSize returns the block size for the etype's messages. 
-func (e Aes256CtsHmacSha96) GetMessageBlockByteSize() int { - return 1 -} - -// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. -func (e Aes256CtsHmacSha96) GetDefaultStringToKeyParams() string { - return "00001000" -} - -// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. -func (e Aes256CtsHmacSha96) GetConfounderByteSize() int { - return aes.BlockSize -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e Aes256CtsHmacSha96) GetHMACBitLength() int { - return 96 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. -func (e Aes256CtsHmacSha96) GetCypherBlockBitLength() int { - return aes.BlockSize * 8 -} - -// StringToKey returns a key derived from the string provided. -func (e Aes256CtsHmacSha96) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - return rfc3962.StringToKey(secret, salt, s2kparams, e) -} - -// RandomToKey returns a key from the bytes provided. -func (e Aes256CtsHmacSha96) RandomToKey(b []byte) []byte { - return rfc3961.RandomToKey(b) -} - -// EncryptData encrypts the data provided. -func (e Aes256CtsHmacSha96) EncryptData(key, data []byte) ([]byte, []byte, error) { - return rfc3962.EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. -func (e Aes256CtsHmacSha96) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { - return rfc3962.EncryptMessage(key, message, usage, e) -} - -// DecryptData decrypts the data provided. -func (e Aes256CtsHmacSha96) DecryptData(key, data []byte) ([]byte, error) { - return rfc3962.DecryptData(key, data, e) -} - -// DecryptMessage decrypts the message provided and verifies the integrity of the message. -func (e Aes256CtsHmacSha96) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { - return rfc3962.DecryptMessage(key, ciphertext, usage, e) -} - -// DeriveKey derives a key from the protocol key based on the usage value. -func (e Aes256CtsHmacSha96) DeriveKey(protocolKey, usage []byte) ([]byte, error) { - return rfc3961.DeriveKey(protocolKey, usage, e) -} - -// DeriveRandom generates data needed for key generation. -func (e Aes256CtsHmacSha96) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { - return rfc3961.DeriveRandom(protocolKey, usage, e) -} - -// VerifyIntegrity checks the integrity of the plaintext message. -func (e Aes256CtsHmacSha96) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { - return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) -} - -// GetChecksumHash returns a keyed checksum hash of the bytes provided. -func (e Aes256CtsHmacSha96) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { - return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) -} - -// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
-func (e Aes256CtsHmacSha96) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { - c, err := e.GetChecksumHash(protocolKey, data, usage) - if err != nil { - return false - } - return hmac.Equal(chksum, c) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha384-192.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha384-192.go deleted file mode 100644 index f073a05fee7..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/aes256-cts-hmac-sha384-192.go +++ /dev/null @@ -1,135 +0,0 @@ -package crypto - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/sha512" - "hash" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// RFC https://tools.ietf.org/html/rfc8009 - -// Aes256CtsHmacSha384192 implements Kerberos encryption type aes256-cts-hmac-sha384-192 -type Aes256CtsHmacSha384192 struct { -} - -// GetETypeID returns the EType ID number. -func (e Aes256CtsHmacSha384192) GetETypeID() int32 { - return etypeID.AES256_CTS_HMAC_SHA384_192 -} - -// GetHashID returns the checksum type ID number. -func (e Aes256CtsHmacSha384192) GetHashID() int32 { - return chksumtype.HMAC_SHA384_192_AES256 -} - -// GetKeyByteSize returns the number of bytes for key of this etype. -func (e Aes256CtsHmacSha384192) GetKeyByteSize() int { - return 192 / 8 -} - -// GetKeySeedBitLength returns the number of bits for the seed for key generation. -func (e Aes256CtsHmacSha384192) GetKeySeedBitLength() int { - return e.GetKeyByteSize() * 8 -} - -// GetHashFunc returns the hash function for this etype. -func (e Aes256CtsHmacSha384192) GetHashFunc() func() hash.Hash { - return sha512.New384 -} - -// GetMessageBlockByteSize returns the block size for the etype's messages. -func (e Aes256CtsHmacSha384192) GetMessageBlockByteSize() int { - return 1 -} - -// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. -func (e Aes256CtsHmacSha384192) GetDefaultStringToKeyParams() string { - return "00008000" -} - -// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. -func (e Aes256CtsHmacSha384192) GetConfounderByteSize() int { - return aes.BlockSize -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e Aes256CtsHmacSha384192) GetHMACBitLength() int { - return 192 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. -func (e Aes256CtsHmacSha384192) GetCypherBlockBitLength() int { - return aes.BlockSize * 8 -} - -// StringToKey returns a key derived from the string provided. -func (e Aes256CtsHmacSha384192) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - saltp := rfc8009.GetSaltP(salt, "aes256-cts-hmac-sha384-192") - return rfc8009.StringToKey(secret, saltp, s2kparams, e) -} - -// RandomToKey returns a key from the bytes provided. -func (e Aes256CtsHmacSha384192) RandomToKey(b []byte) []byte { - return rfc8009.RandomToKey(b) -} - -// EncryptData encrypts the data provided. -func (e Aes256CtsHmacSha384192) EncryptData(key, data []byte) ([]byte, []byte, error) { - return rfc8009.EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. 
-func (e Aes256CtsHmacSha384192) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) {
-	return rfc8009.EncryptMessage(key, message, usage, e)
-}
-
-// DecryptData decrypts the data provided.
-func (e Aes256CtsHmacSha384192) DecryptData(key, data []byte) ([]byte, error) {
-	return rfc8009.DecryptData(key, data, e)
-}
-
-// DecryptMessage decrypts the message provided and verifies the integrity of the message.
-func (e Aes256CtsHmacSha384192) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) {
-	return rfc8009.DecryptMessage(key, ciphertext, usage, e)
-}
-
-// DeriveKey derives a key from the protocol key based on the usage value.
-func (e Aes256CtsHmacSha384192) DeriveKey(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveKey(protocolKey, usage, e), nil
-}
-
-// DeriveRandom generates data needed for key generation.
-func (e Aes256CtsHmacSha384192) DeriveRandom(protocolKey, usage []byte) ([]byte, error) {
-	return rfc8009.DeriveRandom(protocolKey, usage, e)
-}
-
-// VerifyIntegrity checks the integrity of the ciphertext message.
-// The HMAC is calculated over the cipher state concatenated with the
-// AES output, instead of being calculated over the confounder and
-// plaintext. This allows the message receiver to verify the
-// integrity of the message before decrypting the message.
-// Therefore the pt value passed to this interface method is not used. Pass any []byte.
-func (e Aes256CtsHmacSha384192) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool {
-	// pt is ignored; it is only present to satisfy the etype interface.
-	return rfc8009.VerifyIntegrity(protocolKey, ct, usage, e)
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func (e Aes256CtsHmacSha384192) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) {
-	return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e)
-}
-
-// VerifyChecksum checks that the checksum of the message bytes is the same as the checksum provided.
-func (e Aes256CtsHmacSha384192) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool {
-	c, err := e.GetChecksumHash(protocolKey, data, usage)
-	if err != nil {
-		return false
-	}
-	return hmac.Equal(chksum, c)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/common/common.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/common/common.go
deleted file mode 100644
index 874f93c0588..00000000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/common/common.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Package common provides encryption methods common across encryption types
-package common
-
-import (
-	"bytes"
-	"crypto/hmac"
-	"encoding/binary"
-	"encoding/hex"
-	"errors"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v5/crypto/etype"
-)
-
-// ZeroPad pads bytes with zeros to nearest multiple of message size m.
-func ZeroPad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("Invalid message block size when padding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("Data not valid to pad: Zero size")
-	}
-	if l := len(b) % m; l != 0 {
-		n := m - l
-		z := make([]byte, n)
-		b = append(b, z...)
-	}
-	return b, nil
-}
-
-// PKCS7Pad pads bytes according to RFC 2315 to nearest multiple of message size m.
-func PKCS7Pad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("Invalid message block size when padding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("Data not valid to pad: Zero size")
-	}
-	n := m - (len(b) % m)
-	pb := make([]byte, len(b)+n)
-	copy(pb, b)
-	copy(pb[len(b):], bytes.Repeat([]byte{byte(n)}, n))
-	return pb, nil
-}
-
-// PKCS7Unpad removes RFC 2315 padding from bytes where message size is m.
-func PKCS7Unpad(b []byte, m int) ([]byte, error) {
-	if m <= 0 {
-		return nil, errors.New("invalid message block size when unpadding")
-	}
-	if b == nil || len(b) == 0 {
-		return nil, errors.New("padded data not valid: Zero size")
-	}
-	if len(b)%m != 0 {
-		return nil, errors.New("padded data not valid: Not multiple of message block size")
-	}
-	c := b[len(b)-1]
-	n := int(c)
-	if n == 0 || n > len(b) {
-		return nil, errors.New("padded data not valid: Data may not have been padded")
-	}
-	for i := 0; i < n; i++ {
-		if b[len(b)-n+i] != c {
-			return nil, errors.New("padded data not valid")
-		}
-	}
-	return b[:len(b)-n], nil
-}
-
-// GetHash generates the keyed hash value according to the etype's hash function.
-func GetHash(pt, key []byte, usage []byte, etype etype.EType) ([]byte, error) {
-	k, err := etype.DeriveKey(key, usage)
-	if err != nil {
-		return nil, fmt.Errorf("unable to derive key for checksum: %v", err)
-	}
-	mac := hmac.New(etype.GetHashFunc(), k)
-	p := make([]byte, len(pt))
-	copy(p, pt)
-	mac.Write(p)
-	return mac.Sum(nil)[:etype.GetHMACBitLength()/8], nil
-}
-
-// GetChecksumHash returns a keyed checksum hash of the bytes provided.
-func GetChecksumHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
-	return GetHash(b, key, GetUsageKc(usage), etype)
-}
-
-// GetIntegrityHash returns a keyed integrity hash of the bytes provided.
-func GetIntegrityHash(b, key []byte, usage uint32, etype etype.EType) ([]byte, error) {
-	return GetHash(b, key, GetUsageKi(usage), etype)
-}
-
-// VerifyChecksum checks that the checksum of the msg bytes is the same as the checksum provided.
-func VerifyChecksum(key, chksum, msg []byte, usage uint32, etype etype.EType) bool {
-	//The ciphertext output is the concatenation of the output of the basic
-	//encryption function E and a (possibly truncated) HMAC using the
-	//specified hash function H, both applied to the plaintext with a
-	//random confounder prefix and sufficient padding to bring it to a
-	//multiple of the message block size. When the HMAC is computed, the
-	//key is used in the protocol key form.
-	expectedMAC, _ := GetChecksumHash(msg, key, usage, etype)
-	return hmac.Equal(chksum, expectedMAC)
-}
-
-// GetUsageKc returns the checksum key usage value for the usage number un.
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Kc = DK(base-key, usage | 0x99);
-func GetUsageKc(un uint32) []byte {
-	return getUsage(un, 0x99)
-}
-
-// GetUsageKe returns the encryption key usage value for the usage number un.
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Ke = DK(base-key, usage | 0xAA);
-func GetUsageKe(un uint32) []byte {
-	return getUsage(un, 0xAA)
-}
-
-// GetUsageKi returns the integrity key usage value for the usage number un.
-//
-// RFC 3961: The "well-known constant" used for the DK function is the key usage number, expressed as four octets in big-endian order, followed by one octet indicated below.
-//
-// Ki = DK(base-key, usage | 0x55);
-func GetUsageKi(un uint32) []byte {
-	return getUsage(un, 0x55)
-}
-
-func getUsage(un uint32, o byte) []byte {
-	var buf bytes.Buffer
-	binary.Write(&buf, binary.BigEndian, un)
-	return append(buf.Bytes(), o)
-}
-
-// IterationsToS2Kparams converts the number of iterations as an integer to a string representation.
-func IterationsToS2Kparams(i uint32) string {
-	b := make([]byte, 4, 4)
-	binary.BigEndian.PutUint32(b, i)
-	return hex.EncodeToString(b)
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/crypto.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/crypto.go
deleted file mode 100644
index 3ad5a576480..00000000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/crypto.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Package crypto implements cryptographic functions for Kerberos 5 implementation.
-package crypto
-
-import (
-	"encoding/hex"
-	"fmt"
-
-	"gopkg.in/jcmturner/gokrb5.v5/crypto/etype"
-	"gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v5/iana/etypeID"
-	"gopkg.in/jcmturner/gokrb5.v5/iana/patype"
-	"gopkg.in/jcmturner/gokrb5.v5/types"
-)
-
-// GetEtype returns an instance of the required etype struct for the etype ID.
-func GetEtype(id int32) (etype.EType, error) {
-	switch id {
-	case etypeID.AES128_CTS_HMAC_SHA1_96:
-		var et Aes128CtsHmacSha96
-		return et, nil
-	case etypeID.AES256_CTS_HMAC_SHA1_96:
-		var et Aes256CtsHmacSha96
-		return et, nil
-	case etypeID.AES128_CTS_HMAC_SHA256_128:
-		var et Aes128CtsHmacSha256128
-		return et, nil
-	case etypeID.AES256_CTS_HMAC_SHA384_192:
-		var et Aes256CtsHmacSha384192
-		return et, nil
-	case etypeID.DES3_CBC_SHA1_KD:
-		var et Des3CbcSha1Kd
-		return et, nil
-	case etypeID.RC4_HMAC:
-		var et RC4HMAC
-		return et, nil
-	default:
-		return nil, fmt.Errorf("unknown or unsupported EType: %d", id)
-	}
-}
-
-// GetChksumEtype returns an instance of the required etype struct for the checksum ID.
-func GetChksumEtype(id int32) (etype.EType, error) {
-	switch id {
-	case chksumtype.HMAC_SHA1_96_AES128:
-		var et Aes128CtsHmacSha96
-		return et, nil
-	case chksumtype.HMAC_SHA1_96_AES256:
-		var et Aes256CtsHmacSha96
-		return et, nil
-	case chksumtype.HMAC_SHA256_128_AES128:
-		var et Aes128CtsHmacSha256128
-		return et, nil
-	case chksumtype.HMAC_SHA384_192_AES256:
-		var et Aes256CtsHmacSha384192
-		return et, nil
-	case chksumtype.HMAC_SHA1_DES3_KD:
-		var et Des3CbcSha1Kd
-		return et, nil
-	case chksumtype.KERB_CHECKSUM_HMAC_MD5:
-		var et RC4HMAC
-		return et, nil
-	//case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED:
-	//	var et RC4HMAC
-	//	return et, nil
-	default:
-		return nil, fmt.Errorf("unknown or unsupported checksum type: %d", id)
-	}
-}
-
-// GetKeyFromPassword generates an encryption key from the principal's password.
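The common package above encodes the RFC 3961 key-usage constants as four big-endian octets plus a role octet (0x99 for checksum, 0xAA for encryption, 0x55 for integrity), and crypto.GetEtype maps IANA etype IDs to implementations. A small sketch of those helpers; the usage number, iteration count and padded input are illustrative:

package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/crypto"
	"gopkg.in/jcmturner/gokrb5.v5/crypto/common"
	"gopkg.in/jcmturner/gokrb5.v5/iana/etypeID"
)

func main() {
	// Usage number 25, checksum variant: four big-endian octets || 0x99.
	fmt.Printf("Kc constant: % x\n", common.GetUsageKc(25)) // 00 00 00 19 99

	// 32768 iterations encoded as the 4-byte hex s2kparams string.
	fmt.Println(common.IterationsToS2Kparams(32768)) // "00008000"

	// PKCS#7 padding to an 8-byte block: "abc" gains five 0x05 bytes.
	padded, _ := common.PKCS7Pad([]byte("abc"), 8)
	fmt.Printf("padded: % x\n", padded)

	// Resolve an etype implementation from its IANA-assigned ID.
	et, err := crypto.GetEtype(etypeID.AES256_CTS_HMAC_SHA1_96)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(et.GetKeyByteSize()) // 32
}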
-func GetKeyFromPassword(passwd string, cname types.PrincipalName, realm string, etypeID int32, pas types.PADataSequence) (types.EncryptionKey, etype.EType, error) {
-	var key types.EncryptionKey
-	et, err := GetEtype(etypeID)
-	if err != nil {
-		return key, et, fmt.Errorf("error getting encryption type: %v", err)
-	}
-	sk2p := et.GetDefaultStringToKeyParams()
-	var salt string
-	var paID int32
-	for _, pa := range pas {
-		switch pa.PADataType {
-		case patype.PA_PW_SALT:
-			if paID > pa.PADataType {
-				continue
-			}
-			salt = string(pa.PADataValue)
-		case patype.PA_ETYPE_INFO:
-			if paID > pa.PADataType {
-				continue
-			}
-			var eti types.ETypeInfo
-			err := eti.Unmarshal(pa.PADataValue)
-			if err != nil {
-				return key, et, fmt.Errorf("error unmarshalling PA Data to PA-ETYPE-INFO: %v", err)
-			}
-			if etypeID != eti[0].EType {
-				et, err = GetEtype(eti[0].EType)
-				if err != nil {
-					return key, et, fmt.Errorf("error getting encryption type: %v", err)
-				}
-			}
-			salt = string(eti[0].Salt)
-		case patype.PA_ETYPE_INFO2:
-			if paID > pa.PADataType {
-				continue
-			}
-			var et2 types.ETypeInfo2
-			err := et2.Unmarshal(pa.PADataValue)
-			if err != nil {
-				return key, et, fmt.Errorf("error unmarshalling PA Data to PA-ETYPE-INFO2: %v", err)
-			}
-			if etypeID != et2[0].EType {
-				et, err = GetEtype(et2[0].EType)
-				if err != nil {
-					return key, et, fmt.Errorf("error getting encryption type: %v", err)
-				}
-			}
-			if len(et2[0].S2KParams) == 4 {
-				sk2p = hex.EncodeToString(et2[0].S2KParams)
-			}
-			salt = et2[0].Salt
-		}
-	}
-	if salt == "" {
-		salt = cname.GetSalt(realm)
-	}
-	k, err := et.StringToKey(passwd, salt, sk2p)
-	if err != nil {
-		return key, et, fmt.Errorf("error deriving key from string: %+v", err)
-	}
-	key = types.EncryptionKey{
-		KeyType:  etypeID,
-		KeyValue: k,
-	}
-	return key, et, nil
-}
-
-// GetEncryptedData encrypts the data provided and returns an EncryptedData type.
-// Pass a usage value of zero to use the key provided directly rather than deriving one.
-func GetEncryptedData(plainBytes []byte, key types.EncryptionKey, usage uint32, kvno int) (types.EncryptedData, error) {
-	var ed types.EncryptedData
-	et, err := GetEtype(key.KeyType)
-	if err != nil {
-		return ed, fmt.Errorf("error getting etype: %v", err)
-	}
-	_, b, err := et.EncryptMessage(key.KeyValue, plainBytes, usage)
-	if err != nil {
-		return ed, err
-	}
-
-	ed = types.EncryptedData{
-		EType:  key.KeyType,
-		Cipher: b,
-		KVNO:   kvno,
-	}
-	return ed, nil
-}
-
-// DecryptEncPart decrypts the EncryptedData.
-func DecryptEncPart(ed types.EncryptedData, key types.EncryptionKey, usage uint32) ([]byte, error) {
-	return DecryptMessage(ed.Cipher, key, usage)
-}
-
-// DecryptMessage decrypts the ciphertext and verifies the integrity.
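Layered on GetEtype, the package-level helpers removed here round-trip a types.EncryptionKey through EncryptedData; a sketch assuming an illustrative salt, usage number and KVNO:

package main

import (
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/crypto"
	"gopkg.in/jcmturner/gokrb5.v5/iana/etypeID"
	"gopkg.in/jcmturner/gokrb5.v5/types"
)

func main() {
	et, _ := crypto.GetEtype(etypeID.AES128_CTS_HMAC_SHA1_96)
	kb, err := et.StringToKey("password", "EXAMPLE.COMalice", et.GetDefaultStringToKeyParams())
	if err != nil {
		log.Fatal(err)
	}
	key := types.EncryptionKey{KeyType: et.GetETypeID(), KeyValue: kb}

	const usage = 3 // illustrative key-usage number
	ed, err := crypto.GetEncryptedData([]byte("enc-part bytes"), key, usage, 1)
	if err != nil {
		log.Fatal(err)
	}
	pt, err := crypto.DecryptEncPart(ed, key, usage)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", pt)
}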
-func DecryptMessage(ciphertext []byte, key types.EncryptionKey, usage uint32) ([]byte, error) { - et, err := GetEtype(key.KeyType) - if err != nil { - return []byte{}, fmt.Errorf("error decrypting: %v", err) - } - b, err := et.DecryptMessage(key.KeyValue, ciphertext, usage) - if err != nil { - return nil, fmt.Errorf("error decrypting: %v", err) - } - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/des3-cbc-sha1-kd.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/des3-cbc-sha1-kd.go deleted file mode 100644 index e068bbb0177..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/des3-cbc-sha1-kd.go +++ /dev/null @@ -1,174 +0,0 @@ -package crypto - -import ( - "crypto/des" - "crypto/hmac" - "crypto/sha1" - "errors" - "hash" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -//RFC: 3961 Section 6.3 - -/* - des3-cbc-hmac-sha1-kd, hmac-sha1-des3-kd - ------------------------------------------------ - protocol key format 24 bytes, parity in low - bit of each - - key-generation seed 21 bytes - length - - hash function SHA-1 - - HMAC output size 160 bits - - message block size 8 bytes - - default string-to-key empty string - params - - encryption and triple-DES encrypt and - decryption functions decrypt, in outer-CBC - mode (cipher block size - 8 octets) - - key generation functions: - - random-to-key DES3random-to-key (see - below) - - string-to-key DES3string-to-key (see - below) - - The des3-cbc-hmac-sha1-kd encryption type is assigned the value - sixteen (16). The hmac-sha1-des3-kd checksum algorithm is assigned a - checksum type number of twelve (12)*/ - -// Des3CbcSha1Kd implements Kerberos encryption type des3-cbc-hmac-sha1-kd -type Des3CbcSha1Kd struct { -} - -// GetETypeID returns the EType ID number. -func (e Des3CbcSha1Kd) GetETypeID() int32 { - return etypeID.DES3_CBC_SHA1_KD -} - -// GetHashID returns the checksum type ID number. -func (e Des3CbcSha1Kd) GetHashID() int32 { - return chksumtype.HMAC_SHA1_DES3_KD -} - -// GetKeyByteSize returns the number of bytes for key of this etype. -func (e Des3CbcSha1Kd) GetKeyByteSize() int { - return 24 -} - -// GetKeySeedBitLength returns the number of bits for the seed for key generation. -func (e Des3CbcSha1Kd) GetKeySeedBitLength() int { - return 21 * 8 -} - -// GetHashFunc returns the hash function for this etype. -func (e Des3CbcSha1Kd) GetHashFunc() func() hash.Hash { - return sha1.New -} - -// GetMessageBlockByteSize returns the block size for the etype's messages. -func (e Des3CbcSha1Kd) GetMessageBlockByteSize() int { - //For traditional CBC mode with padding, it would be the underlying cipher's block size - return des.BlockSize -} - -// GetDefaultStringToKeyParams returns the default key derivation parameters in string form. -func (e Des3CbcSha1Kd) GetDefaultStringToKeyParams() string { - var s string - return s -} - -// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations. -func (e Des3CbcSha1Kd) GetConfounderByteSize() int { - return des.BlockSize -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e Des3CbcSha1Kd) GetHMACBitLength() int { - return e.GetHashFunc()().Size() * 8 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. 
-func (e Des3CbcSha1Kd) GetCypherBlockBitLength() int { - return des.BlockSize * 8 -} - -// StringToKey returns a key derived from the string provided. -func (e Des3CbcSha1Kd) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - if s2kparams != "" { - return []byte{}, errors.New("s2kparams must be an empty string") - } - return rfc3961.DES3StringToKey(secret, salt, e) -} - -// RandomToKey returns a key from the bytes provided. -func (e Des3CbcSha1Kd) RandomToKey(b []byte) []byte { - return rfc3961.DES3RandomToKey(b) -} - -// DeriveRandom generates data needed for key generation. -func (e Des3CbcSha1Kd) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { - r, err := rfc3961.DeriveRandom(protocolKey, usage, e) - return r, err -} - -// DeriveKey derives a key from the protocol key based on the usage value. -func (e Des3CbcSha1Kd) DeriveKey(protocolKey, usage []byte) ([]byte, error) { - r, err := e.DeriveRandom(protocolKey, usage) - if err != nil { - return nil, err - } - return e.RandomToKey(r), nil -} - -// EncryptData encrypts the data provided. -func (e Des3CbcSha1Kd) EncryptData(key, data []byte) ([]byte, []byte, error) { - return rfc3961.DES3EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. -func (e Des3CbcSha1Kd) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { - return rfc3961.DES3EncryptMessage(key, message, usage, e) -} - -// DecryptData decrypts the data provided. -func (e Des3CbcSha1Kd) DecryptData(key, data []byte) ([]byte, error) { - return rfc3961.DES3DecryptData(key, data, e) -} - -// DecryptMessage decrypts the message provided and verifies the integrity of the message. -func (e Des3CbcSha1Kd) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { - return rfc3961.DES3DecryptMessage(key, ciphertext, usage, e) -} - -// VerifyIntegrity checks the integrity of the plaintext message. -func (e Des3CbcSha1Kd) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { - return rfc3961.VerifyIntegrity(protocolKey, ct, pt, usage, e) -} - -// GetChecksumHash returns a keyed checksum hash of the bytes provided. -func (e Des3CbcSha1Kd) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { - return common.GetHash(data, protocolKey, common.GetUsageKc(usage), e) -} - -// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. -func (e Des3CbcSha1Kd) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { - c, err := e.GetChecksumHash(protocolKey, data, usage) - if err != nil { - return false - } - return hmac.Equal(chksum, c) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/etype/etype.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/etype/etype.go deleted file mode 100644 index ee7510e2512..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/etype/etype.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package etype provides the Kerberos Encryption Type interface -package etype - -import "hash" - -// EType is the interface defining the Encryption Type. 
-type EType interface {
-	GetETypeID() int32
-	GetHashID() int32
-	GetKeyByteSize() int
-	GetKeySeedBitLength() int                                   // key-generation seed length, k
-	GetDefaultStringToKeyParams() string                        // default string-to-key parameters (s2kparams)
-	StringToKey(secret, salt, s2kparams string) ([]byte, error) // string-to-key (UTF-8 string, UTF-8 string, opaque)->(protocol-key)
-	RandomToKey(b []byte) []byte                                // random-to-key (bitstring[K])->(protocol-key)
-	GetHMACBitLength() int                                      // HMAC output size, h
-	GetMessageBlockByteSize() int                               // message block size, m
-	EncryptData(key, data []byte) ([]byte, []byte, error)       // E function - encrypt (specific-key, state, octet string)->(state, octet string)
-	EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error)
-	DecryptData(key, data []byte) ([]byte, error) // D function
-	DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error)
-	GetCypherBlockBitLength() int // cipher block size, c
-	GetConfounderByteSize() int   // This is the same as the cipher block size but in bytes.
-	DeriveKey(protocolKey, usage []byte) ([]byte, error)    // DK key-derivation (protocol-key, integer)->(specific-key)
-	DeriveRandom(protocolKey, usage []byte) ([]byte, error) // DR pseudo-random (protocol-key, octet-string)->(octet-string)
-	VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool
-	GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error)
-	VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool
-	GetHashFunc() func() hash.Hash
-}
diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rc4-hmac.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rc4-hmac.go
deleted file mode 100644
index 741bd284b4d..00000000000
--- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rc4-hmac.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package crypto
-
-import (
-	"bytes"
-	"crypto/md5"
-	"hash"
-	"io"
-
-	"golang.org/x/crypto/md4"
-	"gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961"
-	"gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757"
-	"gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype"
-	"gopkg.in/jcmturner/gokrb5.v5/iana/etypeID"
-)
-
-//http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8u40-b25/sun/security/krb5/internal/crypto/dk/ArcFourCrypto.java#ArcFourCrypto.encrypt%28byte%5B%5D%2Cint%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cbyte%5B%5D%2Cint%2Cint%29
-
-// RC4HMAC implements Kerberos encryption type rc4-hmac
-type RC4HMAC struct {
-}
-
-// GetETypeID returns the EType ID number.
-func (e RC4HMAC) GetETypeID() int32 {
-	return etypeID.RC4_HMAC
-}
-
-// GetHashID returns the checksum type ID number.
-func (e RC4HMAC) GetHashID() int32 {
-	return chksumtype.KERB_CHECKSUM_HMAC_MD5
-}
-
-// GetKeyByteSize returns the number of bytes for key of this etype.
-func (e RC4HMAC) GetKeyByteSize() int {
-	return 16
-}
-
-// GetKeySeedBitLength returns the number of bits for the seed for key generation.
-func (e RC4HMAC) GetKeySeedBitLength() int {
-	return e.GetKeyByteSize() * 8
-}
-
-// GetHashFunc returns the hash function for this etype.
-func (e RC4HMAC) GetHashFunc() func() hash.Hash {
-	return md5.New
-}
-
-// GetMessageBlockByteSize returns the block size for the etype's messages.
-func (e RC4HMAC) GetMessageBlockByteSize() int {
-	return 1
-}
-
-// GetDefaultStringToKeyParams returns the default key derivation parameters in string form.
-func (e RC4HMAC) GetDefaultStringToKeyParams() string {
-	return ""
-}
-
-// GetConfounderByteSize returns the byte count for confounder to be used during cryptographic operations.
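Every concrete type in this file set satisfies the EType interface above, so callers can treat them uniformly. As illustration, a hypothetical helper (not part of the deleted package) that reports the RFC 3961 parameters any implementation exposes:

package main

import (
	"fmt"

	"gopkg.in/jcmturner/gokrb5.v5/crypto"
	"gopkg.in/jcmturner/gokrb5.v5/crypto/etype"
)

// describe prints the RFC 3961 parameters exposed by an EType implementation.
func describe(e etype.EType) {
	fmt.Printf("etype %d: key %d bytes, seed %d bits, HMAC %d bits, block %d bytes\n",
		e.GetETypeID(), e.GetKeyByteSize(), e.GetKeySeedBitLength(),
		e.GetHMACBitLength(), e.GetMessageBlockByteSize())
}

func main() {
	for _, e := range []etype.EType{
		crypto.Aes128CtsHmacSha96{},
		crypto.Aes256CtsHmacSha384192{},
		crypto.Des3CbcSha1Kd{},
		crypto.RC4HMAC{},
	} {
		describe(e)
	}
}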
-func (e RC4HMAC) GetConfounderByteSize() int { - return 8 -} - -// GetHMACBitLength returns the bit count size of the integrity hash. -func (e RC4HMAC) GetHMACBitLength() int { - return md5.Size * 8 -} - -// GetCypherBlockBitLength returns the bit count size of the cypher block. -func (e RC4HMAC) GetCypherBlockBitLength() int { - return 8 // doesn't really apply -} - -// StringToKey returns a key derived from the string provided. -func (e RC4HMAC) StringToKey(secret string, salt string, s2kparams string) ([]byte, error) { - return rfc4757.StringToKey(secret) -} - -// RandomToKey returns a key from the bytes provided. -func (e RC4HMAC) RandomToKey(b []byte) []byte { - r := bytes.NewReader(b) - h := md4.New() - io.Copy(h, r) - return h.Sum(nil) -} - -// EncryptData encrypts the data provided. -func (e RC4HMAC) EncryptData(key, data []byte) ([]byte, []byte, error) { - b, err := rfc4757.EncryptData(key, data, e) - return []byte{}, b, err -} - -// EncryptMessage encrypts the message provided and concatenates it with the integrity hash to create an encrypted message. -func (e RC4HMAC) EncryptMessage(key, message []byte, usage uint32) ([]byte, []byte, error) { - b, err := rfc4757.EncryptMessage(key, message, usage, false, e) - return []byte{}, b, err -} - -// DecryptData decrypts the data provided. -func (e RC4HMAC) DecryptData(key, data []byte) ([]byte, error) { - return rfc4757.DecryptData(key, data, e) -} - -// DecryptMessage decrypts the message provided and verifies the integrity of the message. -func (e RC4HMAC) DecryptMessage(key, ciphertext []byte, usage uint32) ([]byte, error) { - return rfc4757.DecryptMessage(key, ciphertext, usage, false, e) -} - -// DeriveKey derives a key from the protocol key based on the usage value. -func (e RC4HMAC) DeriveKey(protocolKey, usage []byte) ([]byte, error) { - return rfc4757.HMAC(protocolKey, usage), nil -} - -// DeriveRandom generates data needed for key generation. -func (e RC4HMAC) DeriveRandom(protocolKey, usage []byte) ([]byte, error) { - return rfc3961.DeriveRandom(protocolKey, usage, e) -} - -// VerifyIntegrity checks the integrity of the plaintext message. -func (e RC4HMAC) VerifyIntegrity(protocolKey, ct, pt []byte, usage uint32) bool { - return rfc4757.VerifyIntegrity(protocolKey, pt, ct, e) -} - -// GetChecksumHash returns a keyed checksum hash of the bytes provided. -func (e RC4HMAC) GetChecksumHash(protocolKey, data []byte, usage uint32) ([]byte, error) { - return rfc4757.Checksum(protocolKey, usage, data) -} - -// VerifyChecksum compares the checksum of the message bytes is the same as the checksum provided. 
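For rc4-hmac, StringToKey delegates to rfc4757.StringToKey, which is the MD4 NT hash of the UTF-16LE password; the salt and s2kparams arguments are accepted but ignored, as this sketch (with illustrative inputs) shows:

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"gopkg.in/jcmturner/gokrb5.v5/crypto"
)

func main() {
	e := crypto.RC4HMAC{}
	// Salt and s2kparams have no effect for this etype.
	k, err := e.StringToKey("password", "ignored-salt", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hex.EncodeToString(k)) // 16-byte NT hash
}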
-func (e RC4HMAC) VerifyChecksum(protocolKey, data, chksum []byte, usage uint32) bool { - checksum, err := rfc4757.Checksum(protocolKey, usage, data) - if err != nil { - return false - } - if !bytes.Equal(checksum, chksum) { - return false - } - return true -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/encryption.go deleted file mode 100644 index d19ed1009eb..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/encryption.go +++ /dev/null @@ -1,125 +0,0 @@ -// Package rfc3961 provides encryption and checksum methods as specified in RFC 3961 -package rfc3961 - -import ( - "crypto/cipher" - "crypto/des" - "crypto/hmac" - "crypto/rand" - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" -) - -// DES3EncryptData encrypts the data provided using DES3 and methods specific to the etype provided. -func DES3EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { - if len(key) != e.GetKeyByteSize() { - return nil, nil, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - data, _ = common.ZeroPad(data, e.GetMessageBlockByteSize()) - - block, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, nil, fmt.Errorf("error creating cipher: %v", err) - } - - //RFC 3961: initial cipher state All bits zero - ivz := make([]byte, des.BlockSize) - - ct := make([]byte, len(data)) - mode := cipher.NewCBCEncrypter(block, ivz) - mode.CryptBlocks(ct, data) - return ct[len(ct)-e.GetMessageBlockByteSize():], ct, nil -} - -// DES3EncryptMessage encrypts the message provided using DES3 and methods specific to the etype provided. -// The encrypted data is concatenated with its integrity hash to create an encrypted message. -func DES3EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { - //confounder - c := make([]byte, e.GetConfounderByteSize()) - _, err := rand.Read(c) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) - } - plainBytes := append(c, message...) - plainBytes, _ = common.ZeroPad(plainBytes, e.GetMessageBlockByteSize()) - - // Derive key for encryption from usage - var k []byte - if usage != 0 { - k, err = e.DeriveKey(key, common.GetUsageKe(usage)) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) - } - } - - iv, b, err := e.EncryptData(k, plainBytes) - if err != nil { - return iv, b, fmt.Errorf("error encrypting data: %v", err) - } - - // Generate and append integrity hash - ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) - if err != nil { - return iv, b, fmt.Errorf("error encrypting data: %v", err) - } - b = append(b, ih...) - return iv, b, nil -} - -// DES3DecryptData decrypts the data provided using DES3 and methods specific to the etype provided. 
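The DES3EncryptData hunk above is plain CBC-mode triple DES with an all-zero initial cipher state over zero-padded input, and the DES3DecryptData that follows reverses it. A minimal standalone Go sketch of that same zero-IV CBC construction using only the standard library; the key and plaintext here are illustrative placeholders, not values from this patch:

package main

import (
	"crypto/cipher"
	"crypto/des"
	"encoding/hex"
	"fmt"
)

// zeroPad pads b with zero bytes up to a multiple of blockSize,
// mirroring the common.ZeroPad step in the removed code.
func zeroPad(b []byte, blockSize int) []byte {
	if rem := len(b) % blockSize; rem != 0 {
		b = append(b, make([]byte, blockSize-rem)...)
	}
	return b
}

func main() {
	// Illustrative 24-byte 3DES key; a real key comes from RFC 3961 key derivation.
	key := make([]byte, 24)
	for i := range key {
		key[i] = byte(i + 1)
	}
	pt := zeroPad([]byte("kerberos message"), des.BlockSize)

	block, err := des.NewTripleDESCipher(key)
	if err != nil {
		panic(err)
	}
	// RFC 3961: the initial cipher state is all bits zero.
	ivz := make([]byte, des.BlockSize)
	ct := make([]byte, len(pt))
	cipher.NewCBCEncrypter(block, ivz).CryptBlocks(ct, pt)
	fmt.Println(hex.EncodeToString(ct))
}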
-func DES3DecryptData(key, data []byte, e etype.EType) ([]byte, error) { - if len(key) != e.GetKeyByteSize() { - return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - - if len(data) < des.BlockSize || len(data)%des.BlockSize != 0 { - return []byte{}, errors.New("ciphertext is not a multiple of the block size") - } - block, err := des.NewTripleDESCipher(key) - if err != nil { - return []byte{}, fmt.Errorf("error creating cipher: %v", err) - } - pt := make([]byte, len(data)) - ivz := make([]byte, des.BlockSize) - mode := cipher.NewCBCDecrypter(block, ivz) - mode.CryptBlocks(pt, data) - return pt, nil -} - -// DES3DecryptMessage decrypts the message provided using DES3 and methods specific to the etype provided. -// The integrity of the message is also verified. -func DES3DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { - //Derive the key - k, err := e.DeriveKey(key, common.GetUsageKe(usage)) - if err != nil { - return nil, fmt.Errorf("error deriving key: %v", err) - } - // Strip off the checksum from the end - b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) - if err != nil { - return nil, fmt.Errorf("error decrypting: %v", err) - } - //Verify checksum - if !e.VerifyIntegrity(key, ciphertext, b, usage) { - return nil, errors.New("error decrypting: integrity verification failed") - } - //Remove the confounder bytes - return b[e.GetConfounderByteSize():], nil -} - -// VerifyIntegrity verifies the integrity of cipertext bytes ct. -func VerifyIntegrity(key, ct, pt []byte, usage uint32, etype etype.EType) bool { - //The ciphertext output is the concatenation of the output of the basic - //encryption function E and a (possibly truncated) HMAC using the - //specified hash function H, both applied to the plaintext with a - //random confounder prefix and sufficient padding to bring it to a - //multiple of the message block size. When the HMAC is computed, the - //key is used in the protocol key form. - h := make([]byte, etype.GetHMACBitLength()/8) - copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:]) - expectedMAC, _ := common.GetIntegrityHash(pt, key, usage, etype) - return hmac.Equal(h, expectedMAC) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/keyDerivation.go deleted file mode 100644 index b9f0dc9e30f..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/keyDerivation.go +++ /dev/null @@ -1,134 +0,0 @@ -package rfc3961 - -import ( - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" -) - -const ( - prfconstant = "prf" -) - -// DeriveRandom implements the RFC 3961 defined function: DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)). -// -// key: base key or protocol key. Likely to be a key from a keytab file. -// -// usage: a constant. -// -// n: block size in bits (not bytes) - note if you use something like aes.BlockSize this is in bytes. -// -// k: key length / key seed length in bits. Eg. for AES256 this value is 256. -// -// e: the encryption etype function to use. -func DeriveRandom(key, usage []byte, e etype.EType) ([]byte, error) { - n := e.GetCypherBlockBitLength() - k := e.GetKeySeedBitLength() - //Ensure the usage constant is at least the size of the cypher block size. Pass it through the nfold algorithm that will "stretch" it if needs be. 
- nFoldUsage := Nfold(usage, n) - //k-truncate implemented by creating a byte array the size of k (k is in bits hence /8) - out := make([]byte, k/8) - - /*If the output of E is shorter than k bits, it is fed back into the encryption as many times as necessary. - The construct is as follows (where | indicates concatentation): - - K1 = E(Key, n-fold(Constant), initial-cipher-state) - K2 = E(Key, K1, initial-cipher-state) - K3 = E(Key, K2, initial-cipher-state) - K4 = ... - - DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...)*/ - _, K, err := e.EncryptData(key, nFoldUsage) - if err != nil { - return out, err - } - for i := copy(out, K); i < len(out); { - _, K, _ = e.EncryptData(key, K) - i = i + copy(out[i:], K) - } - return out, nil -} - -// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. -func DeriveKey(protocolKey, usage []byte, e etype.EType) ([]byte, error) { - r, err := e.DeriveRandom(protocolKey, usage) - if err != nil { - return nil, err - } - return e.RandomToKey(r), nil -} - -// RandomToKey returns a key from the bytes provided according to the definition in RFC 3961. -func RandomToKey(b []byte) []byte { - return b -} - -// DES3RandomToKey returns a key from the bytes provided according to the definition in RFC 3961 for DES3 etypes. -func DES3RandomToKey(b []byte) []byte { - r := stretch56Bits(b[:7]) - r2 := stretch56Bits(b[7:14]) - r = append(r, r2...) - r3 := stretch56Bits(b[14:21]) - r = append(r, r3...) - return r -} - -// DES3StringToKey returns a key derived from the string provided according to the definition in RFC 3961 for DES3 etypes. -func DES3StringToKey(secret, salt string, e etype.EType) ([]byte, error) { - s := secret + salt - tkey := e.RandomToKey(Nfold([]byte(s), e.GetKeySeedBitLength())) - return e.DeriveKey(tkey, []byte("kerberos")) -} - -// PseudoRandom function as defined in RFC 3961 -func PseudoRandom(key, b []byte, e etype.EType) ([]byte, error) { - h := e.GetHashFunc()() - h.Write(b) - tmp := h.Sum(nil)[:e.GetMessageBlockByteSize()] - k, err := e.DeriveKey(key, []byte(prfconstant)) - if err != nil { - return []byte{}, err - } - _, prf, err := e.EncryptData(k, tmp) - if err != nil { - return []byte{}, err - } - return prf, nil -} - -func stretch56Bits(b []byte) []byte { - d := make([]byte, len(b), len(b)) - copy(d, b) - var lb byte - for i, v := range d { - bv, nb := calcEvenParity(v) - d[i] = nb - if bv != 0 { - lb = lb | (1 << uint(i+1)) - } else { - lb = lb &^ (1 << uint(i+1)) - } - } - _, lb = calcEvenParity(lb) - d = append(d, lb) - return d -} - -func calcEvenParity(b byte) (uint8, uint8) { - lowestbit := b & 0x01 - // c counter of 1s in the first 7 bits of the byte - var c int - // Iterate over the highest 7 bits (hence p starts at 1 not zero) and count the 1s. - for p := 1; p < 8; p++ { - v := b & (1 << uint(p)) - if v != 0 { - c++ - } - } - if c%2 == 0 { - //Even number of 1s so set parity to 1 - b = b | 1 - } else { - //Odd number of 1s so set parity to 0 - b = b &^ 1 - } - return lowestbit, b -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/nfold.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/nfold.go deleted file mode 100644 index 779d1c6ef66..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961/nfold.go +++ /dev/null @@ -1,128 +0,0 @@ -package rfc3961 - -/* -Implementation of the n-fold algorithm as defined in RFC 3961. 
-
-n-fold is an algorithm that takes m input bits and "stretches" them
-to form n output bits with equal contribution from each input bit to
-the output, as described in [Blumenthal96]:
-
-We first define a primitive called n-folding, which takes a
-variable-length input block and produces a fixed-length output
-sequence. The intent is to give each input bit approximately
-equal weight in determining the value of each output bit. Note
-that whenever we need to treat a string of octets as a number, the
-assumed representation is Big-Endian -- Most Significant Byte
-first.
-
-To n-fold a number X, replicate the input value to a length that
-is the least common multiple of n and the length of X. Before
-each repetition, the input is rotated to the right by 13 bit
-positions. The successive n-bit chunks are added together using
-1's-complement addition (that is, with end-around carry) to yield
-a n-bit result....
-*/
-
-/* Credits
-This golang implementation of nfold used the following project for help with implementation detail.
-Although their source is in java it was helpful as a reference implementation of the RFC.
-You can find the source code of their open source project along with license information below.
-We acknowledge and are grateful to these developers for their contributions to open source
-
-Project: Apache Directory (http://directory.apache.org/)
-https://svn.apache.org/repos/asf/directory/apacheds/tags/1.5.1/kerberos-shared/src/main/java/org/apache/directory/server/kerberos/shared/crypto/encryption/NFold.java
-License: http://www.apache.org/licenses/LICENSE-2.0
-*/
-
-// Nfold expands the key to ensure it is not smaller than one cipher block.
-// Defined in RFC 3961.
-//
-// m input bytes that will be "stretched" to the least common multiple of n bits and the bit length of m.
-func Nfold(m []byte, n int) []byte {
-	k := len(m) * 8
-
-	//Get the lowest common multiple of the two bit sizes
-	lcm := lcm(n, k)
-	replicate := lcm / k
-	var sumBytes []byte
-
-	for i := 0; i < replicate; i++ {
-		rotation := 13 * i
-		sumBytes = append(sumBytes, rotateRight(m, rotation)...)
- } - - nfold := make([]byte, n/8) - sum := make([]byte, n/8) - for i := 0; i < lcm/n; i++ { - for j := 0; j < n/8; j++ { - sum[j] = sumBytes[j+(i*len(sum))] - } - nfold = onesComplementAddition(nfold, sum) - } - return nfold -} - -func onesComplementAddition(n1, n2 []byte) []byte { - numBits := len(n1) * 8 - out := make([]byte, numBits/8) - carry := 0 - for i := numBits - 1; i > -1; i-- { - n1b := getBit(&n1, i) - n2b := getBit(&n2, i) - s := n1b + n2b + carry - - if s == 0 || s == 1 { - setBit(&out, i, s) - carry = 0 - } else if s == 2 { - carry = 1 - } else if s == 3 { - setBit(&out, i, 1) - carry = 1 - } - } - if carry == 1 { - carryArray := make([]byte, len(n1)) - carryArray[len(carryArray)-1] = 1 - out = onesComplementAddition(out, carryArray) - } - return out -} - -func rotateRight(b []byte, step int) []byte { - out := make([]byte, len(b)) - bitLen := len(b) * 8 - for i := 0; i < bitLen; i++ { - v := getBit(&b, i) - setBit(&out, (i+step)%bitLen, v) - } - return out -} - -func lcm(x, y int) int { - return (x * y) / gcd(x, y) -} - -func gcd(x, y int) int { - for y != 0 { - x, y = y, x%y - } - return x -} - -func getBit(b *[]byte, p int) int { - pByte := p / 8 - pBit := uint(p % 8) - vByte := (*b)[pByte] - vInt := int(vByte >> (8 - (pBit + 1)) & 0x0001) - return vInt -} - -func setBit(b *[]byte, p, v int) { - pByte := p / 8 - pBit := uint(p % 8) - oldByte := (*b)[pByte] - var newByte byte - newByte = byte(v<<(8-(pBit+1))) | oldByte - (*b)[pByte] = newByte -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/encryption.go deleted file mode 100644 index 41736124517..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/encryption.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package rfc3962 provides encryption and checksum methods as specified in RFC 3962 -package rfc3962 - -import ( - "crypto/rand" - "errors" - "fmt" - - "gopkg.in/jcmturner/aescts.v1" - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" -) - -// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 3962. -func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { - if len(key) != e.GetKeyByteSize() { - return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - ivz := make([]byte, e.GetCypherBlockBitLength()/8) - return aescts.Encrypt(key, ivz, data) -} - -// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. -// The encrypted data is concatenated with its integrity hash to create an encrypted message. -func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) { - if len(key) != e.GetKeyByteSize() { - return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - //confounder - c := make([]byte, e.GetConfounderByteSize()) - _, err := rand.Read(c) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err) - } - plainBytes := append(c, message...) 
- - // Derive key for encryption from usage - var k []byte - if usage != 0 { - k, err = e.DeriveKey(key, common.GetUsageKe(usage)) - if err != nil { - return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err) - } - } - - // Encrypt the data - iv, b, err := e.EncryptData(k, plainBytes) - if err != nil { - return iv, b, fmt.Errorf("error encrypting data: %v", err) - } - - // Generate and append integrity hash - ih, err := common.GetIntegrityHash(plainBytes, key, usage, e) - if err != nil { - return iv, b, fmt.Errorf("error encrypting data: %v", err) - } - b = append(b, ih...) - return iv, b, nil -} - -// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 3962. -func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { - if len(key) != e.GetKeyByteSize() { - return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - ivz := make([]byte, e.GetCypherBlockBitLength()/8) - return aescts.Decrypt(key, ivz, data) -} - -// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 3962. -// The integrity of the message is also verified. -func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) { - //Derive the key - k, err := e.DeriveKey(key, common.GetUsageKe(usage)) - if err != nil { - return nil, fmt.Errorf("error deriving key: %v", err) - } - // Strip off the checksum from the end - b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8]) - if err != nil { - return nil, err - } - //Verify checksum - if !e.VerifyIntegrity(key, ciphertext, b, usage) { - return nil, errors.New("integrity verification failed") - } - //Remove the confounder bytes - return b[e.GetConfounderByteSize():], nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/keyDerivation.go deleted file mode 100644 index a25d4483d48..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962/keyDerivation.go +++ /dev/null @@ -1,58 +0,0 @@ -package rfc3962 - -import ( - "encoding/binary" - "encoding/hex" - "errors" - - "github.com/jcmturner/gofork/x/crypto/pbkdf2" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" -) - -const ( - s2kParamsZero = 4294967296 -) - -// StringToKey returns a key derived from the string provided according to the definition in RFC 3961. -func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { - i, err := S2KparamsToItertions(s2kparams) - if err != nil { - return nil, err - } - return StringToKeyIter(secret, salt, i, e) -} - -// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 -func StringToPBKDF2(secret, salt string, iterations int64, e etype.EType) []byte { - return pbkdf2.Key64([]byte(secret), []byte(salt), iterations, int64(e.GetKeyByteSize()), e.GetHashFunc()) -} - -// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 3961. 
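The rfc3962 string-to-key shown above is PBKDF2 followed by the RFC 3961 DK step. A small sketch of just the PBKDF2 stage, assuming golang.org/x/crypto/pbkdf2 is available, checked against the test vector in RFC 3962 appendix B:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// RFC 3962 appendix B: 1 iteration, pass phrase "password",
	// salt "ATHENA.MIT.EDUraeburn", 128-bit PBKDF2 output.
	tkey := pbkdf2.Key([]byte("password"), []byte("ATHENA.MIT.EDUraeburn"), 1, 16, sha1.New)
	fmt.Println(hex.EncodeToString(tkey)) // expect cdedb5281bb2f801565a1122b2563515
	// The removed code then runs RandomToKey and DK(tkey, "kerberos")
	// over this intermediate key to produce the final protocol key.
}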
-func StringToKeyIter(secret, salt string, iterations int64, e etype.EType) ([]byte, error) { - tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) - return e.DeriveKey(tkey, []byte("kerberos")) -} - -// S2KparamsToItertions converts the string representation of iterations to an integer -func S2KparamsToItertions(s2kparams string) (int64, error) { - //process s2kparams string - //The parameter string is four octets indicating an unsigned - //number in big-endian order. This is the number of iterations to be - //performed. If the value is 00 00 00 00, the number of iterations to - //be performed is 4,294,967,296 (2**32). - var i uint32 - if len(s2kparams) != 8 { - return int64(s2kParamsZero), errors.New("invalid s2kparams length") - } - b, err := hex.DecodeString(s2kparams) - if err != nil { - return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot decode string to bytes") - } - i = binary.BigEndian.Uint32(b) - //buf := bytes.NewBuffer(b) - //err = binary.Read(buf, binary.BigEndian, &i) - if err != nil { - return int64(s2kParamsZero), errors.New("invalid s2kparams, cannot convert to big endian int32") - } - return int64(i), nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/checksum.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/checksum.go deleted file mode 100644 index 45276e95322..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/checksum.go +++ /dev/null @@ -1,40 +0,0 @@ -package rfc4757 - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "io" -) - -// Checksum returns a hash of the data in accordance with RFC 4757 -func Checksum(key []byte, usage uint32, data []byte) ([]byte, error) { - // Create hashing key - s := append([]byte(`signaturekey`), byte(0x00)) //includes zero octet at end - mac := hmac.New(md5.New, key) - mac.Write(s) - Ksign := mac.Sum(nil) - - // Format data - tb := UsageToMSMsgType(usage) - p := append(tb, data...) - h := md5.New() - rb := bytes.NewReader(p) - _, err := io.Copy(h, rb) - if err != nil { - return []byte{}, err - } - tmp := h.Sum(nil) - - // Generate HMAC - mac = hmac.New(md5.New, Ksign) - mac.Write(tmp) - return mac.Sum(nil), nil -} - -// HMAC returns a keyed MD5 checksum of the data -func HMAC(key []byte, data []byte) []byte { - mac := hmac.New(md5.New, key) - mac.Write(data) - return mac.Sum(nil) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/encryption.go deleted file mode 100644 index 3c8505dbd1d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/encryption.go +++ /dev/null @@ -1,83 +0,0 @@ -// Package rfc4757 provides encryption and checksum methods as specified in RFC 4757 -package rfc4757 - -import ( - "bytes" - "crypto/rand" - "crypto/rc4" - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" -) - -// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 4757. 
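The S2KparamsToItertions hunk above parses s2kparams as four big-endian octets carried as an 8-character hex string. A worked example of that decoding:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	// s2kparams carries the PBKDF2 iteration count. For example the
	// string "00001000" decodes to 4096 iterations.
	b, err := hex.DecodeString("00001000")
	if err != nil {
		panic(err)
	}
	fmt.Println(binary.BigEndian.Uint32(b)) // 4096
	// Per RFC 3962, the special value "00000000" means 2^32 iterations,
	// which is why the removed code falls back to its s2kParamsZero constant.
}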
-func EncryptData(key, data []byte, e etype.EType) ([]byte, error) { - if len(key) != e.GetKeyByteSize() { - return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - rc4Cipher, err := rc4.NewCipher(key) - if err != nil { - return []byte{}, fmt.Errorf("error creating RC4 cipher: %v", err) - } - ed := make([]byte, len(data)) - copy(ed, data) - rc4Cipher.XORKeyStream(ed, ed) - rc4Cipher.Reset() - return ed, nil -} - -// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 4757. -func DecryptData(key, data []byte, e etype.EType) ([]byte, error) { - return EncryptData(key, data, e) -} - -// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. -// The encrypted data is concatenated with its RC4 header containing integrity checksum and confounder to create an encrypted message. -func EncryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { - confounder := make([]byte, e.GetConfounderByteSize()) // size = 8 - _, err := rand.Read(confounder) - if err != nil { - return []byte{}, fmt.Errorf("error generating confounder: %v", err) - } - k1 := key - k2 := HMAC(k1, UsageToMSMsgType(usage)) - toenc := append(confounder, data...) - chksum := HMAC(k2, toenc) - k3 := HMAC(k2, chksum) - - ed, err := EncryptData(k3, toenc, e) - if err != nil { - return []byte{}, fmt.Errorf("error encrypting data: %v", err) - } - - msg := append(chksum, ed...) - return msg, nil -} - -// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 4757. -// The integrity of the message is also verified. -func DecryptMessage(key, data []byte, usage uint32, export bool, e etype.EType) ([]byte, error) { - checksum := data[:e.GetHMACBitLength()/8] - ct := data[e.GetHMACBitLength()/8:] - _, k2, k3 := deriveKeys(key, checksum, usage, export) - - pt, err := DecryptData(k3, ct, e) - if err != nil { - return []byte{}, fmt.Errorf("error decrypting data: %v", err) - } - - if !VerifyIntegrity(k2, pt, data, e) { - return []byte{}, errors.New("integrity checksum incorrect") - } - return pt[e.GetConfounderByteSize():], nil -} - -// VerifyIntegrity checks the integrity checksum of the data matches that calculated from the decrypted data. -func VerifyIntegrity(key, pt, data []byte, e etype.EType) bool { - chksum := HMAC(key, pt) - if bytes.Equal(chksum, data[:e.GetHMACBitLength()/8]) { - return true - } - return false -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/keyDerivation.go deleted file mode 100644 index 5e7ec4800bf..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/keyDerivation.go +++ /dev/null @@ -1,55 +0,0 @@ -package rfc4757 - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - - "golang.org/x/crypto/md4" -) - -// StringToKey returns a key derived from the string provided according to the definition in RFC 4757. 
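The RFC 4757 StringToKey that follows is MD4 over the UTF-16LE encoding of the password, i.e. the NT hash. A standalone sketch that uses unicode/utf16 in place of the per-rune hex round trip in the removed code (the two agree for characters in the Basic Multilingual Plane); the expected digest is the well-known NT hash of "password":

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"unicode/utf16"

	"golang.org/x/crypto/md4"
)

func main() {
	secret := "password"
	units := utf16.Encode([]rune(secret))
	b := make([]byte, 2*len(units))
	for i, u := range units {
		binary.LittleEndian.PutUint16(b[2*i:], u) // UTF-16LE byte order
	}
	h := md4.New()
	h.Write(b)
	fmt.Println(hex.EncodeToString(h.Sum(nil))) // 8846f7eaee8fb117ad06bdd830b7586c
}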
-func StringToKey(secret string) ([]byte, error) { - b := make([]byte, len(secret)*2, len(secret)*2) - for i, r := range secret { - u := fmt.Sprintf("%04x", r) - c, err := hex.DecodeString(u) - if err != nil { - return []byte{}, errors.New("character could not be encoded") - } - // Swap round the two bytes to make little endian as we put into byte slice - b[2*i] = c[1] - b[2*i+1] = c[0] - } - r := bytes.NewReader(b) - h := md4.New() - _, err := io.Copy(h, r) - if err != nil { - return []byte{}, err - } - return h.Sum(nil), nil -} - -func deriveKeys(key, checksum []byte, usage uint32, export bool) (k1, k2, k3 []byte) { - //if export { - // L40 := make([]byte, 14, 14) - // copy(L40, []byte(`fortybits`)) - // k1 = HMAC(key, L40) - //} else { - // tb := MessageTypeBytes(usage) - // k1 = HMAC(key, tb) - //} - //k2 = k1[:16] - //if export { - // mask := []byte{0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB,0xAB} - // copy(k1[7:16], mask) - //} - //k3 = HMAC(k1, checksum) - //return - k1 = key - k2 = HMAC(k1, UsageToMSMsgType(usage)) - k3 = HMAC(k2, checksum) - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/msgtype.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/msgtype.go deleted file mode 100644 index 068588d3baa..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757/msgtype.go +++ /dev/null @@ -1,20 +0,0 @@ -package rfc4757 - -import "encoding/binary" - -// UsageToMSMsgType converts Kerberos key usage numbers to Microsoft message type encoded as a little-endian four byte slice. -func UsageToMSMsgType(usage uint32) []byte { - // Translate usage numbers to the Microsoft T numbers - switch usage { - case 3: - usage = 8 - case 9: - usage = 8 - case 23: - usage = 13 - } - // Now convert to bytes - tb := make([]byte, 4) // We force an int32 input so we can't go over 4 bytes - binary.PutUvarint(tb, uint64(usage)) - return tb -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/encryption.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/encryption.go deleted file mode 100644 index caa6098e6df..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/encryption.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package rfc8009 provides encryption and checksum methods as specified in RFC 8009 -package rfc8009 - -import ( - "crypto/aes" - "crypto/hmac" - "crypto/rand" - "errors" - "fmt" - - "gopkg.in/jcmturner/aescts.v1" - "gopkg.in/jcmturner/gokrb5.v5/crypto/common" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -// EncryptData encrypts the data provided using methods specific to the etype provided as defined in RFC 8009. -func EncryptData(key, data []byte, e etype.EType) ([]byte, []byte, error) { - kl := e.GetKeyByteSize() - if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { - kl = 32 - } - if len(key) != kl { - return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", e.GetKeyByteSize(), len(key)) - } - ivz := make([]byte, aes.BlockSize) - return aescts.Encrypt(key, ivz, data) -} - -// EncryptMessage encrypts the message provided using the methods specific to the etype provided as defined in RFC 8009. -// The encrypted data is concatenated with its integrity hash to create an encrypted message. 
-func EncryptMessage(key, message []byte, usage uint32, e etype.EType) ([]byte, []byte, error) {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	if len(key) != kl {
-		return []byte{}, []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
-	}
-	//confounder
-	c := make([]byte, e.GetConfounderByteSize())
-	_, err := rand.Read(c)
-	if err != nil {
-		return []byte{}, []byte{}, fmt.Errorf("could not generate random confounder: %v", err)
-	}
-	plainBytes := append(c, message...)
-
-	// Derive key for encryption from usage
-	var k []byte
-	if usage != 0 {
-		k, err = e.DeriveKey(key, common.GetUsageKe(usage))
-		if err != nil {
-			return []byte{}, []byte{}, fmt.Errorf("error deriving key for encryption: %v", err)
-		}
-	}
-
-	// Encrypt the data
-	iv, b, err := e.EncryptData(k, plainBytes)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-
-	ivz := make([]byte, e.GetConfounderByteSize())
-	ih, err := GetIntegityHash(ivz, b, key, usage, e)
-	if err != nil {
-		return iv, b, fmt.Errorf("error encrypting data: %v", err)
-	}
-	b = append(b, ih...)
-	return iv, b, nil
-}
-
-// DecryptData decrypts the data provided using the methods specific to the etype provided as defined in RFC 8009.
-func DecryptData(key, data []byte, e etype.EType) ([]byte, error) {
-	kl := e.GetKeyByteSize()
-	if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 {
-		kl = 32
-	}
-	if len(key) != kl {
-		return []byte{}, fmt.Errorf("incorrect keysize: expected: %v actual: %v", kl, len(key))
-	}
-	ivz := make([]byte, aes.BlockSize)
-	return aescts.Decrypt(key, ivz, data)
-}
-
-// DecryptMessage decrypts the message provided using the methods specific to the etype provided as defined in RFC 8009.
-// The integrity of the message is also verified.
-func DecryptMessage(key, ciphertext []byte, usage uint32, e etype.EType) ([]byte, error) {
-	//Derive the key
-	k, err := e.DeriveKey(key, common.GetUsageKe(usage))
-	if err != nil {
-		return nil, fmt.Errorf("error deriving key: %v", err)
-	}
-	// Strip off the checksum from the end
-	b, err := e.DecryptData(k, ciphertext[:len(ciphertext)-e.GetHMACBitLength()/8])
-	if err != nil {
-		return nil, err
-	}
-	//Verify checksum
-	if !e.VerifyIntegrity(key, ciphertext, b, usage) {
-		return nil, errors.New("integrity verification failed")
-	}
-	//Remove the confounder bytes
-	return b[e.GetConfounderByteSize():], nil
-}
-
-// GetIntegityHash returns a keyed integrity hash of the bytes provided as defined in RFC 8009
-func GetIntegityHash(iv, c, key []byte, usage uint32, e etype.EType) ([]byte, error) {
-	// Generate and append integrity hash
-	// The HMAC is calculated over the cipher state concatenated with the
-	// AES output, instead of being calculated over the confounder and
-	// plaintext. This allows the message receiver to verify the
-	// integrity of the message before decrypting the message.
-	// H = HMAC(Ki, IV | C)
-	ib := append(iv, c...)
-	return common.GetIntegrityHash(ib, key, usage, e)
-}
-
-// VerifyIntegrity verifies the integrity of ciphertext bytes ct.
-func VerifyIntegrity(key, ct []byte, usage uint32, etype etype.EType) bool {
-	h := make([]byte, etype.GetHMACBitLength()/8)
-	copy(h, ct[len(ct)-etype.GetHMACBitLength()/8:])
-	ivz := make([]byte, etype.GetConfounderByteSize())
-	ib := append(ivz, ct[:len(ct)-(etype.GetHMACBitLength()/8)]...)
- expectedMAC, _ := common.GetIntegrityHash(ib, key, usage, etype) - return hmac.Equal(h, expectedMAC) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/keyDerivation.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/keyDerivation.go deleted file mode 100644 index 74cb903b242..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009/keyDerivation.go +++ /dev/null @@ -1,144 +0,0 @@ -package rfc8009 - -import ( - "crypto/hmac" - "encoding/binary" - "encoding/hex" - "errors" - - "golang.org/x/crypto/pbkdf2" - "gopkg.in/jcmturner/gokrb5.v5/crypto/etype" - "gopkg.in/jcmturner/gokrb5.v5/iana/etypeID" -) - -const ( - s2kParamsZero = 32768 -) - -// DeriveRandom for key derivation as defined in RFC 8009 -func DeriveRandom(protocolKey, usage []byte, e etype.EType) ([]byte, error) { - h := e.GetHashFunc()() - return KDF_HMAC_SHA2(protocolKey, []byte("prf"), usage, h.Size(), e), nil -} - -// DeriveKey derives a key from the protocol key based on the usage and the etype's specific methods. -// -// https://tools.ietf.org/html/rfc8009#section-5 -// -// If the enctype is aes128-cts-hmac-sha256-128: -// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128) -// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128) -// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128) -// -// If the enctype is aes256-cts-hmac-sha384-192: -// Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192) -// Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256) -// Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192) -func DeriveKey(protocolKey, label []byte, e etype.EType) []byte { - var context []byte - var kl int - // Key length is longer for aes256-cts-hmac-sha384-192 is it is a Ke or from StringToKey (where label is "kerberos") - if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { - switch label[len(label)-1] { - case 0x73: - // 0x73 is "s" so label could be kerberos meaning StringToKey so now check if the label is "kerberos" - kerblabel := []byte("kerberos") - if len(label) != len(kerblabel) { - break - } - for i, b := range label { - if b != kerblabel[i] { - kl = e.GetKeySeedBitLength() - break - } - } - if kl == 0 { - // This is StringToKey - kl = 256 - } - case 0xAA: - // This is a Ke - kl = 256 - } - } - if kl == 0 { - kl = e.GetKeySeedBitLength() - } - return e.RandomToKey(KDF_HMAC_SHA2(protocolKey, label, context, kl, e)) -} - -// RandomToKey returns a key from the bytes provided according to the definition in RFC 8009. -func RandomToKey(b []byte) []byte { - return b -} - -// StringToKey returns a key derived from the string provided according to the definition in RFC 8009. -func StringToKey(secret, salt, s2kparams string, e etype.EType) ([]byte, error) { - i, err := S2KparamsToItertions(s2kparams) - if err != nil { - return nil, err - } - return StringToKeyIter(secret, salt, int(i), e) -} - -// StringToKeyIter returns a key derived from the string provided according to the definition in RFC 8009. 
-func StringToKeyIter(secret, salt string, iterations int, e etype.EType) ([]byte, error) { - tkey := e.RandomToKey(StringToPBKDF2(secret, salt, iterations, e)) - return e.DeriveKey(tkey, []byte("kerberos")) -} - -// StringToPBKDF2 generates an encryption key from a pass phrase and salt string using the PBKDF2 function from PKCS #5 v2.0 -func StringToPBKDF2(secret, salt string, iterations int, e etype.EType) []byte { - kl := e.GetKeyByteSize() - if e.GetETypeID() == etypeID.AES256_CTS_HMAC_SHA384_192 { - kl = 32 - } - return pbkdf2.Key([]byte(secret), []byte(salt), iterations, kl, e.GetHashFunc()) -} - -// KDF_HMAC_SHA2 key derivation: https://tools.ietf.org/html/rfc8009#section-3 -func KDF_HMAC_SHA2(protocolKey, label, context []byte, kl int, e etype.EType) []byte { - //k: Length in bits of the key to be outputted, expressed in big-endian binary representation in 4 bytes. - k := make([]byte, 4, 4) - binary.BigEndian.PutUint32(k, uint32(kl)) - - c := make([]byte, 4, 4) - binary.BigEndian.PutUint32(c, uint32(1)) - c = append(c, label...) - c = append(c, byte(uint8(0))) - if len(context) > 0 { - c = append(c, context...) - } - c = append(c, k...) - - mac := hmac.New(e.GetHashFunc(), protocolKey) - mac.Write(c) - return mac.Sum(nil)[:(kl / 8)] -} - -// GetSaltP returns the salt value based on the etype name: https://tools.ietf.org/html/rfc8009#section-4 -func GetSaltP(salt, ename string) string { - b := []byte(ename) - b = append(b, byte(uint8(0))) - b = append(b, []byte(salt)...) - return string(b) -} - -// S2KparamsToItertions converts the string representation of iterations to an integer for RFC 8009. -func S2KparamsToItertions(s2kparams string) (int, error) { - var i uint32 - if len(s2kparams) != 8 { - return s2kParamsZero, errors.New("Invalid s2kparams length") - } - b, err := hex.DecodeString(s2kparams) - if err != nil { - return s2kParamsZero, errors.New("Invalid s2kparams, cannot decode string to bytes") - } - i = binary.BigEndian.Uint32(b) - //buf := bytes.NewBuffer(b) - //err = binary.Read(buf, binary.BigEndian, &i) - if err != nil { - return s2kParamsZero, errors.New("Invalid s2kparams, cannot convert to big endian int32") - } - return int(i), nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/ContextFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/ContextFlags.go deleted file mode 100644 index d5b26948123..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/ContextFlags.go +++ /dev/null @@ -1,36 +0,0 @@ -package gssapi - -import "github.com/jcmturner/gofork/encoding/asn1" - -/* -ContextFlags ::= BIT STRING { - delegFlag (0), - mutualFlag (1), - replayFlag (2), - sequenceFlag (3), - anonFlag (4), - confFlag (5), - integFlag (6) -} (SIZE (32)) -*/ - -const ( - delegFlag = 0 - mutualFlag = 1 - replayFlag = 2 - sequenceFlag = 3 - anonFlag = 4 - confFlag = 5 - integFlag = 6 -) - -// ContextFlags flags for GSSAPI -type ContextFlags asn1.BitString - -// NewContextFlags creates a new ContextFlags instance. 
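KDF_HMAC_SHA2 above feeds HMAC a message of [counter]4 | label | 0x00 | context | [k]4 (both integers big-endian) and truncates the output to k/8 bytes. A single-block sketch of that layout for an aes128-cts-hmac-sha256-128 Kc derivation; the all-zero base key is an illustrative placeholder, and the single block suffices only while k does not exceed the hash output size, which holds for the Kerberos aes-sha2 enctypes:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// kdfHMACSHA2 sketches one block of the RFC 8009 section 3 KDF:
// HMAC(key, [counter=1]4 | label | 0x00 | context | [k]4), k-truncated.
func kdfHMACSHA2(key, label, context []byte, k int) []byte {
	msg := binary.BigEndian.AppendUint32(nil, 1) // block counter
	msg = append(msg, label...)
	msg = append(msg, 0x00)
	msg = append(msg, context...)
	msg = binary.BigEndian.AppendUint32(msg, uint32(k))
	mac := hmac.New(sha256.New, key)
	mac.Write(msg)
	return mac.Sum(nil)[:k/8]
}

func main() {
	baseKey := make([]byte, 16) // placeholder base key
	// Kc for key usage 2: the label is the usage (big-endian) followed by 0x99.
	label := append(binary.BigEndian.AppendUint32(nil, 2), 0x99)
	fmt.Println(hex.EncodeToString(kdfHMACSHA2(baseKey, label, nil, 128)))
}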
-func NewContextFlags() ContextFlags { - var c ContextFlags - c.BitLength = 32 - c.Bytes = make([]byte, 4) - return c -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/MechType.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/MechType.go deleted file mode 100644 index 587cdccdb87..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/MechType.go +++ /dev/null @@ -1,9 +0,0 @@ -package gssapi - -import "github.com/jcmturner/gofork/encoding/asn1" - -// MechTypeOIDKRB5 is the MechType OID for Kerberos 5 -var MechTypeOIDKRB5 = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} - -// MechTypeOIDMSLegacyKRB5 is the MechType OID for MS legacy Kerberos 5 -var MechTypeOIDMSLegacyKRB5 = asn1.ObjectIdentifier{1, 2, 840, 48018, 1, 2, 2} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/NegotiationToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/NegotiationToken.go deleted file mode 100644 index f603c47faa8..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/NegotiationToken.go +++ /dev/null @@ -1,149 +0,0 @@ -package gssapi - -import ( - "errors" - "fmt" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -/* -https://msdn.microsoft.com/en-us/library/ms995330.aspx - -NegotiationToken ::= CHOICE { - negTokenInit [0] NegTokenInit, This is the Negotiation token sent from the client to the server. - negTokenResp [1] NegTokenResp -} - -NegTokenInit ::= SEQUENCE { - mechTypes [0] MechTypeList, - reqFlags [1] ContextFlags OPTIONAL, - -- inherited from RFC 2478 for backward compatibility, - -- RECOMMENDED to be left out - mechToken [2] OCTET STRING OPTIONAL, - mechListMIC [3] OCTET STRING OPTIONAL, - ... -} - -NegTokenResp ::= SEQUENCE { - negState [0] ENUMERATED { - accept-completed (0), - accept-incomplete (1), - reject (2), - request-mic (3) - } OPTIONAL, - -- REQUIRED in the first reply from the target - supportedMech [1] MechType OPTIONAL, - -- present only in the first reply from the target - responseToken [2] OCTET STRING OPTIONAL, - mechListMIC [3] OCTET STRING OPTIONAL, - ... -} -*/ - -// NegTokenInit implements Negotiation Token of type Init -type NegTokenInit struct { - MechTypes []asn1.ObjectIdentifier `asn1:"explicit,tag:0"` - ReqFlags ContextFlags `asn1:"explicit,optional,tag:1"` - MechToken []byte `asn1:"explicit,optional,tag:2"` - MechTokenMIC []byte `asn1:"explicit,optional,tag:3"` -} - -// NegTokenResp implements Negotiation Token of type Resp/Targ -type NegTokenResp struct { - NegState asn1.Enumerated `asn1:"explicit,tag:0"` - SupportedMech asn1.ObjectIdentifier `asn1:"explicit,optional,tag:1"` - ResponseToken []byte `asn1:"explicit,optional,tag:2"` - MechListMIC []byte `asn1:"explicit,optional,tag:3"` -} - -// NegTokenTarg implements Negotiation Token of type Resp/Targ -type NegTokenTarg NegTokenResp - -// UnmarshalNegToken umarshals and returns either a NegTokenInit or a NegTokenResp. -// -// The boolean indicates if the response is a NegTokenInit. -// If error is nil and the boolean is false the response is a NegTokenResp. 
-func UnmarshalNegToken(b []byte) (bool, interface{}, error) { - var a asn1.RawValue - _, err := asn1.Unmarshal(b, &a) - if err != nil { - return false, nil, fmt.Errorf("error unmarshalling NegotiationToken: %v", err) - } - switch a.Tag { - case 0: - var negToken NegTokenInit - _, err = asn1.Unmarshal(a.Bytes, &negToken) - if err != nil { - return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Init): %v", a.Tag, err) - } - return true, negToken, nil - case 1: - var negToken NegTokenResp - _, err = asn1.Unmarshal(a.Bytes, &negToken) - if err != nil { - return false, nil, fmt.Errorf("error unmarshalling NegotiationToken type %d (Resp/Targ): %v", a.Tag, err) - } - return false, negToken, nil - default: - return false, nil, errors.New("unknown choice type for NegotiationToken") - } - -} - -// Marshal an Init negotiation token -func (n *NegTokenInit) Marshal() ([]byte, error) { - b, err := asn1.Marshal(*n) - if err != nil { - return nil, err - } - nt := asn1.RawValue{ - Tag: 0, - Class: 2, - IsCompound: true, - Bytes: b, - } - nb, err := asn1.Marshal(nt) - if err != nil { - return nil, err - } - return nb, nil -} - -// Marshal a Resp/Targ negotiation token -func (n *NegTokenResp) Marshal() ([]byte, error) { - b, err := asn1.Marshal(*n) - if err != nil { - return nil, err - } - nt := asn1.RawValue{ - Tag: 1, - Class: 2, - IsCompound: true, - Bytes: b, - } - nb, err := asn1.Marshal(nt) - if err != nil { - return nil, err - } - return nb, nil -} - -// NewNegTokenInitKrb5 creates new Init negotiation token for Kerberos 5 -func NewNegTokenInitKrb5(creds credentials.Credentials, tkt messages.Ticket, sessionKey types.EncryptionKey) (NegTokenInit, error) { - mt, err := NewAPREQMechToken(creds, tkt, sessionKey, []int{GSS_C_INTEG_FLAG, GSS_C_CONF_FLAG}, []int{}) - if err != nil { - return NegTokenInit{}, fmt.Errorf("error getting MechToken; %v", err) - } - mtb, err := mt.Marshal() - if err != nil { - return NegTokenInit{}, fmt.Errorf("error marshalling MechToken; %v", err) - } - return NegTokenInit{ - MechTypes: []asn1.ObjectIdentifier{MechTypeOIDKRB5}, - MechToken: mtb, - }, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/WrapToken.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/WrapToken.go deleted file mode 100644 index 10d6c37c51f..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/WrapToken.go +++ /dev/null @@ -1,234 +0,0 @@ -package gssapi - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -/* -From RFC 4121, section 4.2.6.2: - - Use of the GSS_Wrap() call yields a token (referred as the Wrap token - in this document), which consists of a descriptive header, followed - by a body portion that contains either the input user data in - plaintext concatenated with the checksum, or the input user data - encrypted. The GSS_Wrap() token SHALL have the following format: - - Octet no Name Description - -------------------------------------------------------------- - 0..1 TOK_ID Identification field. Tokens emitted by - GSS_Wrap() contain the hex value 05 04 - expressed in big-endian order in this - field. - 2 Flags Attributes field, as described in section - 4.2.2. - 3 Filler Contains the hex value FF. - 4..5 EC Contains the "extra count" field, in big- - endian order as described in section 4.2.3. - 6..7 RRC Contains the "right rotation count" in big- - endian order, as described in section - 4.2.5. 
- 8..15 SndSeqNum Sequence number field in clear text, - expressed in big-endian order. - 16..last Data Encrypted data for Wrap tokens with - confidentiality, or plaintext data followed - by the checksum for Wrap tokens without - confidentiality, as described in section - 4.2.4. - -Quick notes: - - "EC" or "Extra Count" refers to the length of the cheksum. - - "Flags" (complete details in section 4.2.2) is a set of bits: - - if bit 0 is set, it means the token was sent by the acceptor (generally the kerberized service). - - bit 1 indicates that the token's payload is encrypted - - bit 2 indicates if the message is protected using a subkey defined by the acceptor. - - When computing checksums, EC and RRC MUST be set to 0. - - Wrap Tokens are not ASN.1 encoded. -*/ -const ( - HdrLen = 16 // Length of the Wrap Token's header - FillerByte byte = 0xFF -) - -// WrapToken represents a GSS API Wrap token, as defined in RFC 4121. -// It contains the header fields, the payload and the checksum, and provides -// the logic for converting to/from bytes plus computing and verifying checksums -type WrapToken struct { - // const GSS Token ID: 0x0504 - Flags byte // contains three flags: acceptor, sealed, acceptor subkey - // const Filler: 0xFF - EC uint16 // checksum length. big-endian - RRC uint16 // right rotation count. big-endian - SndSeqNum uint64 // sender's sequence number. big-endian - Payload []byte // your data! :) - CheckSum []byte // authenticated checksum of { payload | header } -} - -// Return the 2 bytes identifying a GSS API Wrap token -func getGssWrapTokenId() *[2]byte { - return &[2]byte{0x05, 0x04} -} - -// Marshal the WrapToken into a byte slice. -// The payload should have been set and the checksum computed, otherwise an error is returned. -func (wt *WrapToken) Marshal() ([]byte, error) { - if wt.CheckSum == nil { - return nil, errors.New("checksum has not been set") - } - if wt.Payload == nil { - return nil, errors.New("payload has not been set") - } - - pldOffset := HdrLen // Offset of the payload in the token - chkSOffset := HdrLen + len(wt.Payload) // Offset of the checksum in the token - - bytes := make([]byte, chkSOffset+int(wt.EC)) - copy(bytes[0:], getGssWrapTokenId()[:]) - bytes[2] = wt.Flags - bytes[3] = FillerByte - binary.BigEndian.PutUint16(bytes[4:6], wt.EC) - binary.BigEndian.PutUint16(bytes[6:8], wt.RRC) - binary.BigEndian.PutUint64(bytes[8:16], wt.SndSeqNum) - copy(bytes[pldOffset:], wt.Payload) - copy(bytes[chkSOffset:], wt.CheckSum) - return bytes, nil -} - -// ComputeAndSetCheckSum uses the passed encryption key and key usage to compute the checksum over the payload and -// the header, and sets the CheckSum field of this WrapToken. -// If the payload has not been set or the checksum has already been set, an error is returned. -func (wt *WrapToken) ComputeAndSetCheckSum(key types.EncryptionKey, keyUsage uint32) error { - if wt.Payload == nil { - return errors.New("payload has not been set") - } - if wt.CheckSum != nil { - return errors.New("checksum has already been computed") - } - chkSum, cErr := wt.ComputeCheckSum(key, keyUsage) - if cErr != nil { - return cErr - } - wt.CheckSum = chkSum - return nil -} - -// ComputeCheckSum computes and returns the checksum of this token, computed using the passed key and key usage. -// Conforms to RFC 4121 in that the checksum will be computed over { body | header }, -// with the EC and RRC flags zeroed out. 
-// In the context of Kerberos Wrap tokens, mostly keyusage GSSAPI_ACCEPTOR_SEAL (=22) -// and GSSAPI_INITIATOR_SEAL (=24) will be used. -// Note: This will NOT update the struct's Checksum field. -func (wt *WrapToken) ComputeCheckSum(key types.EncryptionKey, keyUsage uint32) ([]byte, error) { - if wt.Payload == nil { - return nil, errors.New("cannot compute checksum with uninitialized payload") - } - // Build a slice containing { payload | header } - checksumMe := make([]byte, HdrLen+len(wt.Payload)) - copy(checksumMe[0:], wt.Payload) - copy(checksumMe[len(wt.Payload):], getChecksumHeader(wt.Flags, wt.SndSeqNum)) - - encType, err := crypto.GetEtype(key.KeyType) - if err != nil { - return nil, err - } - return encType.GetChecksumHash(key.KeyValue, checksumMe, keyUsage) -} - -// Build a header suitable for a checksum computation -func getChecksumHeader(flags byte, senderSeqNum uint64) []byte { - header := make([]byte, 16) - copy(header[0:], []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00}) - binary.BigEndian.PutUint64(header[8:], senderSeqNum) - return header -} - -// VerifyCheckSum computes the token's checksum with the provided key and usage, -// and compares it to the checksum present in the token. -// In case of any failure, (false, Err) is returned, with Err an explanatory error. -func (wt *WrapToken) VerifyCheckSum(key types.EncryptionKey, keyUsage uint32) (bool, error) { - computed, cErr := wt.ComputeCheckSum(key, keyUsage) - if cErr != nil { - return false, cErr - } - if !bytes.Equal(computed, wt.CheckSum) { - return false, fmt.Errorf( - "checksum mismatch. Computed: %s, Contained in token: %s", - hex.EncodeToString(computed), hex.EncodeToString(wt.CheckSum)) - } - return true, nil -} - -// Unmarshal bytes into the corresponding WrapToken. -// If expectFromAcceptor is true, we expect the token to have been emitted by the gss acceptor, -// and will check the according flag, returning an error if the token does not match the expectation. -func (wt *WrapToken) Unmarshal(b []byte, expectFromAcceptor bool) error { - // Check if we can read a whole header - if len(b) < 16 { - return errors.New("bytes shorter than header length") - } - // Is the Token ID correct? - if !bytes.Equal(getGssWrapTokenId()[:], b[0:2]) { - return fmt.Errorf("wrong Token ID. Expected %s, was %s", - hex.EncodeToString(getGssWrapTokenId()[:]), - hex.EncodeToString(b[0:2])) - } - // Check the acceptor flag - flags := b[2] - isFromAcceptor := flags&0x01 == 1 - if isFromAcceptor && !expectFromAcceptor { - return errors.New("unexpected acceptor flag is set: not expecting a token from the acceptor") - } - if !isFromAcceptor && expectFromAcceptor { - return errors.New("expected acceptor flag is not set: expecting a token from the acceptor, not the initiator") - } - // Check the filler byte - if b[3] != FillerByte { - return fmt.Errorf("unexpected filler byte: expecting 0xFF, was %s ", hex.EncodeToString(b[3:4])) - } - checksumL := binary.BigEndian.Uint16(b[4:6]) - // Sanity check on the checksum length - if int(checksumL) > len(b)-HdrLen { - return fmt.Errorf("inconsistent checksum length: %d bytes to parse, checksum length is %d", len(b), checksumL) - } - - wt.Flags = flags - wt.EC = checksumL - wt.RRC = binary.BigEndian.Uint16(b[6:8]) - wt.SndSeqNum = binary.BigEndian.Uint64(b[8:16]) - wt.Payload = b[16 : len(b)-int(checksumL)] - wt.CheckSum = b[len(b)-int(checksumL):] - return nil -} - -// NewInitiatorToken builds a new initiator token (acceptor flag will be set to 0) and computes the authenticated checksum. 
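getChecksumHeader above pins EC and RRC to zero when building the header over which the checksum is computed. A short sketch of the resulting 16-byte RFC 4121 header for an initiator token, with an illustrative sequence number:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	var flags byte = 0x00 // initiator, not sealed, no acceptor subkey
	var seqNum uint64 = 1 // illustrative sequence number
	hdr := make([]byte, 16)
	// TOK_ID 0x0504, flags, filler 0xFF, then EC and RRC zeroed for checksumming.
	copy(hdr, []byte{0x05, 0x04, flags, 0xFF, 0x00, 0x00, 0x00, 0x00})
	binary.BigEndian.PutUint64(hdr[8:], seqNum)
	fmt.Println(hex.EncodeToString(hdr)) // 050400ff000000000000000000000001
}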
-// Other flags are set to 0, and the RRC and sequence number are initialized to 0. -// Note that in certain circumstances you may need to provide a sequence number that has been defined earlier. -// This is currently not supported. -func NewInitiatorToken(payload []byte, key types.EncryptionKey) (*WrapToken, error) { - encType, err := crypto.GetEtype(key.KeyType) - if err != nil { - return nil, err - } - - token := WrapToken{ - Flags: 0x00, // all zeroed out (this is a token sent by the initiator) - // Checksum size: lenth of output of the HMAC function, in bytes. - EC: uint16(encType.GetHMACBitLength() / 8), - RRC: 0, - SndSeqNum: 0, - Payload: payload, - } - - if err := token.ComputeAndSetCheckSum(key, keyusage.GSSAPI_INITIATOR_SEAL); err != nil { - return nil, err - } - - return &token, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/gssapi.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/gssapi.go deleted file mode 100644 index 137f42e79f3..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/gssapi.go +++ /dev/null @@ -1,102 +0,0 @@ -// Package gssapi implements Generic Security Services Application Program Interface required for SPNEGO kerberos authentication. -package gssapi - -import ( - "errors" - "fmt" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// SPNEGO_OID is the OID for SPNEGO header type. -var SPNEGO_OID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 2} - -// SPNEGO header struct -type SPNEGO struct { - Init bool - Resp bool - NegTokenInit NegTokenInit - NegTokenResp NegTokenResp -} - -// Unmarshal SPNEGO negotiation token -func (s *SPNEGO) Unmarshal(b []byte) error { - var r []byte - var err error - if b[0] != byte(161) { - // Not a NegTokenResp/Targ could be a NegTokenInit - var oid asn1.ObjectIdentifier - r, err = asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0)) - if err != nil { - return fmt.Errorf("not a valid SPNEGO token: %v", err) - } - // Check the OID is the SPNEGO OID value - if !oid.Equal(SPNEGO_OID) { - return fmt.Errorf("OID %s does not match SPNEGO OID %s", oid.String(), SPNEGO_OID.String()) - } - } else { - // Could be a NegTokenResp/Targ - r = b - } - - var a asn1.RawValue - _, err = asn1.Unmarshal(r, &a) - if err != nil { - return fmt.Errorf("error unmarshalling SPNEGO: %v", err) - } - switch a.Tag { - case 0: - _, err = asn1.Unmarshal(a.Bytes, &s.NegTokenInit) - if err != nil { - return fmt.Errorf("error unmarshalling NegotiationToken type %d (Init): %v", a.Tag, err) - } - s.Init = true - case 1: - _, err = asn1.Unmarshal(a.Bytes, &s.NegTokenResp) - if err != nil { - return fmt.Errorf("error unmarshalling NegotiationToken type %d (Resp/Targ): %v", a.Tag, err) - } - s.Resp = true - default: - return errors.New("unknown choice type for NegotiationToken") - } - return nil -} - -// Marshal SPNEGO negotiation token -func (s *SPNEGO) Marshal() ([]byte, error) { - var b []byte - if s.Init { - hb, _ := asn1.Marshal(SPNEGO_OID) - tb, err := s.NegTokenInit.Marshal() - if err != nil { - return b, fmt.Errorf("could not marshal NegTokenInit: %v", err) - } - b = append(hb, tb...) - return asn1tools.AddASNAppTag(b, 0), nil - } - if s.Resp { - b, err := s.NegTokenResp.Marshal() - if err != nil { - return b, fmt.Errorf("could not marshal NegTokenResp: %v", err) - } - return b, nil - } - return b, errors.New("SPNEGO cannot be marshalled. 
It contains neither a NegTokenInit or NegTokenResp") -} - -// GetSPNEGOKrbNegTokenInit returns an SPNEGO struct containing a NegTokenInit. -func GetSPNEGOKrbNegTokenInit(creds credentials.Credentials, tkt messages.Ticket, sessionKey types.EncryptionKey) (SPNEGO, error) { - negTokenInit, err := NewNegTokenInitKrb5(creds, tkt, sessionKey) - if err != nil { - return SPNEGO{}, fmt.Errorf("could not create NegTokenInit: %v", err) - } - return SPNEGO{ - Init: true, - NegTokenInit: negTokenInit, - }, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/krb5Token.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/krb5Token.go deleted file mode 100644 index d5cb61bb6fb..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/gssapi/krb5Token.go +++ /dev/null @@ -1,202 +0,0 @@ -package gssapi - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// GSSAPI MechToken IDs and flags. -const ( - TOK_ID_KRB_AP_REQ = "0100" - TOK_ID_KRB_AP_REP = "0200" - TOK_ID_KRB_ERROR = "0300" - - GSS_C_DELEG_FLAG = 1 - GSS_C_MUTUAL_FLAG = 2 - GSS_C_REPLAY_FLAG = 4 - GSS_C_SEQUENCE_FLAG = 8 - GSS_C_CONF_FLAG = 16 - GSS_C_INTEG_FLAG = 32 -) - -// MechToken implementation for GSSAPI. -type MechToken struct { - OID asn1.ObjectIdentifier - TokID []byte - APReq messages.APReq - APRep messages.APRep - KRBError messages.KRBError -} - -// Marshal a MechToken into a slice of bytes. -func (m *MechToken) Marshal() ([]byte, error) { - // Create the header - b, _ := asn1.Marshal(m.OID) - b = append(b, m.TokID...) - var tb []byte - var err error - switch hex.EncodeToString(m.TokID) { - case TOK_ID_KRB_AP_REQ: - tb, err = m.APReq.Marshal() - if err != nil { - return []byte{}, fmt.Errorf("error marshalling AP_REQ for MechToken: %v", err) - } - case TOK_ID_KRB_AP_REP: - return []byte{}, errors.New("marshal of AP_REP GSSAPI MechToken not supported by gokrb5") - case TOK_ID_KRB_ERROR: - return []byte{}, errors.New("marshal of KRB_ERROR GSSAPI MechToken not supported by gokrb5") - } - if err != nil { - return []byte{}, fmt.Errorf("error mashalling kerberos message within mech token: %v", err) - } - b = append(b, tb...) - return asn1tools.AddASNAppTag(b, 0), nil -} - -// Unmarshal a MechToken. -func (m *MechToken) Unmarshal(b []byte) error { - var oid asn1.ObjectIdentifier - r, err := asn1.UnmarshalWithParams(b, &oid, fmt.Sprintf("application,explicit,tag:%v", 0)) - if err != nil { - return fmt.Errorf("error unmarshalling MechToken OID: %v", err) - } - m.OID = oid - m.TokID = r[0:2] - switch hex.EncodeToString(m.TokID) { - case TOK_ID_KRB_AP_REQ: - var a messages.APReq - err = a.Unmarshal(r[2:]) - if err != nil { - return fmt.Errorf("error unmarshalling MechToken AP_REQ: %v", err) - } - m.APReq = a - case TOK_ID_KRB_AP_REP: - var a messages.APRep - err = a.Unmarshal(r[2:]) - if err != nil { - return fmt.Errorf("error unmarshalling MechToken AP_REP: %v", err) - } - m.APRep = a - case TOK_ID_KRB_ERROR: - var a messages.KRBError - err = a.Unmarshal(r[2:]) - if err != nil { - return fmt.Errorf("error unmarshalling MechToken KRBError: %v", err) - } - m.KRBError = a - } - return nil -} - -// IsAPReq tests if the MechToken contains an AP_REQ. 
-func (m *MechToken) IsAPReq() bool { - if hex.EncodeToString(m.TokID) == TOK_ID_KRB_AP_REQ { - return true - } - return false -} - -// IsAPRep tests if the MechToken contains an AP_REP. -func (m *MechToken) IsAPRep() bool { - if hex.EncodeToString(m.TokID) == TOK_ID_KRB_AP_REP { - return true - } - return false -} - -// IsKRBError tests if the MechToken contains an KRB_ERROR. -func (m *MechToken) IsKRBError() bool { - if hex.EncodeToString(m.TokID) == TOK_ID_KRB_ERROR { - return true - } - return false -} - -// NewAPREQMechToken creates new Kerberos AP_REQ MechToken. -func NewAPREQMechToken(creds credentials.Credentials, tkt messages.Ticket, sessionKey types.EncryptionKey, GSSAPIFlags []int, APOptions []int) (MechToken, error) { - var m MechToken - m.OID = MechTypeOIDKRB5 - tb, _ := hex.DecodeString(TOK_ID_KRB_AP_REQ) - m.TokID = tb - - auth, err := NewAuthenticator(creds, GSSAPIFlags) - if err != nil { - return m, err - } - APReq, err := messages.NewAPReq( - tkt, - sessionKey, - auth, - ) - if err != nil { - return m, err - } - for _, o := range APOptions { - types.SetFlag(&APReq.APOptions, o) - } - m.APReq = APReq - return m, nil -} - -// NewAuthenticator creates a new kerberos authenticator for kerberos MechToken -func NewAuthenticator(creds credentials.Credentials, flags []int) (types.Authenticator, error) { - //RFC 4121 Section 4.1.1 - auth, err := types.NewAuthenticator(creds.Realm, creds.CName) - if err != nil { - return auth, krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") - } - auth.Cksum = types.Checksum{ - CksumType: chksumtype.GSSAPI, - Checksum: newAuthenticatorChksum(flags), - } - return auth, nil -} - -// Create new authenticator checksum for kerberos MechToken -func newAuthenticatorChksum(flags []int) []byte { - a := make([]byte, 24) - binary.LittleEndian.PutUint32(a[:4], 16) - for _, i := range flags { - if i == GSS_C_DELEG_FLAG { - x := make([]byte, 28-len(a)) - a = append(a, x...) - } - f := binary.LittleEndian.Uint32(a[20:24]) - f |= uint32(i) - binary.LittleEndian.PutUint32(a[20:24], f) - } - return a -} - -/* -The authenticator checksum field SHALL have the following format: - -Octet Name Description ------------------------------------------------------------------ -0..3 Lgth Number of octets in Bnd field; Represented - in little-endian order; Currently contains - hex value 10 00 00 00 (16). -4..19 Bnd Channel binding information, as described in - section 4.1.1.2. -20..23 Flags Four-octet context-establishment flags in - little-endian order as described in section - 4.1.1.1. -24..25 DlgOpt The delegation option identifier (=1) in - little-endian order [optional]. This field - and the next two fields are present if and - only if GSS_C_DELEG_FLAG is set as described - in section 4.1.1.1. -26..27 Dlgth The length of the Deleg field in little-endian order [optional]. -28..(n-1) Deleg A KRB_CRED message (n = Dlgth + 28) [optional]. -n..last Exts Extensions [optional]. -*/ diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/addrtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/addrtype/constants.go deleted file mode 100644 index 457b89d7ace..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/addrtype/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package addrtype provides Address type assigned numbers. -package addrtype - -// Address type IDs. 
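The authenticator checksum layout documented at the end of krb5Token.go above (RFC 4121 section 4.1.1) can be read back mechanically. A minimal sketch, illustrative only and not part of this patch, assuming the encoding/binary and errors imports, that recovers the context-establishment flags from a checksum built by newAuthenticatorChksum:

func parseAuthenticatorChksumFlags(b []byte) (uint32, error) {
	// Lgth (octets 0..3) must hold 16, the length of the Bnd field.
	if len(b) < 24 {
		return 0, errors.New("authenticator checksum shorter than 24 octets")
	}
	if binary.LittleEndian.Uint32(b[0:4]) != 16 {
		return 0, errors.New("unexpected channel binding length field")
	}
	// Flags (octets 20..23) hold the GSS_C_* bits in little-endian order.
	return binary.LittleEndian.Uint32(b[20:24]), nil
}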
-const ( - IPv4 int32 = 2 - Directional int32 = 3 - ChaosNet int32 = 5 - XNS int32 = 6 - ISO int32 = 7 - DECNETPhaseIV int32 = 12 - AppleTalkDDP int32 = 16 - NetBios int32 = 20 - IPv6 int32 = 24 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/adtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/adtype/constants.go deleted file mode 100644 index e805b746660..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/adtype/constants.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package adtype provides Authenticator type assigned numbers. -package adtype - -// Authenticator type IDs. -const ( - ADIfRelevant int32 = 1 - ADIntendedForServer int32 = 2 - ADIntendedForApplicationClass int32 = 3 - ADKDCIssued int32 = 4 - ADAndOr int32 = 5 - ADMandatoryTicketExtensions int32 = 6 - ADInTicketExtensions int32 = 7 - ADMandatoryForKDC int32 = 8 - OSFDCE int32 = 64 - SESAME int32 = 65 - ADOSFDCEPKICertID int32 = 66 - ADAuthenticationStrength int32 = 70 - ADFXFastArmor int32 = 71 - ADFXFastUsed int32 = 72 - ADWin2KPAC int32 = 128 - ADEtypeNegotiation int32 = 129 - //Reserved values 9-63 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag/constants.go deleted file mode 100644 index d74cd60e93e..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag/constants.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package asnAppTag provides ASN1 application tag numbers. -package asnAppTag - -// ASN1 application tag numbers. -const ( - Ticket = 1 - Authenticator = 2 - EncTicketPart = 3 - ASREQ = 10 - TGSREQ = 12 - ASREP = 11 - TGSREP = 13 - APREQ = 14 - APREP = 15 - KRBSafe = 20 - KRBPriv = 21 - KRBCred = 22 - EncASRepPart = 25 - EncTGSRepPart = 26 - EncAPRepPart = 27 - EncKrbPrivPart = 28 - EncKrbCredPart = 29 - KRBError = 30 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype/constants.go deleted file mode 100644 index 93db952dda1..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype/constants.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package chksumtype provides Kerberos 5 checksum type assigned numbers. -package chksumtype - -// Checksum type IDs. -const ( - //RESERVED : 0 - CRC32 int32 = 1 - RSA_MD4 int32 = 2 - RSA_MD4_DES int32 = 3 - DES_MAC int32 = 4 - DES_MAC_K int32 = 5 - RSA_MD4_DES_K int32 = 6 - RSA_MD5 int32 = 7 - RSA_MD5_DES int32 = 8 - RSA_MD5_DES3 int32 = 9 - SHA1_ID10 int32 = 10 - //UNASSIGNED : 11 - HMAC_SHA1_DES3_KD int32 = 12 - HMAC_SHA1_DES3 int32 = 13 - SHA1_ID14 int32 = 14 - HMAC_SHA1_96_AES128 int32 = 15 - HMAC_SHA1_96_AES256 int32 = 16 - CMAC_CAMELLIA128 int32 = 17 - CMAC_CAMELLIA256 int32 = 18 - HMAC_SHA256_128_AES128 int32 = 19 - HMAC_SHA384_192_AES256 int32 = 20 - //UNASSIGNED : 21-32770 - GSSAPI int32 = 32771 - //UNASSIGNED : 32772-2147483647 - KERB_CHECKSUM_HMAC_MD5_UNSIGNED uint32 = 4294967158 // 0xFFFFFF76 documentation says this is -138 but in an unsigned int this is 4294967158 - KERB_CHECKSUM_HMAC_MD5 int32 = -138 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/constants.go deleted file mode 100644 index 0b8e916d5b6..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/constants.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package iana provides Kerberos 5 assigned numbers. -package iana - -// PVNO is the Protocol Version Number. 
-const PVNO = 5 diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/errorcode/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/errorcode/constants.go deleted file mode 100644 index fd756bc5e36..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/errorcode/constants.go +++ /dev/null @@ -1,155 +0,0 @@ -// Package errorcode provides Kerberos 5 assigned error codes. -package errorcode - -import "fmt" - -// Kerberos error codes. -const ( - KDC_ERR_NONE int32 = 0 //No error - KDC_ERR_NAME_EXP int32 = 1 //Client's entry in database has expired - KDC_ERR_SERVICE_EXP int32 = 2 //Server's entry in database has expired - KDC_ERR_BAD_PVNO int32 = 3 //Requested protocol version number not supported - KDC_ERR_C_OLD_MAST_KVNO int32 = 4 //Client's key encrypted in old master key - KDC_ERR_S_OLD_MAST_KVNO int32 = 5 //Server's key encrypted in old master key - KDC_ERR_C_PRINCIPAL_UNKNOWN int32 = 6 //Client not found in Kerberos database - KDC_ERR_S_PRINCIPAL_UNKNOWN int32 = 7 //Server not found in Kerberos database - KDC_ERR_PRINCIPAL_NOT_UNIQUE int32 = 8 //Multiple principal entries in database - KDC_ERR_NULL_KEY int32 = 9 //The client or server has a null key - KDC_ERR_CANNOT_POSTDATE int32 = 10 //Ticket not eligible for postdating - KDC_ERR_NEVER_VALID int32 = 11 //Requested starttime is later than end time - KDC_ERR_POLICY int32 = 12 //KDC policy rejects request - KDC_ERR_BADOPTION int32 = 13 //KDC cannot accommodate requested option - KDC_ERR_ETYPE_NOSUPP int32 = 14 //KDC has no support for encryption type - KDC_ERR_SUMTYPE_NOSUPP int32 = 15 //KDC has no support for checksum type - KDC_ERR_PADATA_TYPE_NOSUPP int32 = 16 //KDC has no support for padata type - KDC_ERR_TRTYPE_NOSUPP int32 = 17 //KDC has no support for transited type - KDC_ERR_CLIENT_REVOKED int32 = 18 //Clients credentials have been revoked - KDC_ERR_SERVICE_REVOKED int32 = 19 //Credentials for server have been revoked - KDC_ERR_TGT_REVOKED int32 = 20 //TGT has been revoked - KDC_ERR_CLIENT_NOTYET int32 = 21 //Client not yet valid; try again later - KDC_ERR_SERVICE_NOTYET int32 = 22 //Server not yet valid; try again later - KDC_ERR_KEY_EXPIRED int32 = 23 //Password has expired; change password to reset - KDC_ERR_PREAUTH_FAILED int32 = 24 //Pre-authentication information was invalid - KDC_ERR_PREAUTH_REQUIRED int32 = 25 //Additional pre-authentication required - KDC_ERR_SERVER_NOMATCH int32 = 26 //Requested server and ticket don't match - KDC_ERR_MUST_USE_USER2USER int32 = 27 //Server principal valid for user2user only - KDC_ERR_PATH_NOT_ACCEPTED int32 = 28 //KDC Policy rejects transited path - KDC_ERR_SVC_UNAVAILABLE int32 = 29 //A service is not available - KRB_AP_ERR_BAD_INTEGRITY int32 = 31 //Integrity check on decrypted field failed - KRB_AP_ERR_TKT_EXPIRED int32 = 32 //Ticket expired - KRB_AP_ERR_TKT_NYV int32 = 33 //Ticket not yet valid - KRB_AP_ERR_REPEAT int32 = 34 //Request is a replay - KRB_AP_ERR_NOT_US int32 = 35 //The ticket isn't for us - KRB_AP_ERR_BADMATCH int32 = 36 //Ticket and authenticator don't match - KRB_AP_ERR_SKEW int32 = 37 //Clock skew too great - KRB_AP_ERR_BADADDR int32 = 38 //Incorrect net address - KRB_AP_ERR_BADVERSION int32 = 39 //Protocol version mismatch - KRB_AP_ERR_MSG_TYPE int32 = 40 //Invalid msg type - KRB_AP_ERR_MODIFIED int32 = 41 //Message stream modified - KRB_AP_ERR_BADORDER int32 = 42 //Message out of order - KRB_AP_ERR_BADKEYVER int32 = 44 //Specified version of key is not available - KRB_AP_ERR_NOKEY int32 = 45 //Service key not available - KRB_AP_ERR_MUT_FAIL int32 = 46 
//Mutual authentication failed - KRB_AP_ERR_BADDIRECTION int32 = 47 //Incorrect message direction - KRB_AP_ERR_METHOD int32 = 48 //Alternative authentication method required - KRB_AP_ERR_BADSEQ int32 = 49 //Incorrect sequence number in message - KRB_AP_ERR_INAPP_CKSUM int32 = 50 //Inappropriate type of checksum in message - KRB_AP_PATH_NOT_ACCEPTED int32 = 51 //Policy rejects transited path - KRB_ERR_RESPONSE_TOO_BIG int32 = 52 //Response too big for UDP; retry with TCP - KRB_ERR_GENERIC int32 = 60 //Generic error (description in e-text) - KRB_ERR_FIELD_TOOLONG int32 = 61 //Field is too long for this implementation - KDC_ERROR_CLIENT_NOT_TRUSTED int32 = 62 //Reserved for PKINIT - KDC_ERROR_KDC_NOT_TRUSTED int32 = 63 //Reserved for PKINIT - KDC_ERROR_INVALID_SIG int32 = 64 //Reserved for PKINIT - KDC_ERR_KEY_TOO_WEAK int32 = 65 //Reserved for PKINIT - KDC_ERR_CERTIFICATE_MISMATCH int32 = 66 //Reserved for PKINIT - KRB_AP_ERR_NO_TGT int32 = 67 //No TGT available to validate USER-TO-USER - KDC_ERR_WRONG_REALM int32 = 68 //Reserved for future use - KRB_AP_ERR_USER_TO_USER_REQUIRED int32 = 69 //Ticket must be for USER-TO-USER - KDC_ERR_CANT_VERIFY_CERTIFICATE int32 = 70 //Reserved for PKINIT - KDC_ERR_INVALID_CERTIFICATE int32 = 71 //Reserved for PKINIT - KDC_ERR_REVOKED_CERTIFICATE int32 = 72 //Reserved for PKINIT - KDC_ERR_REVOCATION_STATUS_UNKNOWN int32 = 73 //Reserved for PKINIT - KDC_ERR_REVOCATION_STATUS_UNAVAILABLE int32 = 74 //Reserved for PKINIT - KDC_ERR_CLIENT_NAME_MISMATCH int32 = 75 //Reserved for PKINIT - KDC_ERR_KDC_NAME_MISMATCH int32 = 76 //Reserved for PKINIT -) - -// Lookup an error code description. -func Lookup(i int32) string { - if s, ok := errorcodeLookup[i]; ok { - return fmt.Sprintf("(%d) %s", i, s) - } - return fmt.Sprintf("Unknown ErrorCode %d", i) -} - -var errorcodeLookup = map[int32]string{ - KDC_ERR_NONE: "KDC_ERR_NONE No error", - KDC_ERR_NAME_EXP: "KDC_ERR_NAME_EXP Client's entry in database has expired", - KDC_ERR_SERVICE_EXP: "KDC_ERR_SERVICE_EXP Server's entry in database has expired", - KDC_ERR_BAD_PVNO: "KDC_ERR_BAD_PVNO Requested protocol version number not supported", - KDC_ERR_C_OLD_MAST_KVNO: "KDC_ERR_C_OLD_MAST_KVNO Client's key encrypted in old master key", - KDC_ERR_S_OLD_MAST_KVNO: "KDC_ERR_S_OLD_MAST_KVNO Server's key encrypted in old master key", - KDC_ERR_C_PRINCIPAL_UNKNOWN: "KDC_ERR_C_PRINCIPAL_UNKNOWN Client not found in Kerberos database", - KDC_ERR_S_PRINCIPAL_UNKNOWN: "KDC_ERR_S_PRINCIPAL_UNKNOWN Server not found in Kerberos database", - KDC_ERR_PRINCIPAL_NOT_UNIQUE: "KDC_ERR_PRINCIPAL_NOT_UNIQUE Multiple principal entries in database", - KDC_ERR_NULL_KEY: "KDC_ERR_NULL_KEY The client or server has a null key", - KDC_ERR_CANNOT_POSTDATE: "KDC_ERR_CANNOT_POSTDATE Ticket not eligible for postdating", - KDC_ERR_NEVER_VALID: "KDC_ERR_NEVER_VALID Requested starttime is later than end time", - KDC_ERR_POLICY: "KDC_ERR_POLICY KDC policy rejects request", - KDC_ERR_BADOPTION: "KDC_ERR_BADOPTION KDC cannot accommodate requested option", - KDC_ERR_ETYPE_NOSUPP: "KDC_ERR_ETYPE_NOSUPP KDC has no support for encryption type", - KDC_ERR_SUMTYPE_NOSUPP: "KDC_ERR_SUMTYPE_NOSUPP KDC has no support for checksum type", - KDC_ERR_PADATA_TYPE_NOSUPP: "KDC_ERR_PADATA_TYPE_NOSUPP KDC has no support for padata type", - KDC_ERR_TRTYPE_NOSUPP: "KDC_ERR_TRTYPE_NOSUPP KDC has no support for transited type", - KDC_ERR_CLIENT_REVOKED: "KDC_ERR_CLIENT_REVOKED Clients credentials have been revoked", - KDC_ERR_SERVICE_REVOKED: "KDC_ERR_SERVICE_REVOKED Credentials for 
server have been revoked", - KDC_ERR_TGT_REVOKED: "KDC_ERR_TGT_REVOKED TGT has been revoked", - KDC_ERR_CLIENT_NOTYET: "KDC_ERR_CLIENT_NOTYET Client not yet valid; try again later", - KDC_ERR_SERVICE_NOTYET: "KDC_ERR_SERVICE_NOTYET Server not yet valid; try again later", - KDC_ERR_KEY_EXPIRED: "KDC_ERR_KEY_EXPIRED Password has expired; change password to reset", - KDC_ERR_PREAUTH_FAILED: "KDC_ERR_PREAUTH_FAILED Pre-authentication information was invalid", - KDC_ERR_PREAUTH_REQUIRED: "KDC_ERR_PREAUTH_REQUIRED Additional pre-authentication required", - KDC_ERR_SERVER_NOMATCH: "KDC_ERR_SERVER_NOMATCH Requested server and ticket don't match", - KDC_ERR_MUST_USE_USER2USER: "KDC_ERR_MUST_USE_USER2USER Server principal valid for user2user only", - KDC_ERR_PATH_NOT_ACCEPTED: "KDC_ERR_PATH_NOT_ACCEPTED KDC Policy rejects transited path", - KDC_ERR_SVC_UNAVAILABLE: "KDC_ERR_SVC_UNAVAILABLE A service is not available", - KRB_AP_ERR_BAD_INTEGRITY: "KRB_AP_ERR_BAD_INTEGRITY Integrity check on decrypted field failed", - KRB_AP_ERR_TKT_EXPIRED: "KRB_AP_ERR_TKT_EXPIRED Ticket expired", - KRB_AP_ERR_TKT_NYV: "KRB_AP_ERR_TKT_NYV Ticket not yet valid", - KRB_AP_ERR_REPEAT: "KRB_AP_ERR_REPEAT Request is a replay", - KRB_AP_ERR_NOT_US: "KRB_AP_ERR_NOT_US The ticket isn't for us", - KRB_AP_ERR_BADMATCH: "KRB_AP_ERR_BADMATCH Ticket and authenticator don't match", - KRB_AP_ERR_SKEW: "KRB_AP_ERR_SKEW Clock skew too great", - KRB_AP_ERR_BADADDR: "KRB_AP_ERR_BADADDR Incorrect net address", - KRB_AP_ERR_BADVERSION: "KRB_AP_ERR_BADVERSION Protocol version mismatch", - KRB_AP_ERR_MSG_TYPE: "KRB_AP_ERR_MSG_TYPE Invalid msg type", - KRB_AP_ERR_MODIFIED: "KRB_AP_ERR_MODIFIED Message stream modified", - KRB_AP_ERR_BADORDER: "KRB_AP_ERR_BADORDER Message out of order", - KRB_AP_ERR_BADKEYVER: "KRB_AP_ERR_BADKEYVER Specified version of key is not available", - KRB_AP_ERR_NOKEY: "KRB_AP_ERR_NOKEY Service key not available", - KRB_AP_ERR_MUT_FAIL: "KRB_AP_ERR_MUT_FAIL Mutual authentication failed", - KRB_AP_ERR_BADDIRECTION: "KRB_AP_ERR_BADDIRECTION Incorrect message direction", - KRB_AP_ERR_METHOD: "KRB_AP_ERR_METHOD Alternative authentication method required", - KRB_AP_ERR_BADSEQ: "KRB_AP_ERR_BADSEQ Incorrect sequence number in message", - KRB_AP_ERR_INAPP_CKSUM: "KRB_AP_ERR_INAPP_CKSUM Inappropriate type of checksum in message", - KRB_AP_PATH_NOT_ACCEPTED: "KRB_AP_PATH_NOT_ACCEPTED Policy rejects transited path", - KRB_ERR_RESPONSE_TOO_BIG: "KRB_ERR_RESPONSE_TOO_BIG Response too big for UDP; retry with TCP", - KRB_ERR_GENERIC: "KRB_ERR_GENERIC Generic error (description in e-text)", - KRB_ERR_FIELD_TOOLONG: "KRB_ERR_FIELD_TOOLONG Field is too long for this implementation", - KDC_ERROR_CLIENT_NOT_TRUSTED: "KDC_ERROR_CLIENT_NOT_TRUSTED Reserved for PKINIT", - KDC_ERROR_KDC_NOT_TRUSTED: "KDC_ERROR_KDC_NOT_TRUSTED Reserved for PKINIT", - KDC_ERROR_INVALID_SIG: "KDC_ERROR_INVALID_SIG Reserved for PKINIT", - KDC_ERR_KEY_TOO_WEAK: "KDC_ERR_KEY_TOO_WEAK Reserved for PKINIT", - KDC_ERR_CERTIFICATE_MISMATCH: "KDC_ERR_CERTIFICATE_MISMATCH Reserved for PKINIT", - KRB_AP_ERR_NO_TGT: "KRB_AP_ERR_NO_TGT No TGT available to validate USER-TO-USER", - KDC_ERR_WRONG_REALM: "KDC_ERR_WRONG_REALM Reserved for future use", - KRB_AP_ERR_USER_TO_USER_REQUIRED: "KRB_AP_ERR_USER_TO_USER_REQUIRED Ticket must be for USER-TO-USER", - KDC_ERR_CANT_VERIFY_CERTIFICATE: "KDC_ERR_CANT_VERIFY_CERTIFICATE Reserved for PKINIT", - KDC_ERR_INVALID_CERTIFICATE: "KDC_ERR_INVALID_CERTIFICATE Reserved for PKINIT", - KDC_ERR_REVOKED_CERTIFICATE: 
"KDC_ERR_REVOKED_CERTIFICATE Reserved for PKINIT", - KDC_ERR_REVOCATION_STATUS_UNKNOWN: "KDC_ERR_REVOCATION_STATUS_UNKNOWN Reserved for PKINIT", - KDC_ERR_REVOCATION_STATUS_UNAVAILABLE: "KDC_ERR_REVOCATION_STATUS_UNAVAILABLE Reserved for PKINIT", - KDC_ERR_CLIENT_NAME_MISMATCH: "KDC_ERR_CLIENT_NAME_MISMATCH Reserved for PKINIT", - KDC_ERR_KDC_NAME_MISMATCH: "KDC_ERR_KDC_NAME_MISMATCH Reserved for PKINIT", -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/etypeID/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/etypeID/constants.go deleted file mode 100644 index 65cd72fbe07..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/etypeID/constants.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package etypeID provides Kerberos 5 encryption type assigned numbers. -package etypeID - -// Kerberos encryption type assigned numbers. -const ( - //RESERVED : 0 - DES_CBC_CRC int32 = 1 - DES_CBC_MD4 int32 = 2 - DES_CBC_MD5 int32 = 3 - DES_CBC_RAW int32 = 4 - DES3_CBC_MD5 int32 = 5 - DES3_CBC_RAW int32 = 6 - DES3_CBC_SHA1 int32 = 7 - DES_HMAC_SHA1 int32 = 8 - DSAWITHSHA1_CMSOID int32 = 9 - MD5WITHRSAENCRYPTION_CMSOID int32 = 10 - SHA1WITHRSAENCRYPTION_CMSOID int32 = 11 - RC2CBC_ENVOID int32 = 12 - RSAENCRYPTION_ENVOID int32 = 13 - RSAES_OAEP_ENV_OID int32 = 14 - DES_EDE3_CBC_ENV_OID int32 = 15 - DES3_CBC_SHA1_KD int32 = 16 - AES128_CTS_HMAC_SHA1_96 int32 = 17 - AES256_CTS_HMAC_SHA1_96 int32 = 18 - AES128_CTS_HMAC_SHA256_128 int32 = 19 - AES256_CTS_HMAC_SHA384_192 int32 = 20 - //UNASSIGNED : 21-22 - RC4_HMAC int32 = 23 - RC4_HMAC_EXP int32 = 24 - CAMELLIA128_CTS_CMAC int32 = 25 - CAMELLIA256_CTS_CMAC int32 = 26 - //UNASSIGNED : 27-64 - SUBKEY_KEYMATERIAL int32 = 65 - //UNASSIGNED : 66-2147483647 -) - -// ETypesByName is a map of EncType names to their assigned EncType number. -var ETypesByName = map[string]int32{ - "des-cbc-crc": DES_CBC_CRC, - "des-cbc-md4": DES_CBC_MD4, - "des-cbc-md5": DES_CBC_MD5, - "des-cbc-raw": DES_CBC_RAW, - "des3-cbc-md5": DES3_CBC_MD5, - "des3-cbc-raw": DES3_CBC_RAW, - "des3-cbc-sha1": DES3_CBC_SHA1, - "des3-hmac-sha1": DES_HMAC_SHA1, - "des3-cbc-sha1-kd": DES3_CBC_SHA1_KD, - "des-hmac-sha1": DES_HMAC_SHA1, - "dsaWithSHA1-CmsOID": DSAWITHSHA1_CMSOID, - "md5WithRSAEncryption-CmsOID": MD5WITHRSAENCRYPTION_CMSOID, - "sha1WithRSAEncryption-CmsOID": SHA1WITHRSAENCRYPTION_CMSOID, - "rc2CBC-EnvOID": RC2CBC_ENVOID, - "rsaEncryption-EnvOID": RSAENCRYPTION_ENVOID, - "rsaES-OAEP-ENV-OID": RSAES_OAEP_ENV_OID, - "des-ede3-cbc-Env-OID": DES_EDE3_CBC_ENV_OID, - "aes128-cts-hmac-sha1-96": AES128_CTS_HMAC_SHA1_96, - "aes128-cts": AES128_CTS_HMAC_SHA1_96, - "aes128-sha1": AES128_CTS_HMAC_SHA1_96, - "aes256-cts-hmac-sha1-96": AES256_CTS_HMAC_SHA1_96, - "aes256-cts": AES128_CTS_HMAC_SHA1_96, - "aes256-sha1": AES128_CTS_HMAC_SHA1_96, - "aes128-cts-hmac-sha256-128": AES128_CTS_HMAC_SHA256_128, - "aes128-sha2": AES128_CTS_HMAC_SHA256_128, - "aes256-cts-hmac-sha384-192": AES256_CTS_HMAC_SHA384_192, - "aes256-sha2": AES256_CTS_HMAC_SHA384_192, - "arcfour-hmac": RC4_HMAC, - "rc4-hmac": RC4_HMAC, - "arcfour-hmac-md5": RC4_HMAC, - "arcfour-hmac-exp": RC4_HMAC_EXP, - "rc4-hmac-exp": RC4_HMAC_EXP, - "arcfour-hmac-md5-exp": RC4_HMAC_EXP, - "camellia128-cts-cmac": CAMELLIA128_CTS_CMAC, - "camellia128-cts": CAMELLIA128_CTS_CMAC, - "camellia256-cts-cmac": CAMELLIA256_CTS_CMAC, - "camellia256-cts": CAMELLIA256_CTS_CMAC, - "subkey-keymaterial": SUBKEY_KEYMATERIAL, -} - -// EtypeSupported resolves the etype name string to the etype ID. -// If zero is returned the etype is not supported by gokrb5. 
-func EtypeSupported(etype string) int32 { - // Slice of supported enctype IDs - s := []int32{ - AES128_CTS_HMAC_SHA1_96, - AES256_CTS_HMAC_SHA1_96, - AES128_CTS_HMAC_SHA256_128, - AES256_CTS_HMAC_SHA384_192, - DES3_CBC_SHA1_KD, - RC4_HMAC, - } - id := ETypesByName[etype] - if id == 0 { - return id - } - for _, sid := range s { - if id == sid { - return id - } - } - return 0 -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/flags/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/flags/constants.go deleted file mode 100644 index 67a1fa752ae..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/flags/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -// Package flags provides Kerberos 5 flag assigned numbers. -package flags - -// Flag values for KRB5 messages and tickets. -const ( - Reserved = 0 - Forwardable = 1 - Forwarded = 2 - Proxiable = 3 - Proxy = 4 - AllowPostDate = 5 - MayPostDate = 5 - PostDated = 6 - Invalid = 7 - Renewable = 8 - Initial = 9 - PreAuthent = 10 - HWAuthent = 11 - OptHardwareAuth = 11 - RequestAnonymous = 12 - TransitedPolicyChecked = 12 - OKAsDelegate = 13 - EncPARep = 15 - Canonicalize = 15 - DisableTransitedCheck = 26 - RenewableOK = 27 - EncTktInSkey = 28 - Renew = 30 - Validate = 31 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/keyusage/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/keyusage/constants.go deleted file mode 100644 index 5b232d1d40d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/keyusage/constants.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package keyusage provides Kerberos 5 key usage assigned numbers. -package keyusage - -// Key usage numbers. -const ( - AS_REQ_PA_ENC_TIMESTAMP = 1 - KDC_REP_TICKET = 2 - AS_REP_ENCPART = 3 - TGS_REQ_KDC_REQ_BODY_AUTHDATA_SESSION_KEY = 4 - TGS_REQ_KDC_REQ_BODY_AUTHDATA_SUB_KEY = 5 - TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM = 6 - TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR = 7 - TGS_REP_ENCPART_SESSION_KEY = 8 - TGS_REP_ENCPART_AUTHENTICATOR_SUB_KEY = 9 - AP_REQ_AUTHENTICATOR_CHKSUM = 10 - AP_REQ_AUTHENTICATOR = 11 - AP_REP_ENCPART = 12 - KRB_PRIV_ENCPART = 13 - KRB_CRED_ENCPART = 14 - KRB_SAFE_CHKSUM = 15 - KERB_NON_KERB_SALT = 16 - KERB_NON_KERB_CKSUM_SALT = 17 - //18. Reserved for future use in Kerberos and related protocols. - AD_KDC_ISSUED_CHKSUM = 19 - //20-21. Reserved for future use in Kerberos and related protocols. - GSSAPI_ACCEPTOR_SEAL = 22 - GSSAPI_ACCEPTOR_SIGN = 23 - GSSAPI_INITIATOR_SEAL = 24 - GSSAPI_INITIATOR_SIGN = 25 - KEY_USAGE_FAST_REQ_CHKSUM = 50 - KEY_USAGE_FAST_ENC = 51 - KEY_USAGE_FAST_REP = 52 - KEY_USAGE_FAST_FINISHED = 53 - KEY_USAGE_ENC_CHALLENGE_CLIENT = 54 - KEY_USAGE_ENC_CHALLENGE_KDC = 55 - KEY_USAGE_AS_REQ = 56 - //26-511. Reserved for future use in Kerberos and related protocols. - //512-1023. Reserved for uses internal to a Kerberos implementation. - //1024. Encryption for application use in protocols that do not specify key usage values - //1025. Checksums for application use in protocols that do not specify key usage values - //1026-2047. Reserved for application use. -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/msgtype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/msgtype/constants.go deleted file mode 100644 index ad21810b675..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/msgtype/constants.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package msgtype provides Kerberos 5 message type assigned numbers. -package msgtype - -// KRB message type IDs. 
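A hedged usage sketch for the etype table above (illustrative only; resolveEtype is not part of the library):

// Resolve a config-style enctype name, rejecting anything gokrb5 cannot use;
// EtypeSupported returns 0 for unknown and unsupported names alike.
func resolveEtype(name string) (int32, error) {
	id := etypeID.EtypeSupported(name)
	if id == 0 {
		return 0, fmt.Errorf("enctype %q is unknown or unsupported", name)
	}
	return id, nil
}

Note in passing that in this vendored copy the "aes256-cts" and "aes256-sha1" aliases in ETypesByName map to AES128_CTS_HMAC_SHA1_96 rather than the AES-256 constant.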
-const ( - KRB_AS_REQ = 10 //Request for initial authentication - KRB_AS_REP = 11 //Response to KRB_AS_REQ request - KRB_TGS_REQ = 12 //Request for authentication based on TGT - KRB_TGS_REP = 13 //Response to KRB_TGS_REQ request - KRB_AP_REQ = 14 //Application request to server - KRB_AP_REP = 15 //Response to KRB_AP_REQ_MUTUAL - KRB_RESERVED16 = 16 //Reserved for user-to-user krb_tgt_request - KRB_RESERVED17 = 17 //Reserved for user-to-user krb_tgt_reply - KRB_SAFE = 20 // Safe (checksummed) application message - KRB_PRIV = 21 // Private (encrypted) application message - KRB_CRED = 22 //Private (encrypted) message to forward credentials - KRB_ERROR = 30 //Error response -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/nametype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/nametype/constants.go deleted file mode 100644 index c111a05f926..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/nametype/constants.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package nametype provides Kerberos 5 principal name type numbers. -package nametype - -// Kerberos name type IDs. -const ( - KRB_NT_UNKNOWN int32 = 0 //Name type not known - KRB_NT_PRINCIPAL int32 = 1 //Just the name of the principal as in DCE, or for users - KRB_NT_SRV_INST int32 = 2 //Service and other unique instance (krbtgt) - KRB_NT_SRV_HST int32 = 3 //Service with host name as instance (telnet, rcommands) - KRB_NT_SRV_XHST int32 = 4 //Service with host as remaining components - KRB_NT_UID int32 = 5 //Unique ID - KRB_NT_X500_PRINCIPAL int32 = 6 //Encoded X.509 Distinguished name [RFC2253] - KRB_NT_SMTP_NAME int32 = 7 //Name in form of SMTP email name (e.g., user@example.com) - KRB_NT_ENTERPRISE int32 = 10 //Enterprise name; may be mapped to principal name -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/patype/constants.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/patype/constants.go deleted file mode 100644 index aa04f63765c..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/iana/patype/constants.go +++ /dev/null @@ -1,77 +0,0 @@ -// Package patype provides Kerberos 5 pre-authentication type assigned numbers. -package patype - -// Kerberos pre-authentication type assigned numbers. 
-const ( - PA_TGS_REQ int32 = 1 - PA_ENC_TIMESTAMP int32 = 2 - PA_PW_SALT int32 = 3 - //RESERVED : 4 - PA_ENC_UNIX_TIME int32 = 5 - PA_SANDIA_SECUREID int32 = 6 - PA_SESAME int32 = 7 - PA_OSF_DCE int32 = 8 - PA_CYBERSAFE_SECUREID int32 = 9 - PA_AFS3_SALT int32 = 10 - PA_ETYPE_INFO int32 = 11 - PA_SAM_CHALLENGE int32 = 12 - PA_SAM_RESPONSE int32 = 13 - PA_PK_AS_REQ_OLD int32 = 14 - PA_PK_AS_REP_OLD int32 = 15 - PA_PK_AS_REQ int32 = 16 - PA_PK_AS_REP int32 = 17 - PA_PK_OCSP_RESPONSE int32 = 18 - PA_ETYPE_INFO2 int32 = 19 - PA_USE_SPECIFIED_KVNO int32 = 20 - PA_SVR_REFERRAL_INFO int32 = 20 - PA_SAM_REDIRECT int32 = 21 - PA_GET_FROM_TYPED_DATA int32 = 22 - TD_PADATA int32 = 22 - PA_SAM_ETYPE_INFO int32 = 23 - PA_ALT_PRINC int32 = 24 - PA_SERVER_REFERRAL int32 = 25 - //UNASSIGNED : 26-29 - PA_SAM_CHALLENGE2 int32 = 30 - PA_SAM_RESPONSE2 int32 = 31 - //UNASSIGNED : 32-40 - PA_EXTRA_TGT int32 = 41 - //UNASSIGNED : 42-100 - TD_PKINIT_CMS_CERTIFICATES int32 = 101 - TD_KRB_PRINCIPAL int32 = 102 - TD_KRB_REALM int32 = 103 - TD_TRUSTED_CERTIFIERS int32 = 104 - TD_CERTIFICATE_INDEX int32 = 105 - TD_APP_DEFINED_ERROR int32 = 106 - TD_REQ_NONCE int32 = 107 - TD_REQ_SEQ int32 = 108 - TD_DH_PARAMETERS int32 = 109 - //UNASSIGNED : 110 - TD_CMS_DIGEST_ALGORITHMS int32 = 111 - TD_CERT_DIGEST_ALGORITHMS int32 = 112 - //UNASSIGNED : 113-127 - PA_PAC_REQUEST int32 = 128 - PA_FOR_USER int32 = 129 - PA_FOR_X509_USER int32 = 130 - PA_FOR_CHECK_DUPS int32 = 131 - PA_AS_CHECKSUM int32 = 132 - PA_FX_COOKIE int32 = 133 - PA_AUTHENTICATION_SET int32 = 134 - PA_AUTH_SET_SELECTED int32 = 135 - PA_FX_FAST int32 = 136 - PA_FX_ERROR int32 = 137 - PA_ENCRYPTED_CHALLENGE int32 = 138 - //UNASSIGNED : 139-140 - PA_OTP_CHALLENGE int32 = 141 - PA_OTP_REQUEST int32 = 142 - PA_OTP_CONFIRM int32 = 143 - PA_OTP_PIN_CHANGE int32 = 144 - PA_EPAK_AS_REQ int32 = 145 - PA_EPAK_AS_REP int32 = 146 - PA_PKINIT_KX int32 = 147 - PA_PKU2U_NAME int32 = 148 - PA_REQ_ENC_PA_REP int32 = 149 - PA_AS_FRESHNESS int32 = 150 - //UNASSIGNED : 151-164 - PA_SUPPORTED_ETYPES int32 = 165 - PA_EXTENDED_ERROR int32 = 166 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/changepasswddata.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/changepasswddata.go deleted file mode 100644 index f48cb388c49..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/changepasswddata.go +++ /dev/null @@ -1,23 +0,0 @@ -package kadmin - -import ( - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// ChangePasswdData is the payload to a password change message. -type ChangePasswdData struct { - NewPasswd []byte `asn1:"explicit,tag:0"` - TargName types.PrincipalName `asn1:"explicit,optional,tag:1"` - TargRealm string `asn1:"generalstring,optional,explicit,tag:2"` -} - -// Marshal ChangePasswdData into a byte slice. -func (c *ChangePasswdData) Marshal() ([]byte, error) { - b, err := asn1.Marshal(*c) - if err != nil { - return []byte{}, err - } - //b = asn1tools.AddASNAppTag(b, asnAppTag.) - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/message.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/message.go deleted file mode 100644 index 13104a5d26d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/message.go +++ /dev/null @@ -1,114 +0,0 @@ -package kadmin - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -const ( - verisonHex = "ff80" -) - -// Request message for changing password. 
-type Request struct { - APREQ messages.APReq - KRBPriv messages.KRBPriv -} - -// Reply message for a password change. -type Reply struct { - MessageLength int - Version int - APREPLength int - APREP messages.APRep - KRBPriv messages.KRBPriv - KRBError messages.KRBError - IsKRBError bool - ResultCode uint16 - Result string -} - -// Marshal a Request into a byte slice. -func (m *Request) Marshal() (b []byte, err error) { - b = []byte{255, 128} // protocol version number: contains the hex constant 0xff80 (big-endian integer). - ab, e := m.APREQ.Marshal() - if e != nil { - err = fmt.Errorf("error marshaling AP_REQ: %v", e) - return - } - if len(ab) > math.MaxUint16 { - err = errors.New("length of AP_REQ greater then max Uint16 size") - return - } - al := make([]byte, 2) - binary.BigEndian.PutUint16(al, uint16(len(ab))) - b = append(b, al...) - b = append(b, ab...) - pb, e := m.KRBPriv.Marshal() - if e != nil { - err = fmt.Errorf("error marshaling KRB_Priv: %v", e) - return - } - b = append(b, pb...) - if len(b)+2 > math.MaxUint16 { - err = errors.New("length of message greater then max Uint16 size") - return - } - ml := make([]byte, 2) - binary.BigEndian.PutUint16(ml, uint16(len(b)+2)) - b = append(ml, b...) - return -} - -// Unmarshal a byte slice into a Reply. -func (m *Reply) Unmarshal(b []byte) error { - m.MessageLength = int(binary.BigEndian.Uint16(b[0:2])) - m.Version = int(binary.BigEndian.Uint16(b[2:4])) - if m.Version != 1 { - return fmt.Errorf("kadmin reply has incorrect protocol version number: %d", m.Version) - } - m.APREPLength = int(binary.BigEndian.Uint16(b[4:6])) - if m.APREPLength != 0 { - err := m.APREP.Unmarshal(b[6 : 6+m.APREPLength]) - if err != nil { - return err - } - err = m.KRBPriv.Unmarshal(b[6+m.APREPLength : m.MessageLength]) - if err != nil { - return err - } - } else { - m.IsKRBError = true - m.KRBError.Unmarshal(b[6:m.MessageLength]) - m.ResultCode, m.Result = parseResponse(m.KRBError.EData) - } - return nil -} - -func parseResponse(b []byte) (c uint16, s string) { - c = binary.BigEndian.Uint16(b[0:2]) - buf := bytes.NewBuffer(b[2:]) - m := make([]byte, len(b)-2) - binary.Read(buf, binary.BigEndian, &m) - s = string(m) - return -} - -// Decrypt the encrypted part of the KRBError within the change password Reply. -func (m *Reply) Decrypt(key types.EncryptionKey) error { - if m.IsKRBError { - return m.KRBError - } - err := m.KRBPriv.DecryptEncPart(key) - if err != nil { - return err - } - m.ResultCode, m.Result = parseResponse(m.KRBPriv.DecryptedEncPart.UserData) - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/passwd.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/passwd.go deleted file mode 100644 index 1b03fd1acb5..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/kadmin/passwd.go +++ /dev/null @@ -1,68 +0,0 @@ -// Package kadmin provides Kerberos administration capabilities. -package kadmin - -import ( - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/messages" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// ChangePasswdMsg generate a change password request and also return the key needed to decrypt the reply. 
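The wire framing produced by Request.Marshal above is worth stating on its own. A minimal equivalent sketch, illustrative only, assuming both lengths fit in a uint16 (Marshal itself enforces this):

func frameKpasswd(apReq, krbPriv []byte) []byte {
	body := make([]byte, 4)
	binary.BigEndian.PutUint16(body[0:2], 0xff80)             // protocol version
	binary.BigEndian.PutUint16(body[2:4], uint16(len(apReq))) // AP-REQ length
	body = append(body, apReq...)
	body = append(body, krbPriv...)
	msg := make([]byte, 2)
	binary.BigEndian.PutUint16(msg, uint16(len(body)+2)) // total length, including this field
	return append(msg, body...)
}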
-func ChangePasswdMsg(cname types.PrincipalName, realm, password string, tkt messages.Ticket, sessionKey types.EncryptionKey) (r Request, k types.EncryptionKey, err error) { - // Create change password data struct and marshal to bytes - chgpasswd := ChangePasswdData{ - NewPasswd: []byte(password), - TargName: cname, - TargRealm: realm, - } - chpwdb, err := chgpasswd.Marshal() - if err != nil { - err = krberror.Errorf(err, krberror.KRBMsgError, "error marshaling change passwd data") - return - } - - // Generate authenticator - auth, err := types.NewAuthenticator(realm, cname) - if err != nil { - err = krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") - return - } - etype, err := crypto.GetEtype(sessionKey.KeyType) - if err != nil { - err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey etype") - return - } - err = auth.GenerateSeqNumberAndSubKey(etype.GetETypeID(), etype.GetKeyByteSize()) - if err != nil { - err = krberror.Errorf(err, krberror.KRBMsgError, "error generating subkey") - return - } - k = auth.SubKey - - // Generate AP_REQ - APreq, err := messages.NewAPReq(tkt, sessionKey, auth) - if err != nil { - return - } - - // Form the KRBPriv encpart data - kp := messages.EncKrbPrivPart{ - UserData: chpwdb, - Timestamp: auth.CTime, - Usec: auth.Cusec, - SequenceNumber: auth.SeqNumber, - } - kpriv := messages.NewKRBPriv(kp) - err = kpriv.EncryptEncPart(k) - if err != nil { - err = krberror.Errorf(err, krberror.EncryptingError, "error encrypting change passwd data") - return - } - - r = Request{ - APREQ: APreq, - KRBPriv: kpriv, - } - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/keytab/keytab.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/keytab/keytab.go deleted file mode 100644 index c466458879a..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/keytab/keytab.go +++ /dev/null @@ -1,369 +0,0 @@ -// Package keytab implements Kerberos keytabs: https://web.mit.edu/kerberos/krb5-devel/doc/formats/keytab_file_format.html. -package keytab - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "time" - "unsafe" - - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -const ( - keytabFirstByte byte = 05 -) - -// Keytab struct. -type Keytab struct { - Version uint8 - Entries []entry -} - -// Keytab entry struct. -type entry struct { - Principal principal - Timestamp time.Time - KVNO8 uint8 - Key types.EncryptionKey - KVNO uint32 -} - -// Keytab entry principal struct. -type principal struct { - NumComponents int16 - Realm string - Components []string - NameType int32 -} - -// NewKeytab creates new, empty Keytab type. -func NewKeytab() Keytab { - var e []entry - return Keytab{ - Version: 0, - Entries: e, - } -} - -// GetEncryptionKey returns the EncryptionKey from the Keytab for the newest entry with the required kvno, etype and matching principal. -func (kt *Keytab) GetEncryptionKey(nameString []string, realm string, kvno int, etype int32) (types.EncryptionKey, error) { - var key types.EncryptionKey - var t time.Time - for _, k := range kt.Entries { - if k.Principal.Realm == realm && len(k.Principal.Components) == len(nameString) && - k.Key.KeyType == etype && - (k.KVNO == uint32(kvno) || kvno == 0) && - k.Timestamp.After(t) { - - p := true - for i, n := range k.Principal.Components { - if nameString[i] != n { - p = false - break - } - } - if p { - key = k.Key - t = k.Timestamp - } - } - } - if len(key.KeyValue) < 1 { - return key, fmt.Errorf("matching key not found in keytab. 
Looking for %v realm: %v kvno: %v etype: %v", nameString, realm, kvno, etype) - } - return key, nil -} - -// Create a new Keytab entry. -func newKeytabEntry() entry { - var b []byte - return entry{ - Principal: newPrincipal(), - Timestamp: time.Time{}, - KVNO8: 0, - Key: types.EncryptionKey{ - KeyType: 0, - KeyValue: b, - }, - KVNO: 0, - } -} - -// Create a new principal. -func newPrincipal() principal { - var c []string - return principal{ - NumComponents: 0, - Realm: "", - Components: c, - NameType: 0, - } -} - -// Load a Keytab file into a Keytab type. -func Load(ktPath string) (kt Keytab, err error) { - k, err := ioutil.ReadFile(ktPath) - if err != nil { - return - } - return Parse(k) -} - -// Marshal keytab into byte slice -func (kt Keytab) Marshal() ([]byte, error) { - b := []byte{keytabFirstByte, kt.Version} - for _, e := range kt.Entries { - eb, err := e.marshal(int(kt.Version)) - if err != nil { - return b, err - } - b = append(b, eb...) - } - return b, nil -} - -// Write the keytab bytes to io.Writer. -// Returns the number of bytes written -func (kt Keytab) Write(w io.Writer) (int, error) { - b, err := kt.Marshal() - if err != nil { - return 0, fmt.Errorf("error marshaling keytab: %v", err) - } - return w.Write(b) -} - -// Parse byte slice of Keytab data into Keytab type. -func Parse(b []byte) (kt Keytab, err error) { - //The first byte of the file always has the value 5 - if b[0] != keytabFirstByte { - err = errors.New("invalid keytab data. First byte does not equal 5") - return - } - //Get keytab version - //The 2nd byte contains the version number (1 or 2) - kt.Version = uint8(b[1]) - if kt.Version != 1 && kt.Version != 2 { - err = errors.New("invalid keytab data. Keytab version is neither 1 nor 2") - return - } - //Version 1 of the file format uses native byte order for integer representations. Version 2 always uses big-endian byte order - var endian binary.ByteOrder - endian = binary.BigEndian - if kt.Version == 1 && isNativeEndianLittle() { - endian = binary.LittleEndian - } - /* - After the two-byte version indicator, the file contains a sequence of signed 32-bit record lengths followed by key records or holes. - A positive record length indicates a valid key entry whose size is equal to or less than the record length. - A negative length indicates a zero-filled hole whose size is the inverse of the length. - A length of 0 indicates the end of the file. - */ - // n tracks position in the byte array - n := 2 - l := readInt32(b, &n, &endian) - for l != 0 { - if l < 0 { - //Zero padded so skip over - l = l * -1 - n = n + int(l) - } else { - //fmt.Printf("Bytes for entry: %v\n", b[n:n+int(l)]) - eb := b[n : n+int(l)] - n = n + int(l) - ke := newKeytabEntry() - // p keeps track as to where we are in the byte stream - var p int - parsePrincipal(eb, &p, &kt, &ke, &endian) - ke.Timestamp = readTimestamp(eb, &p, &endian) - ke.KVNO8 = uint8(readInt8(eb, &p, &endian)) - ke.Key.KeyType = int32(readInt16(eb, &p, &endian)) - kl := int(readInt16(eb, &p, &endian)) - ke.Key.KeyValue = readBytes(eb, &p, kl, &endian) - //The 32-bit key version overrides the 8-bit key version. - // To determine if it is present, the implementation must check that at least 4 bytes remain in the record after the other fields are read, - // and that the value of the 32-bit integer contained in those bytes is non-zero. 
- if len(eb)-p >= 4 { - // The 32-bit key may be present - ke.KVNO = uint32(readInt32(eb, &p, &endian)) - } - if ke.KVNO == 0 { - // Handles if the value from the last 4 bytes was zero and also if there are not the 4 bytes present. Makes sense to put the same value here as KVNO8 - ke.KVNO = uint32(ke.KVNO8) - } - // Add the entry to the keytab - kt.Entries = append(kt.Entries, ke) - } - // Check if there are still 4 bytes left to read - if n > len(b) || len(b[n:]) < 4 { - break - } - // Read the size of the next entry - l = readInt32(b, &n, &endian) - } - return -} - -func (e entry) marshal(v int) ([]byte, error) { - var b []byte - pb, err := e.Principal.marshal(v) - if err != nil { - return b, err - } - b = append(b, pb...) - - var endian binary.ByteOrder - endian = binary.BigEndian - if v == 1 && isNativeEndianLittle() { - endian = binary.LittleEndian - } - - t := make([]byte, 9) - endian.PutUint32(t[0:4], uint32(e.Timestamp.Unix())) - t[4] = byte(e.KVNO8) - endian.PutUint16(t[5:7], uint16(e.Key.KeyType)) - endian.PutUint16(t[7:9], uint16(len(e.Key.KeyValue))) - b = append(b, t...) - - buf := new(bytes.Buffer) - err = binary.Write(buf, endian, e.Key.KeyValue) - if err != nil { - return b, err - } - b = append(b, buf.Bytes()...) - - t = make([]byte, 4) - endian.PutUint32(t, e.KVNO) - b = append(b, t...) - - // Add the length header - t = make([]byte, 4) - endian.PutUint32(t, uint32(len(b))) - b = append(t, b...) - return b, nil -} - -// Parse the Keytab bytes of a principal into a Keytab entry's principal. -func parsePrincipal(b []byte, p *int, kt *Keytab, ke *entry, e *binary.ByteOrder) error { - ke.Principal.NumComponents = readInt16(b, p, e) - if kt.Version == 1 { - //In version 1 the number of components includes the realm. Minus 1 to make consistent with version 2 - ke.Principal.NumComponents-- - } - lenRealm := readInt16(b, p, e) - ke.Principal.Realm = string(readBytes(b, p, int(lenRealm), e)) - for i := 0; i < int(ke.Principal.NumComponents); i++ { - l := readInt16(b, p, e) - ke.Principal.Components = append(ke.Principal.Components, string(readBytes(b, p, int(l), e))) - } - if kt.Version != 1 { - //Name Type is omitted in version 1 - ke.Principal.NameType = readInt32(b, p, e) - } - return nil -} - -func (p principal) marshal(v int) ([]byte, error) { - //var b []byte - b := make([]byte, 2) - var endian binary.ByteOrder - endian = binary.BigEndian - if v == 1 && isNativeEndianLittle() { - endian = binary.LittleEndian - } - endian.PutUint16(b[0:], uint16(p.NumComponents)) - realm, err := marshalString(p.Realm, v) - if err != nil { - return b, err - } - b = append(b, realm...) - for _, c := range p.Components { - cb, err := marshalString(c, v) - if err != nil { - return b, err - } - b = append(b, cb...) - } - if v != 1 { - t := make([]byte, 4) - endian.PutUint32(t, uint32(p.NameType)) - b = append(b, t...) - } - return b, nil -} - -func marshalString(s string, v int) ([]byte, error) { - sb := []byte(s) - b := make([]byte, 2) - var endian binary.ByteOrder - endian = binary.BigEndian - if v == 1 && isNativeEndianLittle() { - endian = binary.LittleEndian - } - endian.PutUint16(b[0:], uint16(len(sb))) - buf := new(bytes.Buffer) - err := binary.Write(buf, endian, sb) - if err != nil { - return b, err - } - b = append(b, buf.Bytes()...) - return b, err -} - -// Read bytes representing a timestamp. -func readTimestamp(b []byte, p *int, e *binary.ByteOrder) time.Time { - return time.Unix(int64(readInt32(b, p, e)), 0) -} - -// Read bytes representing an eight bit integer. 
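A short usage sketch for the keytab API above (illustrative only; the path and principal are placeholders):

func serviceKey() (types.EncryptionKey, error) {
	kt, err := keytab.Load("/etc/krb5.keytab") // placeholder path
	if err != nil {
		return types.EncryptionKey{}, err
	}
	// kvno 0 matches any key version; etype 18 is aes256-cts-hmac-sha1-96.
	return kt.GetEncryptionKey([]string{"HTTP", "host.example.com"}, "EXAMPLE.COM", 0, 18)
}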
-func readInt8(b []byte, p *int, e *binary.ByteOrder) (i int8) { - buf := bytes.NewBuffer(b[*p : *p+1]) - binary.Read(buf, *e, &i) - *p++ - return -} - -// Read bytes representing a sixteen bit integer. -func readInt16(b []byte, p *int, e *binary.ByteOrder) (i int16) { - buf := bytes.NewBuffer(b[*p : *p+2]) - binary.Read(buf, *e, &i) - *p += 2 - return -} - -// Read bytes representing a thirty two bit integer. -func readInt32(b []byte, p *int, e *binary.ByteOrder) (i int32) { - buf := bytes.NewBuffer(b[*p : *p+4]) - binary.Read(buf, *e, &i) - *p += 4 - return -} - -func readBytes(b []byte, p *int, s int, e *binary.ByteOrder) []byte { - buf := bytes.NewBuffer(b[*p : *p+s]) - r := make([]byte, s) - binary.Read(buf, *e, &r) - *p += s - return r -} - -func isNativeEndianLittle() bool { - var x = 0x012345678 - var p = unsafe.Pointer(&x) - var bp = (*[4]byte)(p) - - var endian bool - if 0x01 == bp[0] { - endian = false - } else if (0x78 & 0xff) == (bp[0] & 0xff) { - endian = true - } else { - // Default to big endian - endian = false - } - return endian -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/krberror/error.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/krberror/error.go deleted file mode 100644 index d591bdeb37c..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/krberror/error.go +++ /dev/null @@ -1,67 +0,0 @@ -// Package krberror provides error type and functions for gokrb5. -package krberror - -import ( - "fmt" - "strings" -) - -// Error type descriptions. -const ( - separator = " < " - EncodingError = "Encoding_Error" - NetworkingError = "Networking_Error" - DecryptingError = "Decrypting_Error" - EncryptingError = "Encrypting_Error" - ChksumError = "Checksum_Error" - KRBMsgError = "KRBMessage_Handling_Error" - ConfigError = "Configuration_Error" - KDCError = "KDC_Error" -) - -// Krberror is an error type for gokrb5 -type Krberror struct { - RootCause string - EText []string -} - -// Error function to implement the error interface. -func (e Krberror) Error() string { - return fmt.Sprintf("[Root cause: %s] ", e.RootCause) + strings.Join(e.EText, separator) -} - -// Add another error statement to the error. -func (e *Krberror) Add(et string, s string) { - e.EText = append([]string{fmt.Sprintf("%s: %s", et, s)}, e.EText...) -} - -// NewKrberror creates a new instance of Krberror. -func NewKrberror(et, s string) Krberror { - return Krberror{ - RootCause: et, - EText: []string{s}, - } -} - -// Errorf appends to or creates a new Krberror. -func Errorf(err error, et, format string, a ...interface{}) Krberror { - if e, ok := err.(Krberror); ok { - e.Add(et, fmt.Sprintf(format, a...)) - return e - } - return NewErrorf(et, format+": %s", append(a, err)...) -} - -// NewErrorf creates a new Krberror from a formatted string. 
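A sketch of how the krberror helpers above chain context while preserving the root cause (illustrative only; doKDCExchange is hypothetical):

func exchange() error {
	if err := doKDCExchange(); err != nil {
		// Errorf prepends the new context to EText, so Error() renders
		// "[Root cause: ...] Networking_Error: KDC exchange failed: ..."
		// with earlier messages joined by " < ".
		return krberror.Errorf(err, krberror.NetworkingError, "KDC exchange failed")
	}
	return nil
}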
-func NewErrorf(et, format string, a ...interface{}) Krberror { - var s string - if len(a) > 0 { - s = fmt.Sprintf("%s: %s", et, fmt.Sprintf(format, a...)) - } else { - s = fmt.Sprintf("%s: %s", et, format) - } - return Krberror{ - RootCause: et, - EText: []string{s}, - } -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APRep.go deleted file mode 100644 index 5f8e299125d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APRep.go +++ /dev/null @@ -1,64 +0,0 @@ -package messages - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -/* -AP-REP ::= [APPLICATION 15] SEQUENCE { -pvno [0] INTEGER (5), -msg-type [1] INTEGER (15), -enc-part [2] EncryptedData -- EncAPRepPart -} - -EncAPRepPart ::= [APPLICATION 27] SEQUENCE { - ctime [0] KerberosTime, - cusec [1] Microseconds, - subkey [2] EncryptionKey OPTIONAL, - seq-number [3] UInt32 OPTIONAL -} -*/ - -// APRep implements RFC 4120 KRB_AP_REP: https://tools.ietf.org/html/rfc4120#section-5.5.2. -type APRep struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - EncPart types.EncryptedData `asn1:"explicit,tag:2"` -} - -// EncAPRepPart is the encrypted part of KRB_AP_REP. -type EncAPRepPart struct { - CTime time.Time `asn1:"generalized,explicit,tag:0"` - Cusec int `asn1:"explicit,tag:1"` - Subkey types.EncryptionKey `asn1:"optional,explicit,tag:2"` - SequenceNumber int64 `asn1:"optional,explicit,tag:3"` -} - -// Unmarshal bytes b into the APRep struct. -func (a *APRep) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREP)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - expectedMsgType := msgtype.KRB_AP_REP - if a.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_AP_REP. Expected: %v; Actual: %v", expectedMsgType, a.MsgType) - } - return nil -} - -// Unmarshal bytes b into the APRep encrypted part struct. 
-func (a *EncAPRepPart) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncAPRepPart)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "AP_REP unmarshal error") - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APReq.go deleted file mode 100644 index fbf20000209..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/APReq.go +++ /dev/null @@ -1,150 +0,0 @@ -package messages - -import ( - "fmt" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -/*AP-REQ ::= [APPLICATION 14] SEQUENCE { -pvno [0] INTEGER (5), -msg-type [1] INTEGER (14), -ap-options [2] APOptions, -ticket [3] Ticket, -authenticator [4] EncryptedData -- Authenticator -} - -APOptions ::= KerberosFlags --- reserved(0), --- use-session-key(1), --- mutual-required(2)*/ - -type marshalAPReq struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - APOptions asn1.BitString `asn1:"explicit,tag:2"` - // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag - Ticket asn1.RawValue `asn1:"explicit,tag:3"` - Authenticator types.EncryptedData `asn1:"explicit,tag:4"` -} - -// APReq implements RFC 4120 KRB_AP_REQ: https://tools.ietf.org/html/rfc4120#section-5.5.1. -type APReq struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - APOptions asn1.BitString `asn1:"explicit,tag:2"` - Ticket Ticket `asn1:"explicit,tag:3"` - Authenticator types.EncryptedData `asn1:"explicit,tag:4"` -} - -// NewAPReq generates a new KRB_AP_REQ struct. -func NewAPReq(tkt Ticket, sessionKey types.EncryptionKey, auth types.Authenticator) (APReq, error) { - var a APReq - ed, err := encryptAuthenticator(auth, sessionKey, tkt) - if err != nil { - return a, krberror.Errorf(err, krberror.KRBMsgError, "error creating Authenticator for AP_REQ") - } - a = APReq{ - PVNO: iana.PVNO, - MsgType: msgtype.KRB_AP_REQ, - APOptions: types.NewKrbFlags(), - Ticket: tkt, - Authenticator: ed, - } - return a, nil -} - -// Encrypt Authenticator -func encryptAuthenticator(a types.Authenticator, sessionKey types.EncryptionKey, tkt Ticket) (types.EncryptedData, error) { - var ed types.EncryptedData - m, err := a.Marshal() - if err != nil { - return ed, krberror.Errorf(err, krberror.EncodingError, "marshaling error of EncryptedData form of Authenticator") - } - usage := authenticatorKeyUsage(tkt.SName) - ed, err = crypto.GetEncryptedData(m, sessionKey, uint32(usage), tkt.EncPart.KVNO) - if err != nil { - return ed, krberror.Errorf(err, krberror.EncryptingError, "error encrypting Authenticator") - } - return ed, nil -} - -// DecryptAuthenticator decrypts the Authenticator within the AP_REQ. -// sessionKey may simply be the key within the decrypted EncPart of the ticket within the AP_REQ. 
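A minimal acceptor-side sketch using the APReq helpers above (illustrative only; recovering the ticket session key is outside this snippet):

func verifyAPReq(raw []byte, sessionKey types.EncryptionKey) (types.Authenticator, error) {
	var apReq messages.APReq
	if err := apReq.Unmarshal(raw); err != nil {
		return types.Authenticator{}, err
	}
	// The session key comes from the decrypted EncPart of the ticket, as the
	// DecryptAuthenticator comment above notes.
	return apReq.DecryptAuthenticator(sessionKey)
}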
-func (a *APReq) DecryptAuthenticator(sessionKey types.EncryptionKey) (auth types.Authenticator, err error) { - usage := authenticatorKeyUsage(a.Ticket.SName) - ab, e := crypto.DecryptEncPart(a.Authenticator, sessionKey, uint32(usage)) - if e != nil { - err = fmt.Errorf("error decrypting authenticator: %v", e) - return - } - e = auth.Unmarshal(ab) - if e != nil { - err = fmt.Errorf("error unmarshaling authenticator") - return - } - return -} - -func authenticatorKeyUsage(pn types.PrincipalName) int { - if pn.NameString[0] == "krbtgt" { - return keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR - } - return keyusage.AP_REQ_AUTHENTICATOR -} - -// Unmarshal bytes b into the APReq struct. -func (a *APReq) Unmarshal(b []byte) error { - var m marshalAPReq - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.APREQ)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "unmarshal error of AP_REQ") - } - if m.MsgType != msgtype.KRB_AP_REQ { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AP_REQ. Expected: %v; Actual: %v", msgtype.KRB_AP_REQ, m.MsgType) - } - a.PVNO = m.PVNO - a.MsgType = m.MsgType - a.APOptions = m.APOptions - a.Authenticator = m.Authenticator - a.Ticket, err = UnmarshalTicket(m.Ticket.Bytes) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "unmarshaling error of Ticket within AP_REQ") - } - return nil -} - -// Marshal APReq struct. -func (a *APReq) Marshal() ([]byte, error) { - m := marshalAPReq{ - PVNO: a.PVNO, - MsgType: a.MsgType, - APOptions: a.APOptions, - Authenticator: a.Authenticator, - } - var b []byte - b, err := a.Ticket.Marshal() - if err != nil { - return b, err - } - m.Ticket = asn1.RawValue{ - Class: asn1.ClassContextSpecific, - IsCompound: true, - Tag: 3, - Bytes: b, - } - mk, err := asn1.Marshal(m) - if err != nil { - return mk, krberror.Errorf(err, krberror.EncodingError, "marshaling error of AP_REQ") - } - mk = asn1tools.AddASNAppTag(mk, asnAppTag.APREQ) - return mk, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCRep.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCRep.go deleted file mode 100644 index f5130d3f03d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCRep.go +++ /dev/null @@ -1,312 +0,0 @@ -package messages - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.4.2 - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/config" - "gopkg.in/jcmturner/gokrb5.v5/credentials" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/flags" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/patype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -type marshalKDCRep struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - PAData types.PADataSequence `asn1:"explicit,optional,tag:2"` - CRealm string `asn1:"generalstring,explicit,tag:3"` - CName types.PrincipalName `asn1:"explicit,tag:4"` - // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag - Ticket asn1.RawValue `asn1:"explicit,tag:5"` - EncPart types.EncryptedData `asn1:"explicit,tag:6"` -} - -// KDCRepFields represents the KRB_KDC_REP fields. 
-type KDCRepFields struct { - PVNO int - MsgType int - PAData []types.PAData - CRealm string - CName types.PrincipalName - Ticket Ticket - EncPart types.EncryptedData - DecryptedEncPart EncKDCRepPart -} - -// ASRep implements RFC 4120 KRB_AS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. -type ASRep struct { - KDCRepFields -} - -// TGSRep implements RFC 4120 KRB_TGS_REP: https://tools.ietf.org/html/rfc4120#section-5.4.2. -type TGSRep struct { - KDCRepFields -} - -// EncKDCRepPart is the encrypted part of KRB_KDC_REP. -type EncKDCRepPart struct { - Key types.EncryptionKey `asn1:"explicit,tag:0"` - LastReqs []LastReq `asn1:"explicit,tag:1"` - Nonce int `asn1:"explicit,tag:2"` - KeyExpiration time.Time `asn1:"generalized,explicit,optional,tag:3"` - Flags asn1.BitString `asn1:"explicit,tag:4"` - AuthTime time.Time `asn1:"generalized,explicit,tag:5"` - StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` - EndTime time.Time `asn1:"generalized,explicit,tag:7"` - RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` - SRealm string `asn1:"generalstring,explicit,tag:9"` - SName types.PrincipalName `asn1:"explicit,tag:10"` - CAddr []types.HostAddress `asn1:"explicit,optional,tag:11"` - EncPAData types.PADataSequence `asn1:"explicit,optional,tag:12"` -} - -// LastReq part of KRB_KDC_REP. -type LastReq struct { - LRType int32 `asn1:"explicit,tag:0"` - LRValue time.Time `asn1:"generalized,explicit,tag:1"` -} - -// Unmarshal bytes b into the ASRep struct. -func (k *ASRep) Unmarshal(b []byte) error { - var m marshalKDCRep - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREP)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - if m.MsgType != msgtype.KRB_AS_REP { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REP. Expected: %v; Actual: %v", msgtype.KRB_AS_REP, m.MsgType) - } - //Process the raw ticket within - tkt, err := UnmarshalTicket(m.Ticket.Bytes) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within AS_REP") - } - k.KDCRepFields = KDCRepFields{ - PVNO: m.PVNO, - MsgType: m.MsgType, - PAData: m.PAData, - CRealm: m.CRealm, - CName: m.CName, - Ticket: tkt, - EncPart: m.EncPart, - } - return nil -} - -// Unmarshal bytes b into the TGSRep struct. -func (k *TGSRep) Unmarshal(b []byte) error { - var m marshalKDCRep - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREP)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - if m.MsgType != msgtype.KRB_TGS_REP { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an TGS_REP. Expected: %v; Actual: %v", msgtype.KRB_TGS_REP, m.MsgType) - } - //Process the raw ticket within - tkt, err := UnmarshalTicket(m.Ticket.Bytes) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling Ticket within TGS_REP") - } - k.KDCRepFields = KDCRepFields{ - PVNO: m.PVNO, - MsgType: m.MsgType, - PAData: m.PAData, - CRealm: m.CRealm, - CName: m.CName, - Ticket: tkt, - EncPart: m.EncPart, - } - return nil -} - -// Unmarshal bytes b into encrypted part of KRB_KDC_REP. 
-func (e *EncKDCRepPart) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncASRepPart)) - if err != nil { - // Try using tag 26 - /* Ref: RFC 4120 - Compatibility note: Some implementations unconditionally send an - encrypted EncTGSRepPart (application tag number 26) in this field - regardless of whether the reply is a AS-REP or a TGS-REP. In the - interest of compatibility, implementors MAY relax the check on the - tag number of the decrypted ENC-PART.*/ - _, err = asn1.UnmarshalWithParams(b, e, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncTGSRepPart)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part within KDC_REP") - } - } - return nil -} - -// DecryptEncPart decrypts the encrypted part of an AS_REP. -func (k *ASRep) DecryptEncPart(c *credentials.Credentials) (types.EncryptionKey, error) { - var key types.EncryptionKey - var err error - if c.HasKeytab() { - key, err = c.Keytab.GetEncryptionKey(k.CName.NameString, k.CRealm, k.EncPart.KVNO, k.EncPart.EType) - if err != nil { - return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") - } - } - if c.HasPassword() { - key, _, err = crypto.GetKeyFromPassword(c.Password, k.CName, k.CRealm, k.EncPart.EType, k.PAData) - if err != nil { - return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") - } - } - if !c.HasKeytab() && !c.HasPassword() { - return key, krberror.NewErrorf(krberror.DecryptingError, "no secret available in credentials to perform decryption of AS_REP encrypted part") - } - b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.AS_REP_ENCPART) - if err != nil { - return key, krberror.Errorf(err, krberror.DecryptingError, "error decrypting AS_REP encrypted part") - } - var denc EncKDCRepPart - err = denc.Unmarshal(b) - if err != nil { - return key, krberror.Errorf(err, krberror.EncodingError, "error unmarshaling decrypted encpart of AS_REP") - } - k.DecryptedEncPart = denc - return key, nil -} - -// IsValid checks the validity of the AS_REP message. -func (k *ASRep) IsValid(cfg *config.Config, creds *credentials.Credentials, asReq ASReq) (bool, error) { - //Ref RFC 4120 Section 3.1.5 - if k.CName.NameType != asReq.ReqBody.CName.NameType || k.CName.NameString == nil { - return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) - } - for i := range k.CName.NameString { - if k.CName.NameString[i] != asReq.ReqBody.CName.NameString[i] { - return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.CName, k.CName) - } - } - if k.CRealm != asReq.ReqBody.Realm { - return false, krberror.NewErrorf(krberror.KRBMsgError, "CRealm in response does not match what was requested. 
Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.CRealm) - } - key, err := k.DecryptEncPart(creds) - if err != nil { - return false, krberror.Errorf(err, krberror.DecryptingError, "error decrypting EncPart of AS_REP") - } - if k.DecryptedEncPart.Nonce != asReq.ReqBody.Nonce { - return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") - } - if k.DecryptedEncPart.SName.NameType != asReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { - return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) - } - for i := range k.DecryptedEncPart.SName.NameString { - if k.DecryptedEncPart.SName.NameString[i] != asReq.ReqBody.SName.NameString[i] { - return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", asReq.ReqBody.SName, k.DecryptedEncPart.SName) - } - } - if k.DecryptedEncPart.SRealm != asReq.ReqBody.Realm { - return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. Requested: %s; Reply: %s", asReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) - } - if len(asReq.ReqBody.Addresses) > 0 { - if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, asReq.ReqBody.Addresses) { - return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the AS_REP do not match those listed in the AS_REQ") - } - } - t := time.Now().UTC() - if t.Sub(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(t) > cfg.LibDefaults.Clockskew { - return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds", cfg.LibDefaults.Clockskew.Seconds()) - } - // RFC 6806 https://tools.ietf.org/html/rfc6806.html#section-11 - if asReq.PAData.Contains(patype.PA_REQ_ENC_PA_REP) && types.IsFlagSet(&k.DecryptedEncPart.Flags, flags.EncPARep) { - if len(k.DecryptedEncPart.EncPAData) < 2 || !k.DecryptedEncPart.EncPAData.Contains(patype.PA_FX_FAST) { - return false, krberror.NewErrorf(krberror.KRBMsgError, "KDC did not respond appropriately to FAST negotiation") - } - for _, pa := range k.DecryptedEncPart.EncPAData { - if pa.PADataType == patype.PA_REQ_ENC_PA_REP { - var pafast types.PAReqEncPARep - err := pafast.Unmarshal(pa.PADataValue) - if err != nil { - return false, krberror.Errorf(err, krberror.EncodingError, "KDC FAST negotiation response error, could not unmarshal PA_REQ_ENC_PA_REP") - } - etype, err := crypto.GetChksumEtype(pafast.ChksumType) - if err != nil { - return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response error") - } - ab, _ := asReq.Marshal() - if !etype.VerifyChecksum(key.KeyValue, ab, pafast.Chksum, keyusage.KEY_USAGE_AS_REQ) { - return false, krberror.Errorf(err, krberror.ChksumError, "KDC FAST negotiation response checksum invalid") - } - } - } - } - return true, nil -} - -// DecryptEncPart decrypts the encrypted part of a TGS_REP.
-func (k *TGSRep) DecryptEncPart(key types.EncryptionKey) error { - b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.TGS_REP_ENCPART_SESSION_KEY) - if err != nil { - return krberror.Errorf(err, krberror.DecryptingError, "error decrypting TGS_REP EncPart") - } - var denc EncKDCRepPart - err = denc.Unmarshal(b) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part") - } - k.DecryptedEncPart = denc - return nil -} - -// IsValid checks the validity of the TGS_REP message. -func (k *TGSRep) IsValid(cfg *config.Config, tgsReq TGSReq) (bool, error) { - if k.CName.NameType != tgsReq.ReqBody.CName.NameType || k.CName.NameString == nil { - return false, krberror.NewErrorf(krberror.KRBMsgError, "CName type in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) - } - for i := range k.CName.NameString { - if k.CName.NameString[i] != tgsReq.ReqBody.CName.NameString[i] { - return false, krberror.NewErrorf(krberror.KRBMsgError, "CName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.CName, k.CName) - } - } - if k.Ticket.Realm != tgsReq.ReqBody.Realm { - return false, krberror.NewErrorf(krberror.KRBMsgError, "realm in response ticket does not match what was requested. Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.Ticket.Realm) - } - if k.DecryptedEncPart.Nonce != tgsReq.ReqBody.Nonce { - return false, krberror.NewErrorf(krberror.KRBMsgError, "possible replay attack, nonce in response does not match that in request") - } - //if k.Ticket.SName.NameType != tgsReq.ReqBody.SName.NameType || k.Ticket.SName.NameString == nil { - // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.Ticket.SName) - //} - //for i := range k.Ticket.SName.NameString { - // if k.Ticket.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { - // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response ticket does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.Ticket.SName) - // } - //} - //if k.DecryptedEncPart.SName.NameType != tgsReq.ReqBody.SName.NameType || k.DecryptedEncPart.SName.NameString == nil { - // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %v; Reply: %v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) - //} - //for i := range k.DecryptedEncPart.SName.NameString { - // if k.DecryptedEncPart.SName.NameString[i] != tgsReq.ReqBody.SName.NameString[i] { - // return false, krberror.NewErrorf(krberror.KRBMsgError, "SName in response does not match what was requested. Requested: %+v; Reply: %+v", tgsReq.ReqBody.SName, k.DecryptedEncPart.SName) - // } - //} - if k.DecryptedEncPart.SRealm != tgsReq.ReqBody.Realm { - return false, krberror.NewErrorf(krberror.KRBMsgError, "SRealm in response does not match what was requested. 
Requested: %s; Reply: %s", tgsReq.ReqBody.Realm, k.DecryptedEncPart.SRealm) - } - if len(k.DecryptedEncPart.CAddr) > 0 { - if !types.HostAddressesEqual(k.DecryptedEncPart.CAddr, tgsReq.ReqBody.Addresses) { - return false, krberror.NewErrorf(krberror.KRBMsgError, "addresses listed in the TGS_REP do not match those listed in the TGS_REQ") - } - } - if time.Since(k.DecryptedEncPart.StartTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.StartTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { - if time.Since(k.DecryptedEncPart.AuthTime) > cfg.LibDefaults.Clockskew || k.DecryptedEncPart.AuthTime.Sub(time.Now().UTC()) > cfg.LibDefaults.Clockskew { - return false, krberror.NewErrorf(krberror.KRBMsgError, "clock skew with KDC too large. Greater than %v seconds.", cfg.LibDefaults.Clockskew.Seconds()) - } - } - return true, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCReq.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCReq.go deleted file mode 100644 index 3a1e6ebfcc6..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KDCReq.go +++ /dev/null @@ -1,402 +0,0 @@ -package messages - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.4.1 - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/config" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/flags" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/nametype" - "gopkg.in/jcmturner/gokrb5.v5/iana/patype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -type marshalKDCReq struct { - PVNO int `asn1:"explicit,tag:1"` - MsgType int `asn1:"explicit,tag:2"` - PAData types.PADataSequence `asn1:"explicit,optional,tag:3"` - ReqBody asn1.RawValue `asn1:"explicit,tag:4"` -} - -// KDCReqFields represents the KRB_KDC_REQ fields. -type KDCReqFields struct { - PVNO int - MsgType int - PAData types.PADataSequence - ReqBody KDCReqBody - Renewal bool -} - -// ASReq implements RFC 4120 KRB_AS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. -type ASReq struct { - KDCReqFields -} - -// TGSReq implements RFC 4120 KRB_TGS_REQ: https://tools.ietf.org/html/rfc4120#section-5.4.1. -type TGSReq struct { - KDCReqFields -} - -type marshalKDCReqBody struct { - KDCOptions asn1.BitString `asn1:"explicit,tag:0"` - CName types.PrincipalName `asn1:"explicit,optional,tag:1"` - Realm string `asn1:"generalstring,explicit,tag:2"` - SName types.PrincipalName `asn1:"explicit,optional,tag:3"` - From time.Time `asn1:"generalized,explicit,optional,tag:4"` - Till time.Time `asn1:"generalized,explicit,tag:5"` - RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` - Nonce int `asn1:"explicit,tag:7"` - EType []int32 `asn1:"explicit,tag:8"` - Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` - EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` - // Ticket needs to be a raw value as it is wrapped in an APPLICATION tag - AdditionalTickets asn1.RawValue `asn1:"explicit,optional,tag:11"` -} - -// KDCReqBody implements the KRB_KDC_REQ request body.
-type KDCReqBody struct { - KDCOptions asn1.BitString `asn1:"explicit,tag:0"` - CName types.PrincipalName `asn1:"explicit,optional,tag:1"` - Realm string `asn1:"generalstring,explicit,tag:2"` - SName types.PrincipalName `asn1:"explicit,optional,tag:3"` - From time.Time `asn1:"generalized,explicit,optional,tag:4"` - Till time.Time `asn1:"generalized,explicit,tag:5"` - RTime time.Time `asn1:"generalized,explicit,optional,tag:6"` - Nonce int `asn1:"explicit,tag:7"` - EType []int32 `asn1:"explicit,tag:8"` - Addresses []types.HostAddress `asn1:"explicit,optional,tag:9"` - EncAuthData types.EncryptedData `asn1:"explicit,optional,tag:10"` - AdditionalTickets []Ticket `asn1:"explicit,optional,tag:11"` -} - -// NewASReqForTGT generates a new KRB_AS_REQ struct for a TGT request. -func NewASReqForTGT(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { - sname := types.PrincipalName{ - NameType: nametype.KRB_NT_SRV_INST, - NameString: []string{"krbtgt", realm}, - } - return NewASReq(realm, c, cname, sname) -} - -// NewASReqForChgPasswd generates a new KRB_AS_REQ struct for a change password request. -func NewASReqForChgPasswd(realm string, c *config.Config, cname types.PrincipalName) (ASReq, error) { - sname := types.PrincipalName{ - NameType: nametype.KRB_NT_PRINCIPAL, - NameString: []string{"kadmin", "changepw"}, - } - return NewASReq(realm, c, cname, sname) -} - -// NewASReq generates a new KRB_AS_REQ struct for a given SNAME. -func NewASReq(realm string, c *config.Config, cname, sname types.PrincipalName) (ASReq, error) { - nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) - if err != nil { - return ASReq{}, err - } - t := time.Now().UTC() - // Copy the default options to make this thread safe - kopts := types.NewKrbFlags() - copy(kopts.Bytes, c.LibDefaults.KDCDefaultOptions.Bytes) - kopts.BitLength = c.LibDefaults.KDCDefaultOptions.BitLength - a := ASReq{ - KDCReqFields{ - PVNO: iana.PVNO, - MsgType: msgtype.KRB_AS_REQ, - PAData: types.PADataSequence{}, - ReqBody: KDCReqBody{ - KDCOptions: kopts, - Realm: realm, - CName: cname, - SName: sname, - Till: t.Add(c.LibDefaults.TicketLifetime), - Nonce: int(nonce.Int64()), - EType: c.LibDefaults.DefaultTktEnctypeIDs, - }, - }, - } - if c.LibDefaults.Forwardable { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable) - } - if c.LibDefaults.Canonicalize { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize) - } - if c.LibDefaults.Proxiable { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable) - } - if c.LibDefaults.RenewLifetime != 0 { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) - a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) - } - if !c.LibDefaults.NoAddresses { - ha, err := types.LocalHostAddresses() - if err != nil { - return a, fmt.Errorf("could not get local addresses: %v", err) - } - ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) - a.ReqBody.Addresses = ha - } - return a, nil -} - -// NewTGSReq generates a new KRB_TGS_REQ struct.
-func NewTGSReq(cname types.PrincipalName, kdcRealm string, c *config.Config, tkt Ticket, sessionKey types.EncryptionKey, spn types.PrincipalName, renewal bool) (TGSReq, error) { - nonce, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32)) - if err != nil { - return TGSReq{}, err - } - t := time.Now().UTC() - a := TGSReq{ - KDCReqFields{ - PVNO: iana.PVNO, - MsgType: msgtype.KRB_TGS_REQ, - ReqBody: KDCReqBody{ - KDCOptions: types.NewKrbFlags(), - Realm: kdcRealm, - SName: spn, - Till: t.Add(c.LibDefaults.TicketLifetime), - Nonce: int(nonce.Int64()), - EType: c.LibDefaults.DefaultTGSEnctypeIDs, - }, - Renewal: renewal, - }, - } - if c.LibDefaults.Forwardable { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Forwardable) - } - if c.LibDefaults.Canonicalize { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Canonicalize) - } - if c.LibDefaults.Proxiable { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Proxiable) - } - if c.LibDefaults.RenewLifetime > time.Duration(0) { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) - a.ReqBody.RTime = t.Add(c.LibDefaults.RenewLifetime) - } - if !c.LibDefaults.NoAddresses { - ha, err := types.LocalHostAddresses() - if err != nil { - return a, fmt.Errorf("could not get local addresses: %v", err) - } - ha = append(ha, types.HostAddressesFromNetIPs(c.LibDefaults.ExtraAddresses)...) - a.ReqBody.Addresses = ha - } - if renewal { - types.SetFlag(&a.ReqBody.KDCOptions, flags.Renew) - types.SetFlag(&a.ReqBody.KDCOptions, flags.Renewable) - } - auth, err := types.NewAuthenticator(tkt.Realm, cname) - if err != nil { - return a, krberror.Errorf(err, krberror.KRBMsgError, "error generating new authenticator") - } - // Add the CName to make validation of the reply easier - a.ReqBody.CName = auth.CName - b, err := a.ReqBody.Marshal() - if err != nil { - return a, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ body") - } - etype, err := crypto.GetEtype(sessionKey.KeyType) - if err != nil { - return a, krberror.Errorf(err, krberror.EncryptingError, "error getting etype to encrypt authenticator") - } - cb, err := etype.GetChecksumHash(sessionKey.KeyValue, b, keyusage.TGS_REQ_PA_TGS_REQ_AP_REQ_AUTHENTICATOR_CHKSUM) - if err != nil { - return a, krberror.Errorf(err, krberror.ChksumError, "error getting etype checksum hash") - } - auth.Cksum = types.Checksum{ - CksumType: etype.GetHashID(), - Checksum: cb, - } - apReq, err := NewAPReq(tkt, sessionKey, auth) - if err != nil { - return a, krberror.Errorf(err, krberror.KRBMsgError, "error generating new AP_REQ") - } - apb, err := apReq.Marshal() - if err != nil { - return a, krberror.Errorf(err, krberror.EncodingError, "error marshaling AP_REQ for pre-authentication data") - } - a.PAData = types.PADataSequence{ - types.PAData{ - PADataType: patype.PA_TGS_REQ, - PADataValue: apb, - }, - } - return a, nil -} - -// Unmarshal bytes b into the ASReq struct. -func (k *ASReq) Unmarshal(b []byte) error { - var m marshalKDCReq - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.ASREQ)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling AS_REQ") - } - expectedMsgType := msgtype.KRB_AS_REQ - if m.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate an AS_REQ. 
Expected: %v; Actual: %v", expectedMsgType, m.MsgType) - } - var reqb KDCReqBody - err = reqb.Unmarshal(m.ReqBody.Bytes) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error processing AS_REQ body") - } - k.MsgType = m.MsgType - k.PAData = m.PAData - k.PVNO = m.PVNO - k.ReqBody = reqb - return nil -} - -// Unmarshal bytes b into the TGSReq struct. -func (k *TGSReq) Unmarshal(b []byte) error { - var m marshalKDCReq - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.TGSREQ)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling TGS_REQ") - } - expectedMsgType := msgtype.KRB_TGS_REQ - if m.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a TGS_REQ. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) - } - var reqb KDCReqBody - err = reqb.Unmarshal(m.ReqBody.Bytes) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error processing TGS_REQ body") - } - k.MsgType = m.MsgType - k.PAData = m.PAData - k.PVNO = m.PVNO - k.ReqBody = reqb - return nil -} - -// Unmarshal bytes b into the KRB_KDC_REQ body struct. -func (k *KDCReqBody) Unmarshal(b []byte) error { - var m marshalKDCReqBody - _, err := asn1.Unmarshal(b, &m) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling KDC_REQ body") - } - k.KDCOptions = m.KDCOptions - if len(k.KDCOptions.Bytes) < 4 { - tb := make([]byte, 4-len(k.KDCOptions.Bytes)) - k.KDCOptions.Bytes = append(tb, k.KDCOptions.Bytes...) - k.KDCOptions.BitLength = len(k.KDCOptions.Bytes) * 8 - } - k.CName = m.CName - k.Realm = m.Realm - k.SName = m.SName - k.From = m.From - k.Till = m.Till - k.RTime = m.RTime - k.Nonce = m.Nonce - k.EType = m.EType - k.Addresses = m.Addresses - k.EncAuthData = m.EncAuthData - if len(m.AdditionalTickets.Bytes) > 0 { - k.AdditionalTickets, err = UnmarshalTicketsSequence(m.AdditionalTickets) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling additional tickets") - } - } - return nil -} - -// Marshal ASReq struct. -func (k *ASReq) Marshal() ([]byte, error) { - m := marshalKDCReq{ - PVNO: k.PVNO, - MsgType: k.MsgType, - PAData: k.PAData, - } - b, err := k.ReqBody.Marshal() - if err != nil { - var mk []byte - return mk, err - } - m.ReqBody = asn1.RawValue{ - Class: asn1.ClassContextSpecific, - IsCompound: true, - Tag: 4, - Bytes: b, - } - mk, err := asn1.Marshal(m) - if err != nil { - return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling AS_REQ") - } - mk = asn1tools.AddASNAppTag(mk, asnAppTag.ASREQ) - return mk, nil -} - -// Marshal TGSReq struct. -func (k *TGSReq) Marshal() ([]byte, error) { - m := marshalKDCReq{ - PVNO: k.PVNO, - MsgType: k.MsgType, - PAData: k.PAData, - } - b, err := k.ReqBody.Marshal() - if err != nil { - var mk []byte - return mk, err - } - m.ReqBody = asn1.RawValue{ - Class: asn1.ClassContextSpecific, - IsCompound: true, - Tag: 4, - Bytes: b, - } - mk, err := asn1.Marshal(m) - if err != nil { - return mk, krberror.Errorf(err, krberror.EncodingError, "error marshaling TGS_REQ") - } - mk = asn1tools.AddASNAppTag(mk, asnAppTag.TGSREQ) - return mk, nil -} - -// Marshal KRB_KDC_REQ body struct.
-func (k *KDCReqBody) Marshal() ([]byte, error) { - var b []byte - m := marshalKDCReqBody{ - KDCOptions: k.KDCOptions, - CName: k.CName, - Realm: k.Realm, - SName: k.SName, - From: k.From, - Till: k.Till, - RTime: k.RTime, - Nonce: k.Nonce, - EType: k.EType, - Addresses: k.Addresses, - EncAuthData: k.EncAuthData, - } - rawtkts, err := MarshalTicketSequence(k.AdditionalTickets) - if err != nil { - return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body additional tickets") - } - //The asn1.RawValue needs the tag set on it for where it sits in the KDCReqBody - rawtkts.Tag = 11 - if len(rawtkts.Bytes) > 0 { - m.AdditionalTickets = rawtkts - } - b, err = asn1.Marshal(m) - if err != nil { - return b, krberror.Errorf(err, krberror.EncodingError, "error in marshaling KDC request body") - } - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBCred.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBCred.go deleted file mode 100644 index 5724e1d9e92..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBCred.go +++ /dev/null @@ -1,102 +0,0 @@ -package messages - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -type marshalKRBCred struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - Tickets asn1.RawValue `asn1:"explicit,tag:2"` - EncPart types.EncryptedData `asn1:"explicit,tag:3"` -} - -// KRBCred implements RFC 4120 KRB_CRED: https://tools.ietf.org/html/rfc4120#section-5.8.1. -type KRBCred struct { - PVNO int - MsgType int - Tickets []Ticket - EncPart types.EncryptedData - DecryptedEncPart EncKrbCredPart -} - -// EncKrbCredPart is the encrypted part of KRB_CRED. -type EncKrbCredPart struct { - TicketInfo []KrbCredInfo `asn1:"explicit,tag:0"` - Nouce int `asn1:"optional,explicit,tag:1"` - Timestamp time.Time `asn1:"generalized,optional,explicit,tag:2"` - Usec int `asn1:"optional,explicit,tag:3"` - SAddress types.HostAddress `asn1:"optional,explicit,tag:4"` - RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` -} - -// KrbCredInfo is the KRB_CRED_INFO part of KRB_CRED. -type KrbCredInfo struct { - Key types.EncryptionKey `asn1:"explicit,tag:0"` - PRealm string `asn1:"generalstring,optional,explicit,tag:1"` - PName types.PrincipalName `asn1:"optional,explicit,tag:2"` - Flags asn1.BitString `asn1:"optional,explicit,tag:3"` - AuthTime time.Time `asn1:"generalized,optional,explicit,tag:4"` - StartTime time.Time `asn1:"generalized,optional,explicit,tag:5"` - EndTime time.Time `asn1:"generalized,optional,explicit,tag:6"` - RenewTill time.Time `asn1:"generalized,optional,explicit,tag:7"` - SRealm string `asn1:"optional,explicit,ia5,tag:8"` - SName types.PrincipalName `asn1:"optional,explicit,tag:9"` - CAddr types.HostAddresses `asn1:"optional,explicit,tag:10"` -} - -// Unmarshal bytes b into the KRBCred struct.
-func (k *KRBCred) Unmarshal(b []byte) error { - var m marshalKRBCred - _, err := asn1.UnmarshalWithParams(b, &m, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBCred)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - expectedMsgType := msgtype.KRB_CRED - if m.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_CRED. Expected: %v; Actual: %v", expectedMsgType, m.MsgType) - } - k.PVNO = m.PVNO - k.MsgType = m.MsgType - k.EncPart = m.EncPart - if len(m.Tickets.Bytes) > 0 { - k.Tickets, err = UnmarshalTicketsSequence(m.Tickets) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling tickets within KRB_CRED") - } - } - return nil -} - -// DecryptEncPart decrypts the encrypted part of a KRB_CRED. -func (k *KRBCred) DecryptEncPart(key types.EncryptionKey) error { - b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_CRED_ENCPART) - if err != nil { - return krberror.Errorf(err, krberror.DecryptingError, "error decrypting KRB_CRED EncPart") - } - var denc EncKrbCredPart - err = denc.Unmarshal(b) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling encrypted part of KRB_CRED") - } - k.DecryptedEncPart = denc - return nil -} - -// Unmarshal bytes b into the encrypted part of KRB_CRED. -func (k *EncKrbCredPart) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbCredPart)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "error unmarshaling EncKrbCredPart") - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBError.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBError.go deleted file mode 100644 index a5d8cd55791..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBError.go +++ /dev/null @@ -1,83 +0,0 @@ -// Package messages implements Kerberos 5 message types and methods. -package messages - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/errorcode" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// KRBError implements RFC 4120 KRB_ERROR: https://tools.ietf.org/html/rfc4120#section-5.9.1. -type KRBError struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - CTime time.Time `asn1:"generalized,optional,explicit,tag:2"` - Cusec int `asn1:"optional,explicit,tag:3"` - STime time.Time `asn1:"generalized,explicit,tag:4"` - Susec int `asn1:"explicit,tag:5"` - ErrorCode int32 `asn1:"explicit,tag:6"` - CRealm string `asn1:"generalstring,optional,explicit,tag:7"` - CName types.PrincipalName `asn1:"optional,explicit,tag:8"` - Realm string `asn1:"generalstring,explicit,tag:9"` - SName types.PrincipalName `asn1:"explicit,tag:10"` - EText string `asn1:"generalstring,optional,explicit,tag:11"` - EData []byte `asn1:"optional,explicit,tag:12"` -} - -// NewKRBError creates a new KRBError. 
-func NewKRBError(sname types.PrincipalName, realm string, code int32, etext string) KRBError { - t := time.Now().UTC() - return KRBError{ - PVNO: iana.PVNO, - MsgType: msgtype.KRB_ERROR, - STime: t, - Susec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), - ErrorCode: code, - SName: sname, - Realm: realm, - EText: etext, - } -} - -// Unmarshal bytes b into the KRBError struct. -func (k *KRBError) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBError)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "KRB_ERROR unmarshal error") - } - expectedMsgType := msgtype.KRB_ERROR - if k.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_ERROR. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) - } - return nil -} - -// Error method implementing error interface on KRBError struct. -func (k KRBError) Error() string { - etxt := fmt.Sprintf("KRB Error: %s", errorcode.Lookup(k.ErrorCode)) - if k.EText != "" { - etxt = fmt.Sprintf("%s - %s", etxt, k.EText) - } - return etxt -} - -func processUnmarshalReplyError(b []byte, err error) error { - switch err.(type) { - case asn1.StructuralError: - var krberr KRBError - tmperr := krberr.Unmarshal(b) - if tmperr != nil { - return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") - } - return krberr - default: - return krberror.Errorf(err, krberror.EncodingError, "failed to unmarshal KDC's reply") - } -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBPriv.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBPriv.go deleted file mode 100644 index f342793ce53..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBPriv.go +++ /dev/null @@ -1,108 +0,0 @@ -package messages - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// KRBPriv implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.7.1. -type KRBPriv struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - EncPart types.EncryptedData `asn1:"explicit,tag:3"` - DecryptedEncPart EncKrbPrivPart `asn1:"optional,omitempty"` // Not part of ASN1 bytes so marked as optional so unmarshalling works -} - -// EncKrbPrivPart is the encrypted part of KRB_PRIV. -type EncKrbPrivPart struct { - UserData []byte `asn1:"explicit,tag:0"` - Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` - Usec int `asn1:"optional,explicit,tag:2"` - SequenceNumber int64 `asn1:"optional,explicit,tag:3"` - SAddress types.HostAddress `asn1:"explicit,tag:4"` - RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` -} - -// NewKRBPriv returns a new KRBPriv type. -func NewKRBPriv(part EncKrbPrivPart) KRBPriv { - return KRBPriv{ - PVNO: iana.PVNO, - MsgType: msgtype.KRB_PRIV, - DecryptedEncPart: part, - } -} - -// Unmarshal bytes b into the KRBPriv struct. 
-func (k *KRBPriv) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBPriv)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - expectedMsgType := msgtype.KRB_PRIV - if k.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_PRIV. Expected: %v; Actual: %v", expectedMsgType, k.MsgType) - } - return nil -} - -// Unmarshal bytes b into the EncKrbPrivPart struct. -func (k *EncKrbPrivPart) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, k, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.EncKrbPrivPart)) - if err != nil { - return krberror.Errorf(err, krberror.EncodingError, "KRB_PRIV unmarshal error") - } - return nil -} - -// Marshal the KRBPriv. -func (k *KRBPriv) Marshal() ([]byte, error) { - tk := KRBPriv{ - PVNO: k.PVNO, - MsgType: k.MsgType, - EncPart: k.EncPart, - } - b, err := asn1.Marshal(tk) - if err != nil { - return []byte{}, err - } - b = asn1tools.AddASNAppTag(b, asnAppTag.KRBPriv) - return b, nil -} - -// EncryptEncPart encrypts the DecryptedEncPart within the KRBPriv. -// Use to prepare for marshaling. -func (k *KRBPriv) EncryptEncPart(key types.EncryptionKey) error { - b, err := asn1.Marshal(k.DecryptedEncPart) - if err != nil { - return err - } - b = asn1tools.AddASNAppTag(b, asnAppTag.EncKrbPrivPart) - k.EncPart, err = crypto.GetEncryptedData(b, key, keyusage.KRB_PRIV_ENCPART, 1) - if err != nil { - return err - } - return nil -} - -// DecryptEncPart decrypts the encrypted part of the KRBPriv message. -func (k *KRBPriv) DecryptEncPart(key types.EncryptionKey) error { - b, err := crypto.DecryptEncPart(k.EncPart, key, keyusage.KRB_PRIV_ENCPART) - if err != nil { - return fmt.Errorf("error decrypting KRBPriv EncPart: %v", err) - } - err = k.DecryptedEncPart.Unmarshal(b) - if err != nil { - return fmt.Errorf("error unmarshaling encrypted part: %v", err) - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBSafe.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBSafe.go deleted file mode 100644 index 1658ad0db5f..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/KRBSafe.go +++ /dev/null @@ -1,61 +0,0 @@ -package messages - -import ( - "fmt" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/msgtype" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -/* -KRB-SAFE ::= [APPLICATION 20] SEQUENCE { - pvno [0] INTEGER (5), - msg-type [1] INTEGER (20), - safe-body [2] KRB-SAFE-BODY, - cksum [3] Checksum -} - -KRB-SAFE-BODY ::= SEQUENCE { - user-data [0] OCTET STRING, - timestamp [1] KerberosTime OPTIONAL, - usec [2] Microseconds OPTIONAL, - seq-number [3] UInt32 OPTIONAL, - s-address [4] HostAddress, - r-address [5] HostAddress OPTIONAL -} -*/ - -// KRBSafe implements RFC 4120 KRB_SAFE: https://tools.ietf.org/html/rfc4120#section-5.6.1. -type KRBSafe struct { - PVNO int `asn1:"explicit,tag:0"` - MsgType int `asn1:"explicit,tag:1"` - SafeBody KRBSafeBody `asn1:"explicit,tag:2"` - Cksum types.Checksum `asn1:"explicit,tag:3"` -} - -// KRBSafeBody implements the KRB_SAFE_BODY of KRB_SAFE. 
-type KRBSafeBody struct { - UserData []byte `asn1:"explicit,tag:0"` - Timestamp time.Time `asn1:"generalized,optional,explicit,tag:1"` - Usec int `asn1:"optional,explicit,tag:2"` - SequenceNumber int64 `asn1:"optional,explicit,tag:3"` - SAddress types.HostAddress `asn1:"explicit,tag:4"` - RAddress types.HostAddress `asn1:"optional,explicit,tag:5"` -} - -// Unmarshal bytes b into the KRBSafe struct. -func (s *KRBSafe) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, s, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.KRBSafe)) - if err != nil { - return processUnmarshalReplyError(b, err) - } - expectedMsgType := msgtype.KRB_SAFE - if s.MsgType != expectedMsgType { - return krberror.NewErrorf(krberror.KRBMsgError, "message ID does not indicate a KRB_SAFE. Expected: %v; Actual: %v", expectedMsgType, s.MsgType) - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/Ticket.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/Ticket.go deleted file mode 100644 index 75e7246ba45..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/messages/Ticket.go +++ /dev/null @@ -1,251 +0,0 @@ -package messages - -import ( - "crypto/rand" - "fmt" - "strings" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/adtype" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" - "gopkg.in/jcmturner/gokrb5.v5/iana/errorcode" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/keytab" - "gopkg.in/jcmturner/gokrb5.v5/krberror" - "gopkg.in/jcmturner/gokrb5.v5/pac" - "gopkg.in/jcmturner/gokrb5.v5/types" -) - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.3 - -// Ticket implements the Kerberos ticket. -type Ticket struct { - TktVNO int `asn1:"explicit,tag:0"` - Realm string `asn1:"generalstring,explicit,tag:1"` - SName types.PrincipalName `asn1:"explicit,tag:2"` - EncPart types.EncryptedData `asn1:"explicit,tag:3"` - DecryptedEncPart EncTicketPart `asn1:"optional"` // Not part of ASN1 bytes so marked as optional so unmarshalling works -} - -// EncTicketPart is the encrypted part of the Ticket. -type EncTicketPart struct { - Flags asn1.BitString `asn1:"explicit,tag:0"` - Key types.EncryptionKey `asn1:"explicit,tag:1"` - CRealm string `asn1:"generalstring,explicit,tag:2"` - CName types.PrincipalName `asn1:"explicit,tag:3"` - Transited TransitedEncoding `asn1:"explicit,tag:4"` - AuthTime time.Time `asn1:"generalized,explicit,tag:5"` - StartTime time.Time `asn1:"generalized,explicit,optional,tag:6"` - EndTime time.Time `asn1:"generalized,explicit,tag:7"` - RenewTill time.Time `asn1:"generalized,explicit,optional,tag:8"` - CAddr types.HostAddresses `asn1:"explicit,optional,tag:9"` - AuthorizationData types.AuthorizationData `asn1:"explicit,optional,tag:10"` -} - -// TransitedEncoding part of the ticket's encrypted part. -type TransitedEncoding struct { - TRType int32 `asn1:"explicit,tag:0"` - Contents []byte `asn1:"explicit,tag:1"` -} - -// NewTicket creates a new Ticket instance. 
-func NewTicket(cname types.PrincipalName, crealm string, sname types.PrincipalName, srealm string, flags asn1.BitString, sktab keytab.Keytab, eTypeID int32, kvno int, authTime, startTime, endTime, renewTill time.Time) (Ticket, types.EncryptionKey, error) { - etype, err := crypto.GetEtype(eTypeID) - if err != nil { - return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting etype for new ticket") - } - ks := etype.GetKeyByteSize() - kv := make([]byte, ks, ks) - rand.Read(kv) - sessionKey := types.EncryptionKey{ - KeyType: eTypeID, - KeyValue: kv, - } - etp := EncTicketPart{ - Flags: flags, - Key: sessionKey, - CRealm: crealm, - CName: cname, - Transited: TransitedEncoding{}, - AuthTime: authTime, - StartTime: startTime, - EndTime: endTime, - RenewTill: renewTill, - } - b, err := asn1.Marshal(etp) - if err != nil { - return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncodingError, "error marshalling ticket encpart") - } - b = asn1tools.AddASNAppTag(b, asnAppTag.EncTicketPart) - skey, err := sktab.GetEncryptionKey(sname.NameString, srealm, kvno, eTypeID) - if err != nil { - return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error getting encryption key for new ticket") - } - ed, err := crypto.GetEncryptedData(b, skey, keyusage.KDC_REP_TICKET, kvno) - if err != nil { - return Ticket{}, types.EncryptionKey{}, krberror.Errorf(err, krberror.EncryptingError, "error encrypting ticket encpart") - } - tkt := Ticket{ - TktVNO: iana.PVNO, - Realm: srealm, - SName: sname, - EncPart: ed, - } - return tkt, sessionKey, nil -} - -// Unmarshal bytes b into a Ticket struct. -func (t *Ticket) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket)) - return err -} - -// Marshal the Ticket. -func (t *Ticket) Marshal() ([]byte, error) { - b, err := asn1.Marshal(*t) - if err != nil { - return nil, err - } - b = asn1tools.AddASNAppTag(b, asnAppTag.Ticket) - return b, nil -} - -// Unmarshal bytes b into the EncTicketPart struct. -func (t *EncTicketPart) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.EncTicketPart)) - return err -} - -// UnmarshalTicket returns a ticket from the bytes provided. -func UnmarshalTicket(b []byte) (t Ticket, err error) { - _, err = asn1.UnmarshalWithParams(b, &t, fmt.Sprintf("application,explicit,tag:%d", asnAppTag.Ticket)) - return -} - -// UnmarshalTicketsSequence returns a slice of Tickets from a raw ASN1 value. -func UnmarshalTicketsSequence(in asn1.RawValue) ([]Ticket, error) { - //This is a workaround to a asn1 decoding issue in golang - https://github.com/golang/go/issues/17321. It's not pretty I'm afraid - //We pull out raw values from the larger raw value (that is actually the data of the sequence of raw values) and track our position moving along the data. 
- b := in.Bytes - // Ignore the head of the asn1 stream (1 byte for tag and those for the length) as this is what tells us it's a sequence but we're handling it ourselves - p := 1 + asn1tools.GetNumberBytesInLengthHeader(in.Bytes) - var tkts []Ticket - var raw asn1.RawValue - for p < (len(b)) { - _, err := asn1.UnmarshalWithParams(b[p:], &raw, fmt.Sprintf("application,tag:%d", asnAppTag.Ticket)) - if err != nil { - return nil, fmt.Errorf("unmarshaling sequence of tickets failed getting length of ticket: %v", err) - } - t, err := UnmarshalTicket(b[p:]) - if err != nil { - return nil, fmt.Errorf("unmarshaling sequence of tickets failed: %v", err) - } - p += len(raw.FullBytes) - tkts = append(tkts, t) - } - return tkts, nil -} - -// MarshalTicketSequence marshals a slice of Tickets returning an ASN1 raw value containing the ticket sequence. -func MarshalTicketSequence(tkts []Ticket) (asn1.RawValue, error) { - raw := asn1.RawValue{ - Class: 2, - IsCompound: true, - } - if len(tkts) < 1 { - // There are no tickets to marshal - return raw, nil - } - var btkts []byte - for i, t := range tkts { - b, err := t.Marshal() - if err != nil { - return raw, fmt.Errorf("error marshaling ticket number %d in sequence of tickets", i+1) - } - btkts = append(btkts, b...) - } - // The ASN1 wrapping consists of 2 bytes: - // 1st byte -> Identifier Octet - In this case a constructed SEQUENCE (ASN tag) - // 2nd byte -> The length (this will be the size indicated in the input bytes + 2 for the additional bytes we add here) - // Application Tag: - //| Byte: | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | - //| Value: | 0 | 1 | 1 | From the RFC spec 4120 | - //| Explanation | Defined by the ASN1 encoding rules for an application tag | A value of 1 indicates a constructed type | The ASN Application tag value | - btkts = append(asn1tools.MarshalLengthBytes(len(btkts)), btkts...) - btkts = append([]byte{byte(32 + asn1.TagSequence)}, btkts...) - raw.Bytes = btkts - // If we need to create the full bytes then identifier octet is "context-specific" = 128 + "constructed" + 32 + the wrapping explicit tag (11) - //fmt.Fprintf(os.Stderr, "mRaw fb: %v\n", raw.FullBytes) - return raw, nil -} - -// DecryptEncPart decrypts the encrypted part of the ticket. -func (t *Ticket) DecryptEncPart(keytab keytab.Keytab, ktprinc string) error { - var upn types.PrincipalName - realm := t.Realm - if ktprinc != "" { - var r string - upn, r = types.ParseSPNString(ktprinc) - if r != "" { - realm = r - } - } else { - upn = t.SName - } - key, err := keytab.GetEncryptionKey(upn.NameString, realm, t.EncPart.KVNO, t.EncPart.EType) - if err != nil { - return NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) - } - b, err := crypto.DecryptEncPart(t.EncPart, key, keyusage.KDC_REP_TICKET) - if err != nil { - return fmt.Errorf("error decrypting Ticket EncPart: %v", err) - } - var denc EncTicketPart - err = denc.Unmarshal(b) - if err != nil { - return fmt.Errorf("error unmarshaling encrypted part: %v", err) - } - t.DecryptedEncPart = denc - return nil -} - -// GetPACType returns a Microsoft PAC that has been extracted from the ticket and processed.
-func (t *Ticket) GetPACType(keytab keytab.Keytab, ktprinc string) (bool, pac.PACType, error) { - var isPAC bool - for _, ad := range t.DecryptedEncPart.AuthorizationData { - if ad.ADType == adtype.ADIfRelevant { - var ad2 types.AuthorizationData - err := ad2.Unmarshal(ad.ADData) - if err != nil { - continue - } - if ad2[0].ADType == adtype.ADWin2KPAC { - isPAC = true - var p pac.PACType - err = p.Unmarshal(ad2[0].ADData) - if err != nil { - return isPAC, p, fmt.Errorf("error unmarshaling PAC: %v", err) - } - var upn []string - if ktprinc != "" { - upn = strings.Split(ktprinc, "/") - } else { - upn = t.SName.NameString - } - key, err := keytab.GetEncryptionKey(upn, t.Realm, t.EncPart.KVNO, t.EncPart.EType) - if err != nil { - return isPAC, p, NewKRBError(t.SName, t.Realm, errorcode.KRB_AP_ERR_NOKEY, fmt.Sprintf("Could not get key from keytab: %v", err)) - } - err = p.ProcessPACInfoBuffers(key) - return isPAC, p, err - } - } - } - return isPAC, pac.PACType{}, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/claims.go deleted file mode 100644 index 54f5cc750e0..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/claims.go +++ /dev/null @@ -1,312 +0,0 @@ -package mstypes - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// Compression format assigned numbers. -const ( - CompressionFormatNone uint16 = 0 - CompressionFormatLZNT1 uint16 = 2 - CompressionFormatXPress uint16 = 3 - CompressionFormatXPressHuff uint16 = 4 -) - -// ClaimsSourceType -const ClaimsSourceTypeAD uint16 = 1 - -// Claim Type assigned numbers -const ( - ClaimTypeIDInt64 uint16 = 1 - ClaimTypeIDUInt64 uint16 = 2 - ClaimTypeIDString uint16 = 3 - ClaimsTypeIDBoolean uint16 = 6 -) - -// ClaimsBlob implements https://msdn.microsoft.com/en-us/library/hh554119.aspx -type ClaimsBlob struct { - Size uint32 - EncodedBlob []byte -} - -// ReadClaimsBlob reads a ClaimsBlob from the byte slice. 
-func ReadClaimsBlob(b *[]byte, p *int, e *binary.ByteOrder) (c ClaimsBlob) { - c.Size = ndr.ReadUint32(b, p, e) - c.EncodedBlob = ndr.ReadBytes(b, p, int(c.Size), e) - return -} - -// ClaimsSetMetadata implements https://msdn.microsoft.com/en-us/library/hh554073.aspx -type ClaimsSetMetadata struct { - claimsSetSize uint32 - ClaimsSet ClaimsSet - CompressionFormat uint16 // Enum see constants for options - uncompressedClaimsSetSize uint32 - ReservedType uint16 - reservedFieldSize uint32 - ReservedField []byte -} - -// ClaimsSet implements https://msdn.microsoft.com/en-us/library/hh554122.aspx -type ClaimsSet struct { - ClaimsArrayCount uint32 - ClaimsArrays []ClaimsArray - ReservedType uint16 - reservedFieldSize uint32 - ReservedField []byte -} - -// ClaimsArray implements https://msdn.microsoft.com/en-us/library/hh536458.aspx -type ClaimsArray struct { - ClaimsSourceType uint16 - ClaimsCount uint32 - ClaimsEntries []ClaimEntry -} - -// ClaimEntry implements https://msdn.microsoft.com/en-us/library/hh536374.aspx -type ClaimEntry struct { - ID string //utf16string - Type uint16 // enums are 16 bit https://msdn.microsoft.com/en-us/library/windows/desktop/aa366818(v=vs.85).aspx - TypeInt64 ClaimTypeInt64 - TypeUInt64 ClaimTypeUInt64 - TypeString ClaimTypeString - TypeBool ClaimTypeBoolean -} - -// ClaimTypeInt64 is a claim of type int64 -type ClaimTypeInt64 struct { - ValueCount uint32 - Value []int64 -} - -// ClaimTypeUInt64 is a claim of type uint64 -type ClaimTypeUInt64 struct { - ValueCount uint32 - Value []uint64 -} - -// ClaimTypeString is a claim of type string -type ClaimTypeString struct { - ValueCount uint32 - Value []string -} - -// ClaimTypeBoolean is a claim of type bool -type ClaimTypeBoolean struct { - ValueCount uint32 - Value []bool -} - -// ReadClaimsSetMetadata reads a ClaimsSetMetadata from the bytes slice. -func ReadClaimsSetMetadata(b *[]byte, p *int, e *binary.ByteOrder) (c ClaimsSetMetadata, err error) { - c.claimsSetSize = ndr.ReadUint32(b, p, e) - *p += 4 //Move over pointer to ClaimSet array - c.CompressionFormat = ndr.ReadUint16(b, p, e) - // TODO Currently compression is not supported so if it is compressed we just have to return. - if c.CompressionFormat != CompressionFormatNone { - *p = len(*b) - return - } - c.uncompressedClaimsSetSize = ndr.ReadUint32(b, p, e) - c.ReservedType = ndr.ReadUint16(b, p, e) - c.reservedFieldSize = ndr.ReadUint32(b, p, e) - *p += 4 //Move over pointer to ReservedField array - var ah ndr.ConformantArrayHeader - if c.claimsSetSize > 0 { - // ClaimsSet is a conformant array https://msdn.microsoft.com/en-us/library/windows/desktop/aa373603(v=vs.85).aspx - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(b, p, e) - if err != nil { - return - } - if ah.MaxCount != int(c.claimsSetSize) { - err = errors.New("error with size of CLAIMS_SET array") - return - } - csb := ndr.ReadBytes(b, p, int(c.claimsSetSize), e) - //TODO put decompression here - c.ClaimsSet, err = ReadClaimsSet(csb) - if err != nil { - return - } - } - if c.reservedFieldSize > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(b, p, e) - if err != nil { - return - } - if ah.MaxCount != int(c.reservedFieldSize) { - err = errors.New("error with size of CLAIMS_SET_METADATA's reserved field array") - return - } - c.ReservedField = ndr.ReadBytes(b, p, int(c.reservedFieldSize), e) - } - return -} - -// ReadClaimsSet reads a ClaimsSet from the bytes slice.
-func ReadClaimsSet(b []byte) (c ClaimsSet, err error) { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - err = fmt.Errorf("error parsing NDR byte stream headers of CLAIMS_SET: %v", err) - return - } - e := &ch.Endianness - //The next 4 bytes are an RPC unique pointer referent. We just skip these - p += 4 - - c.ClaimsArrayCount = ndr.ReadUint32(&b, &p, e) - p += 4 //Move over pointer to claims array - c.ReservedType = ndr.ReadUint16(&b, &p, e) - c.reservedFieldSize = ndr.ReadUint32(&b, &p, e) - p += 4 //Move over pointer to ReservedField array - - var ah ndr.ConformantArrayHeader - if c.ClaimsArrayCount > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if err != nil { - return - } - if ah.MaxCount != int(c.ClaimsArrayCount) { - err = errors.New("error with size of CLAIMS_SET's claims array") - return - } - c.ClaimsArrays = make([]ClaimsArray, c.ClaimsArrayCount, c.ClaimsArrayCount) - for i := range c.ClaimsArrays { - c.ClaimsArrays[i], err = ReadClaimsArray(&b, &p, e) - if err != nil { - return - } - } - } - if c.reservedFieldSize > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if err != nil { - return - } - if ah.MaxCount != int(c.reservedFieldSize) { - err = errors.New("error with size of CLAIMS_SET's reserved field array") - return - } - c.ReservedField = ndr.ReadBytes(&b, &p, int(c.reservedFieldSize), e) - } - return c, nil -} - -// ReadClaimsArray reads a ClaimsArray from the bytes slice. -func ReadClaimsArray(b *[]byte, p *int, e *binary.ByteOrder) (c ClaimsArray, err error) { - c.ClaimsSourceType = ndr.ReadUint16(b, p, e) - c.ClaimsCount = ndr.ReadUint32(b, p, e) - *p += 4 //Move over pointer to claims array - ah, err := ndr.ReadUniDimensionalConformantArrayHeader(b, p, e) - if err != nil { - return - } - if ah.MaxCount != int(c.ClaimsCount) { - err = errors.New("error with size of CLAIMS_ARRAY's claims entries") - return - } - c.ClaimsEntries = make([]ClaimEntry, c.ClaimsCount, c.ClaimsCount) - for i := range c.ClaimsEntries { - var vc uint32 - c.ClaimsEntries[i].Type, vc, err = ReadClaimEntriesUnionHeaders(b, p, e) - if err != nil { - return - } - switch c.ClaimsEntries[i].Type { - case ClaimTypeIDInt64: - c.ClaimsEntries[i].TypeInt64.ValueCount = vc - case ClaimTypeIDUInt64: - c.ClaimsEntries[i].TypeUInt64.ValueCount = vc - case ClaimTypeIDString: - c.ClaimsEntries[i].TypeString.ValueCount = vc - case ClaimsTypeIDBoolean: - c.ClaimsEntries[i].TypeBool.ValueCount = vc - } - } - for i := range c.ClaimsEntries { - err = FillClaimEntry(b, p, e, &c.ClaimsEntries[i]) - if err != nil { - return - } - } - return -} - -func ReadClaimEntriesUnionHeaders(b *[]byte, p *int, e *binary.ByteOrder) (uint16, uint32, error) { - *p += 4 - // This is an NDR union: http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_39 - // The discriminant [tag] is marshalled into the transmitted data stream twice: - // once as the field or parameter in the procedure argument list and - // once as the first part of the union representation [value] - t1 := ndr.ReadUint16(b, p, e) - t2 := ndr.ReadUint16(b, p, e) - if t1 != t2 { - return 0, 0, ndr.Malformed{EText: "malformed NDR encoding of CLAIM_ENTRY union"} - } - vc := ndr.ReadUint32(b, p, e) - *p += 4 //Move over pointer to array of values - return t1, vc, nil -} - -// FillClaimEntry reads a ClaimEntry from the bytes slice. 
-func FillClaimEntry(b *[]byte, p *int, e *binary.ByteOrder, c *ClaimEntry) (err error) { - c.ID, err = ndr.ReadConformantVaryingString(b, p, e) - if err != nil { - return - } - ah, err := ndr.ReadUniDimensionalConformantArrayHeader(b, p, e) - if err != nil { - return - } - switch c.Type { - case ClaimTypeIDInt64: - if ah.MaxCount != int(c.TypeInt64.ValueCount) { - return errors.New("error with size of CLAIM_ENTRY's value") - } - c.TypeInt64.Value = make([]int64, c.TypeInt64.ValueCount, c.TypeInt64.ValueCount) - for i := range c.TypeInt64.Value { - buf := bytes.NewReader((*b)[*p : *p+8]) - err = binary.Read(buf, *e, &c.TypeInt64.Value[i]) - if err != nil { - return - } - *p += 8 // progress position for a uint64 - } - case ClaimTypeIDUInt64: - if ah.MaxCount != int(c.TypeUInt64.ValueCount) { - return errors.New("error with size of CLAIM_ENTRY's value") - } - c.TypeUInt64.Value = make([]uint64, c.TypeUInt64.ValueCount, c.TypeUInt64.ValueCount) - for i := range c.TypeUInt64.Value { - c.TypeUInt64.Value[i] = ndr.ReadUint64(b, p, e) - } - case ClaimTypeIDString: - if ah.MaxCount != int(c.TypeString.ValueCount) { - return errors.New("error with size of CLAIM_ENTRY's value") - } - c.TypeString.Value = make([]string, c.TypeString.ValueCount, c.TypeString.ValueCount) - *p += 4 * (int(c.TypeString.ValueCount)) // Move over pointers - for i := range c.TypeString.Value { - c.TypeString.Value[i], err = ndr.ReadConformantVaryingString(b, p, e) - if err != nil { - return - } - } - case ClaimsTypeIDBoolean: - if ah.MaxCount != int(c.TypeBool.ValueCount) { - return errors.New("error with size of CLAIM_ENTRY's value") - } - c.TypeBool.Value = make([]bool, c.TypeBool.ValueCount, c.TypeBool.ValueCount) - for i := range c.TypeBool.Value { - if ndr.ReadUint64(b, p, e) != 0 { - c.TypeBool.Value[i] = true - } - } - } - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/filetime.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/filetime.go deleted file mode 100644 index 417e4dcf14a..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/filetime.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package mstypes implements representations of Microsoft types for PAC processing. -package mstypes - -import ( - "encoding/binary" - "time" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -/* -FILETIME is a windows data structure. -Ref: https://msdn.microsoft.com/en-us/library/windows/desktop/ms724284%28v=vs.85%29.aspx -It contains two parts that are 32bit integers: - dwLowDateTime - dwHighDateTime -We need to combine these two into one 64bit integer. -This gives the number of 100 nano second periods from January 1, 1601, Coordinated Universal Time (UTC) -*/ - -const unixEpochDiff = 116444736000000000 - -// FileTime implements the Microsoft FILETIME type https://msdn.microsoft.com/en-us/library/cc230324.aspx -type FileTime struct { - LowDateTime uint32 - HighDateTime uint32 -} - -// Time returns a golang Time type from the FileTime -func (ft FileTime) Time() time.Time { - ns := (ft.MSEpoch() - unixEpochDiff) * 100 - return time.Unix(0, int64(ns)).UTC() -} - -// MSEpoch returns the FileTime as a Microsoft epoch, the number of 100 nano second periods elapsed from January 1, 1601 UTC. -func (ft FileTime) MSEpoch() int64 { - return (int64(ft.HighDateTime) << 32) + int64(ft.LowDateTime) -} - -// Unix returns the FileTime as a Unix time, the number of seconds elapsed since January 1, 1970 UTC.
-func (ft FileTime) Unix() int64 { - return (ft.MSEpoch() - unixEpochDiff) / 10000000 -} - -// GetFileTime returns a FileTime type from the provided Golang Time type. -func GetFileTime(t time.Time) FileTime { - ns := t.UnixNano() - fp := (ns / 100) + unixEpochDiff - hd := fp >> 32 - ld := fp - (hd << 32) - return FileTime{ - LowDateTime: uint32(ld), - HighDateTime: uint32(hd), - } -} - -// ReadFileTime reads a FileTime from the bytes slice. -func ReadFileTime(b *[]byte, p *int, e *binary.ByteOrder) FileTime { - l := ndr.ReadUint32(b, p, e) - h := ndr.ReadUint32(b, p, e) - return FileTime{ - LowDateTime: l, - HighDateTime: h, - } -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/group_membership.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/group_membership.go deleted file mode 100644 index 984160f6d9d..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/group_membership.go +++ /dev/null @@ -1,53 +0,0 @@ -package mstypes - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// GroupMembership implements https://msdn.microsoft.com/en-us/library/cc237945.aspx -// RelativeID : A 32-bit unsigned integer that contains the RID of a particular group. -// The possible values for the Attributes flags are identical to those specified in KERB_SID_AND_ATTRIBUTES -type GroupMembership struct { - RelativeID uint32 - Attributes uint32 -} - -// ReadGroupMembership reads a GroupMembership from the bytes slice. -func ReadGroupMembership(b *[]byte, p *int, e *binary.ByteOrder) GroupMembership { - r := ndr.ReadUint32(b, p, e) - a := ndr.ReadUint32(b, p, e) - return GroupMembership{ - RelativeID: r, - Attributes: a, - } -} - -// DomainGroupMembership implements https://msdn.microsoft.com/en-us/library/hh536344.aspx -// DomainId: A SID structure that contains the SID for the domain. This member is used in conjunction with the GroupIds members to create group SIDs for the device. -// GroupCount: A 32-bit unsigned integer that contains the number of groups within the domain to which the account belongs. -// GroupIds: A pointer to a list of GROUP_MEMBERSHIP structures that contain the groups to which the account belongs in the domain. The number of groups in this list MUST be equal to GroupCount. -type DomainGroupMembership struct { - DomainID RPCSID - GroupCount uint32 - GroupIDs []GroupMembership // Size is value of GroupCount -} - -// ReadDomainGroupMembership reads a DomainGroupMembership from the bytes slice. -func ReadDomainGroupMembership(b *[]byte, p *int, e *binary.ByteOrder) (DomainGroupMembership, error) { - d, err := ReadRPCSID(b, p, e) - if err != nil { - return DomainGroupMembership{}, err - } - c := ndr.ReadUint32(b, p, e) - g := make([]GroupMembership, c, c) - for i := range g { - g[i] = ReadGroupMembership(b, p, e) - } - return DomainGroupMembership{ - DomainID: d, - GroupCount: c, - GroupIDs: g, - }, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/kerb_sid_and_attributes.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/kerb_sid_and_attributes.go deleted file mode 100644 index fee509be0bf..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/kerb_sid_and_attributes.go +++ /dev/null @@ -1,42 +0,0 @@ -package mstypes - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// Attributes of a security group membership; they can be combined by using the bitwise OR operation. -// They are used by an access check mechanism to specify whether the membership is to be used in an access check decision.
-const ( - SEGroupMandatory = 31 - SEGroupEnabledByDefault = 30 - SEGroupEnabled = 29 - SEGroupOwner = 28 - SEGroupResource = 2 - //All other bits MUST be set to zero and MUST be ignored on receipt. -) - -// KerbSidAndAttributes implements https://msdn.microsoft.com/en-us/library/cc237947.aspx -type KerbSidAndAttributes struct { - SID RPCSID // A pointer to an RPC_SID structure. - Attributes uint32 -} - -// ReadKerbSidAndAttributes reads a KerbSidAndAttribute from the bytes slice. -func ReadKerbSidAndAttributes(b *[]byte, p *int, e *binary.ByteOrder) (KerbSidAndAttributes, error) { - s, err := ReadRPCSID(b, p, e) - if err != nil { - return KerbSidAndAttributes{}, err - } - a := ndr.ReadUint32(b, p, e) - return KerbSidAndAttributes{ - SID: s, - Attributes: a, - }, nil -} - -// SetFlag sets a flag in a uint32 attribute value. -func SetFlag(a *uint32, i uint) { - *a = *a | (1 << (31 - i)) -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/rpc_unicode_string.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/rpc_unicode_string.go deleted file mode 100644 index 0981b974a5f..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/rpc_unicode_string.go +++ /dev/null @@ -1,36 +0,0 @@ -package mstypes - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// RPCUnicodeString implements https://msdn.microsoft.com/en-us/library/cc230365.aspx -type RPCUnicodeString struct { - Length uint16 // The length, in bytes, of the string pointed to by the Buffer member, not including the terminating null character if any. The length MUST be a multiple of 2. The length SHOULD equal the entire size of the Buffer, in which case there is no terminating null character. Any method that accesses this structure MUST use the Length specified instead of relying on the presence or absence of a null character. - MaximumLength uint16 // The maximum size, in bytes, of the string pointed to by Buffer. The size MUST be a multiple of 2. If not, the size MUST be decremented by 1 prior to use. This value MUST not be less than Length. - BufferPrt uint32 // A pointer to a string buffer. If MaximumLength is greater than zero, the buffer MUST contain a non-null value. - Value string -} - -// ReadRPCUnicodeString reads a RPCUnicodeString from the bytes slice. -func ReadRPCUnicodeString(b *[]byte, p *int, e *binary.ByteOrder) (RPCUnicodeString, error) { - l := ndr.ReadUint16(b, p, e) - ml := ndr.ReadUint16(b, p, e) - if ml < l || l%2 != 0 || ml%2 != 0 { - return RPCUnicodeString{}, ndr.Malformed{EText: "Invalid data for RPC_UNICODE_STRING"} - } - ptr := ndr.ReadUint32(b, p, e) - return RPCUnicodeString{ - Length: l, - MaximumLength: ml, - BufferPrt: ptr, - }, nil -} - -// UnmarshalString populates a golang string into the RPCUnicodeString struct. -func (s *RPCUnicodeString) UnmarshalString(b *[]byte, p *int, e *binary.ByteOrder) (err error) { - s.Value, err = ndr.ReadConformantVaryingString(b, p, e) - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/sid.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/sid.go deleted file mode 100644 index 626d0b13409..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/sid.go +++ /dev/null @@ -1,70 +0,0 @@ -package mstypes - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// RPCSID implements https://msdn.microsoft.com/en-us/library/cc230364.aspx -type RPCSID struct { - Revision uint8 // An 8-bit unsigned integer that specifies the revision level of the SID. This value MUST be set to 0x01. 
- SubAuthorityCount uint8 // An 8-bit unsigned integer that specifies the number of elements in the SubAuthority array. The maximum number of elements allowed is 15. - IdentifierAuthority RPCSIDIdentifierAuthority // An RPC_SID_IDENTIFIER_AUTHORITY structure that indicates the authority under which the SID was created. It describes the entity that created the SID. The Identifier Authority value {0,0,0,0,0,5} denotes SIDs created by the NT SID authority. - SubAuthority []uint32 // A variable length array of unsigned 32-bit integers that uniquely identifies a principal relative to the IdentifierAuthority. Its length is determined by SubAuthorityCount. -} - -// RPCSIDIdentifierAuthority implements https://msdn.microsoft.com/en-us/library/cc230372.aspx -type RPCSIDIdentifierAuthority struct { - Value []byte // 6 bytes -} - -// ReadRPCSID reads a RPC_SID from the bytes slice. -func ReadRPCSID(b *[]byte, p *int, e *binary.ByteOrder) (RPCSID, error) { - size := int(ndr.ReadUint32(b, p, e)) // This is part of the NDR encoding rather than the data type. - r := ndr.ReadUint8(b, p) - if r != uint8(1) { - return RPCSID{}, ndr.Malformed{EText: fmt.Sprintf("SID revision value read as %d when it must be 1", r)} - } - c := ndr.ReadUint8(b, p) - a := ReadRPCSIDIdentifierAuthority(b, p, e) - s := make([]uint32, c, c) - if size != len(s) { - return RPCSID{}, ndr.Malformed{EText: fmt.Sprintf("Number of elements (%d) within SID in the byte stream does not equal the SubAuthorityCount (%d)", size, c)} - } - for i := 0; i < len(s); i++ { - s[i] = ndr.ReadUint32(b, p, e) - } - return RPCSID{ - Revision: r, - SubAuthorityCount: c, - IdentifierAuthority: a, - SubAuthority: s, - }, nil -} - -// ReadRPCSIDIdentifierAuthority reads a RPC_SIDIdentifierAuthority from the bytes slice. -func ReadRPCSIDIdentifierAuthority(b *[]byte, p *int, e *binary.ByteOrder) RPCSIDIdentifierAuthority { - return RPCSIDIdentifierAuthority{ - Value: ndr.ReadBytes(b, p, 6, e), - } -} - -// ToString returns the string representation of the RPC_SID. -func (s *RPCSID) ToString() string { - var str string - b := append(make([]byte, 2, 2), s.IdentifierAuthority.Value...) - // For a strange reason this is read big endian: https://msdn.microsoft.com/en-us/library/dd302645.aspx - i := binary.BigEndian.Uint64(b) - if i >= 4294967296 { - str = fmt.Sprintf("S-1-0x%s", hex.EncodeToString(s.IdentifierAuthority.Value)) - } else { - str = fmt.Sprintf("S-1-%d", i) - } - for _, sub := range s.SubAuthority { - str = fmt.Sprintf("%s-%d", str, sub) - } - return str -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/user_session_key.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/user_session_key.go deleted file mode 100644 index 7ae4f7d89a4..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/mstypes/user_session_key.go +++ /dev/null @@ -1,30 +0,0 @@ -package mstypes - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// CypherBlock implements https://msdn.microsoft.com/en-us/library/cc237040.aspx -type CypherBlock struct { - Data []byte // size = 8 -} - -// UserSessionKey implements https://msdn.microsoft.com/en-us/library/cc237080.aspx -type UserSessionKey struct { - Data []CypherBlock // size = 2 -} - -// ReadUserSessionKey reads a UserSessionKey from the bytes slice. 
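ToString above hides one quirk: the 6-byte identifier authority is read big-endian after being left-padded to 8 bytes, and the hex fallback only applies when that value is 2^32 or larger. The assembly in isolation, for the common NT authority {0,0,0,0,0,5} and invented sub-authorities:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	authority := []byte{0, 0, 0, 0, 0, 5}      // NT SID authority
	subAuths := []uint32{21, 1004336348, 513}  // invented sub-authorities
	b := append(make([]byte, 2), authority...) // left-pad 6 bytes to 8 for Uint64
	s := fmt.Sprintf("S-1-%d", binary.BigEndian.Uint64(b))
	for _, sa := range subAuths {
		s = fmt.Sprintf("%s-%d", s, sa)
	}
	fmt.Println(s) // S-1-5-21-1004336348-513
}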
-func ReadUserSessionKey(b *[]byte, p *int, e *binary.ByteOrder) UserSessionKey { - cb1 := CypherBlock{ - Data: ndr.ReadBytes(b, p, 8, e), - } - cb2 := CypherBlock{ - Data: ndr.ReadBytes(b, p, 8, e), - } - return UserSessionKey{ - Data: []CypherBlock{cb1, cb2}, - } -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_claims.go deleted file mode 100644 index a66a91e7609..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_claims.go +++ /dev/null @@ -1,41 +0,0 @@ -package pac - -import ( - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// Claims reference: https://msdn.microsoft.com/en-us/library/hh553895.aspx - -// ClientClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh536365.aspx -type ClientClaimsInfo struct { - Claims mstypes.ClaimsSetMetadata -} - -// Unmarshal bytes into the ClientClaimsInfo struct -func (k *ClientClaimsInfo) Unmarshal(b []byte) error { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - return fmt.Errorf("error parsing byte stream headers of CLIENT_CLAIMS_INFO: %v", err) - } - e := &ch.Endianness - //The next 4 bytes are an RPC unique pointer referent. We just skip these - p += 4 - - k.Claims, err = mstypes.ReadClaimsSetMetadata(&b, &p, e) - if err != nil { - return err - } - - //Check that there is only zero padding left - if len(b) >= p { - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_info.go deleted file mode 100644 index a0c2468e849..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/client_info.go +++ /dev/null @@ -1,40 +0,0 @@ -package pac - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// ClientInfo implements https://msdn.microsoft.com/en-us/library/cc237951.aspx -type ClientInfo struct { - ClientID mstypes.FileTime // A FILETIME structure in little-endian format that contains the Kerberos initial ticket-granting ticket TGT authentication time - NameLength uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the Name field. - Name string // An array of 16-bit Unicode characters in little-endian format that contains the client's account name. -} - -// Unmarshal bytes into the ClientInfo struct -func (k *ClientInfo) Unmarshal(b []byte) error { - //The PAC_CLIENT_INFO structure is a simple structure that is not NDR-encoded. 
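Every reader in these vendored files follows the same cursor convention: the byte slice, an integer position, and a byte order are all passed by pointer, and each read advances the position in place. A self-contained sketch of the pattern; readUint32 is an illustrative stand-in for ndr.ReadUint32, not the real API:

package main

import (
	"encoding/binary"
	"fmt"
)

// readUint32 mimics the ndr reader convention: consume 4 bytes at *p and advance the cursor.
func readUint32(b *[]byte, p *int, e *binary.ByteOrder) uint32 {
	v := (*e).Uint32((*b)[*p : *p+4])
	*p += 4
	return v
}

func main() {
	data := []byte{1, 0, 0, 0, 2, 0, 0, 0}
	var p int
	var e binary.ByteOrder = binary.LittleEndian
	fmt.Println(readUint32(&data, &p, &e), readUint32(&data, &p, &e), p) // 1 2 8
}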
- var p int - var e binary.ByteOrder = binary.LittleEndian - - k.ClientID = mstypes.ReadFileTime(&b, &p, &e) - k.NameLength = ndr.ReadUint16(&b, &p, &e) - if len(b[p:]) < int(k.NameLength) { - return ndr.Malformed{EText: "PAC ClientInfo length truncated"} - } - k.Name = ndr.ReadUTF16String(int(k.NameLength), &b, &p, &e) - - //Check that there is only zero padding left - if len(b) >= p { - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - } - - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/credentials_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/credentials_info.go deleted file mode 100644 index 4a3c516e715..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/credentials_info.go +++ /dev/null @@ -1,131 +0,0 @@ -package pac - -import ( - "encoding/binary" - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/gokrb5.v5/types" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// https://msdn.microsoft.com/en-us/library/cc237931.aspx - -// CredentialsInfo implements https://msdn.microsoft.com/en-us/library/cc237953.aspx -type CredentialsInfo struct { - Version uint32 // A 32-bit unsigned integer in little-endian format that defines the version. MUST be 0x00000000. - EType uint32 - PACCredentialDataEncrypted []byte // Key usage number for encryption: KERB_NON_KERB_SALT (16) - PACCredentialData CredentialData -} - -// Unmarshal bytes into the CredentialsInfo struct -func (c *CredentialsInfo) Unmarshal(b []byte, k types.EncryptionKey) error { - //The CredentialsInfo structure is a simple structure that is not NDR-encoded. - var p int - var e binary.ByteOrder = binary.LittleEndian - - c.Version = ndr.ReadUint32(&b, &p, &e) - if c.Version != 0 { - return errors.New("credentials info version is not zero") - } - c.EType = ndr.ReadUint32(&b, &p, &e) - c.PACCredentialDataEncrypted = ndr.ReadBytes(&b, &p, len(b)-p, &e) - - err := c.DecryptEncPart(k, &e) - if err != nil { - return fmt.Errorf("error decrypting PAC Credentials Data: %v", err) - } - return nil -} - -// DecryptEncPart decrypts the encrypted part of the CredentialsInfo. -func (c *CredentialsInfo) DecryptEncPart(k types.EncryptionKey, e *binary.ByteOrder) error { - if k.KeyType != int32(c.EType) { - return fmt.Errorf("key provided is not the correct type. Type needed: %d, type provided: %d", c.EType, k.KeyType) - } - pt, err := crypto.DecryptMessage(c.PACCredentialDataEncrypted, k, keyusage.KERB_NON_KERB_SALT) - if err != nil { - return err - } - var p int - c.PACCredentialData = ReadPACCredentialData(&pt, &p, e) - return nil -} - -// CredentialData implements https://msdn.microsoft.com/en-us/library/cc237952.aspx -// This structure is encrypted prior to being encoded in any other structures. -// Encryption is performed by first serializing the data structure via Network Data Representation (NDR) encoding, as specified in [MS-RPCE]. -// Once serialized, the data is encrypted using the key and cryptographic system selected through the AS protocol and the KRB_AS_REP message -// Fields (for capturing this information) and cryptographic parameters are specified in PAC_CREDENTIAL_INFO (section 2.6.1). -type CredentialData struct { - CredentialCount uint32 - Credentials []SECPKGSupplementalCred // Size is the value of CredentialCount -} - -// ReadPACCredentialData reads a CredentialData from the byte slice. 
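The trailing-padding check that closes almost every Unmarshal in this package is the same loop each time and could be read as one small helper. A sketch of what that loop verifies; allZero is an invented name, and the plain error stands in for the ndr.Malformed type the originals return:

package main

import (
	"errors"
	"fmt"
)

// allZero reports whether everything from position p to the end of b is zero padding.
func allZero(b []byte, p int) error {
	if p > len(b) { // cursor ran past the end; mirrors the originals' len(b) >= p guard
		return nil
	}
	for _, v := range b[p:] {
		if v != 0 {
			return errors.New("non-zero padding left over at end of data stream")
		}
	}
	return nil
}

func main() {
	fmt.Println(allZero([]byte{1, 2, 0, 0}, 2)) // <nil>
	fmt.Println(allZero([]byte{1, 2, 0, 9}, 2)) // non-zero padding error
}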
-func ReadPACCredentialData(b *[]byte, p *int, e *binary.ByteOrder) CredentialData { - c := ndr.ReadUint32(b, p, e) - cr := make([]SECPKGSupplementalCred, c, c) - for i := range cr { - cr[i] = ReadSECPKGSupplementalCred(b, p, e) - } - return CredentialData{ - CredentialCount: c, - Credentials: cr, - } -} - -// SECPKGSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237956.aspx -type SECPKGSupplementalCred struct { - PackageName mstypes.RPCUnicodeString - CredentialSize uint32 - Credentials []uint8 // Is a ptr. Size is the value of CredentialSize -} - -// ReadSECPKGSupplementalCred reads a SECPKGSupplementalCred from the byte slice. -func ReadSECPKGSupplementalCred(b *[]byte, p *int, e *binary.ByteOrder) SECPKGSupplementalCred { - n, _ := mstypes.ReadRPCUnicodeString(b, p, e) - cs := ndr.ReadUint32(b, p, e) - c := make([]uint8, cs, cs) - for i := range c { - c[i] = ndr.ReadUint8(b, p) - } - return SECPKGSupplementalCred{ - PackageName: n, - CredentialSize: cs, - Credentials: c, - } -} - -// NTLMSupplementalCred implements https://msdn.microsoft.com/en-us/library/cc237949.aspx -type NTLMSupplementalCred struct { - Version uint32 // A 32-bit unsigned integer that defines the credential version.This field MUST be 0x00000000. - Flags uint32 - LMPassword []byte // A 16-element array of unsigned 8-bit integers that define the LM OWF. The LmPassword member MUST be ignored if the L flag is not set in the Flags member. - NTPassword []byte // A 16-element array of unsigned 8-bit integers that define the NT OWF. The LtPassword member MUST be ignored if the N flag is not set in the Flags member. -} - -// ReadNTLMSupplementalCred reads a NTLMSupplementalCred from the byte slice. -func ReadNTLMSupplementalCred(b *[]byte, p *int, e *binary.ByteOrder) NTLMSupplementalCred { - v := ndr.ReadUint32(b, p, e) - f := ndr.ReadUint32(b, p, e) - l := ndr.ReadBytes(b, p, 16, e) - n := ndr.ReadBytes(b, p, 16, e) - return NTLMSupplementalCred{ - Version: v, - Flags: f, - LMPassword: l, - NTPassword: n, - } -} - -const ( - // NTLMSupCredLMOWF indicates that the LM OWF member is present and valid. - NTLMSupCredLMOWF = 31 - // NTLMSupCredNTOWF indicates that the NT OWF member is present and valid. - NTLMSupCredNTOWF = 30 -) diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_claims.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_claims.go deleted file mode 100644 index 977170d42cf..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_claims.go +++ /dev/null @@ -1,39 +0,0 @@ -package pac - -import ( - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// DeviceClaimsInfo implements https://msdn.microsoft.com/en-us/library/hh554226.aspx -type DeviceClaimsInfo struct { - Claims mstypes.ClaimsSetMetadata -} - -// Unmarshal bytes into the DeviceClaimsInfo struct -func (k *DeviceClaimsInfo) Unmarshal(b []byte) error { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - return fmt.Errorf("error parsing byte stream headers of DEVICE_CLAIMS_INFO: %v", err) - } - e := &ch.Endianness - //The next 4 bytes are an RPC unique pointer referent. 
We just skip these - p += 4 - - k.Claims, err = mstypes.ReadClaimsSetMetadata(&b, &p, e) - if err != nil { - return err - } - - //Check that there is only zero padding left - if len(b) >= p { - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - } - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_info.go deleted file mode 100644 index e3fa3fcb5bc..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/device_info.go +++ /dev/null @@ -1,94 +0,0 @@ -package pac - -import ( - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// DeviceInfo implements https://msdn.microsoft.com/en-us/library/hh536402.aspx -type DeviceInfo struct { - UserID uint32 // A 32-bit unsigned integer that contains the RID of the account. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. - PrimaryGroupID uint32 // A 32-bit unsigned integer that contains the RID for the primary group to which this account belongs. - AccountDomainID mstypes.RPCSID // A SID structure that contains the SID for the domain of the account.This member is used in conjunction with the UserId, and GroupIds members to create the user and group SIDs for the client. - AccountGroupCount uint32 // A 32-bit unsigned integer that contains the number of groups within the account domain to which the account belongs - AccountGroupIDs []mstypes.GroupMembership // A pointer to a list of GROUP_MEMBERSHIP (section 2.2.2) structures that contains the groups to which the account belongs in the account domain. The number of groups in this list MUST be equal to GroupCount. - SIDCount uint32 // A 32-bit unsigned integer that contains the total number of SIDs present in the ExtraSids member. - ExtraSIDs []mstypes.KerbSidAndAttributes // A pointer to a list of KERB_SID_AND_ATTRIBUTES structures that contain a list of SIDs corresponding to groups not in domains. If the UserId member equals 0x00000000, the first group SID in this member is the SID for this account. - DomainGroupCount uint32 // A 32-bit unsigned integer that contains the number of domains with groups to which the account belongs. - DomainGroup []mstypes.DomainGroupMembership // A pointer to a list of DOMAIN_GROUP_MEMBERSHIP structures (section 2.2.3) that contains the domains to which the account belongs to a group. The number of sets in this list MUST be equal to DomainCount. -} - -// Unmarshal bytes into the DeviceInfo struct -func (k *DeviceInfo) Unmarshal(b []byte) error { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - return fmt.Errorf("error parsing byte stream headers: %v", err) - } - e := &ch.Endianness - - //The next 4 bytes are an RPC unique pointer referent. 
We just skip these - p += 4 - - k.UserID = ndr.ReadUint32(&b, &p, e) - k.PrimaryGroupID = ndr.ReadUint32(&b, &p, e) - k.AccountDomainID, err = mstypes.ReadRPCSID(&b, &p, e) - if err != nil { - return err - } - k.AccountGroupCount = ndr.ReadUint32(&b, &p, e) - if k.AccountGroupCount > 0 { - ag := make([]mstypes.GroupMembership, k.AccountGroupCount, k.AccountGroupCount) - for i := range ag { - ag[i] = mstypes.ReadGroupMembership(&b, &p, e) - } - k.AccountGroupIDs = ag - } - - k.SIDCount = ndr.ReadUint32(&b, &p, e) - var ah ndr.ConformantArrayHeader - if k.SIDCount > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if ah.MaxCount != int(k.SIDCount) { - return fmt.Errorf("error with size of ExtraSIDs list. expected: %d, Actual: %d", k.SIDCount, ah.MaxCount) - } - es := make([]mstypes.KerbSidAndAttributes, k.SIDCount, k.SIDCount) - attr := make([]uint32, k.SIDCount, k.SIDCount) - ptr := make([]uint32, k.SIDCount, k.SIDCount) - for i := range attr { - ptr[i] = ndr.ReadUint32(&b, &p, e) - attr[i] = ndr.ReadUint32(&b, &p, e) - } - for i := range es { - if ptr[i] != 0 { - s, err := mstypes.ReadRPCSID(&b, &p, e) - es[i] = mstypes.KerbSidAndAttributes{SID: s, Attributes: attr[i]} - if err != nil { - return ndr.Malformed{EText: fmt.Sprintf("could not read ExtraSIDs: %v", err)} - } - } - } - k.ExtraSIDs = es - } - - k.DomainGroupCount = ndr.ReadUint32(&b, &p, e) - if k.DomainGroupCount > 0 { - dg := make([]mstypes.DomainGroupMembership, k.DomainGroupCount, k.DomainGroupCount) - for i := range dg { - dg[i], _ = mstypes.ReadDomainGroupMembership(&b, &p, e) - } - k.DomainGroup = dg - } - - //Check that there is only zero padding left - if len(b) >= p { - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - } - - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/kerb_validation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/kerb_validation_info.go deleted file mode 100644 index cc346ad8f06..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/kerb_validation_info.go +++ /dev/null @@ -1,296 +0,0 @@ -// Package pac implements Microsoft Privilege Attribute Certificate (PAC) processing. -package pac - -import ( - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// KERB_VALIDATION_INFO flags. -const ( - USERFLAG_GUEST = 31 // Authentication was done via the GUEST account; no password was used. - USERFLAG_NO_ENCRYPTION_AVAILABLE = 30 // No encryption is available. - USERFLAG_LAN_MANAGER_KEY = 28 // LAN Manager key was used for authentication. - USERFLAG_SUB_AUTH = 25 // Sub-authentication used; session key came from the sub-authentication package. - USERFLAG_EXTRA_SIDS = 26 // Indicates that the ExtraSids field is populated and contains additional SIDs. - USERFLAG_MACHINE_ACCOUNT = 24 // Indicates that the account is a machine account. - USERFLAG_DC_NTLM2 = 23 // Indicates that the domain controller understands NTLMv2. - USERFLAG_RESOURCE_GROUPIDS = 22 // Indicates that the ResourceGroupIds field is populated. - USERFLAG_PROFILEPATH = 21 // Indicates that ProfilePath is populated. - USERFLAG_NTLM2_NTCHALLENGERESP = 20 // The NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. - USERFLAG_LM2_LMCHALLENGERESP = 19 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and session key generation. 
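These USERFLAG constants (the block's final entry continues just below) number bits from the most significant end, the same MS convention behind the SetFlag helper earlier in this diff (1 << (31 - i)), so testing a flag uses the complementary shift. A hypothetical check; the UserFlags value is fabricated:

package main

import "fmt"

// USERFLAG_EXTRA_SIDS is bit 26 counted from the most significant bit, per the
// MS-PAC numbering used by the constant block above.
const USERFLAG_EXTRA_SIDS = 26

// hasFlag tests a flag numbered from the MSB, mirroring SetFlag's 1 << (31 - i).
func hasFlag(flags uint32, bit uint) bool {
	return flags&(1<<(31-bit)) != 0
}

func main() {
	var userFlags uint32 = 1 << (31 - USERFLAG_EXTRA_SIDS) // fabricated PAC value
	fmt.Println(hasFlag(userFlags, USERFLAG_EXTRA_SIDS))   // true
}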
- USERFLAG_AUTH_LMCHALLENGERESP_KEY_NTCHALLENGERESP = 18 // The LMv2 response from the LmChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used for authentication and the NTLMv2 response from the NtChallengeResponseFields ([MS-NLMP] section 2.2.1.3) was used session key generation. -) - -// KerbValidationInfo implement https://msdn.microsoft.com/en-us/library/cc237948.aspx -// The KERB_VALIDATION_INFO structure defines the user's logon and authorization information -// provided by the DC. The KERB_VALIDATION_INFO structure is a subset of the -// NETLOGON_VALIDATION_SAM_INFO4 structure ([MS-NRPC] section 2.2.1.4.13). -// It is a subset due to historical reasons and to the use of the common Active Directory to generate this information. -// The KERB_VALIDATION_INFO structure is marshaled by RPC [MS-RPCE]. -type KerbValidationInfo struct { - LogOnTime mstypes.FileTime - LogOffTime mstypes.FileTime - KickOffTime mstypes.FileTime - PasswordLastSet mstypes.FileTime - PasswordCanChange mstypes.FileTime - PasswordMustChange mstypes.FileTime - EffectiveName mstypes.RPCUnicodeString - FullName mstypes.RPCUnicodeString - LogonScript mstypes.RPCUnicodeString - ProfilePath mstypes.RPCUnicodeString - HomeDirectory mstypes.RPCUnicodeString - HomeDirectoryDrive mstypes.RPCUnicodeString - LogonCount uint16 - BadPasswordCount uint16 - UserID uint32 - PrimaryGroupID uint32 - GroupCount uint32 - pGroupIDs uint32 - GroupIDs []mstypes.GroupMembership - UserFlags uint32 - UserSessionKey mstypes.UserSessionKey - LogonServer mstypes.RPCUnicodeString - LogonDomainName mstypes.RPCUnicodeString - pLogonDomainID uint32 - LogonDomainID mstypes.RPCSID - Reserved1 []uint32 // Has 2 elements - UserAccountControl uint32 - SubAuthStatus uint32 - LastSuccessfulILogon mstypes.FileTime - LastFailedILogon mstypes.FileTime - FailedILogonCount uint32 - Reserved3 uint32 - SIDCount uint32 - pExtraSIDs uint32 - ExtraSIDs []mstypes.KerbSidAndAttributes - pResourceGroupDomainSID uint32 - ResourceGroupDomainSID mstypes.RPCSID - ResourceGroupCount uint32 - pResourceGroupIDs uint32 - ResourceGroupIDs []mstypes.GroupMembership -} - -// Unmarshal bytes into the DeviceInfo struct -func (k *KerbValidationInfo) Unmarshal(b []byte) (err error) { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - return fmt.Errorf("error parsing byte stream headers: %v", err) - } - e := &ch.Endianness - - //The next 4 bytes are an RPC unique pointer referent. 
We just skip these - p += 4 - - k.LogOnTime = mstypes.ReadFileTime(&b, &p, e) - k.LogOffTime = mstypes.ReadFileTime(&b, &p, e) - k.KickOffTime = mstypes.ReadFileTime(&b, &p, e) - k.PasswordLastSet = mstypes.ReadFileTime(&b, &p, e) - k.PasswordCanChange = mstypes.ReadFileTime(&b, &p, e) - k.PasswordMustChange = mstypes.ReadFileTime(&b, &p, e) - - if k.EffectiveName, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.FullName, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.LogonScript, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.ProfilePath, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.HomeDirectory, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.HomeDirectoryDrive, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - - k.LogonCount = ndr.ReadUint16(&b, &p, e) - k.BadPasswordCount = ndr.ReadUint16(&b, &p, e) - k.UserID = ndr.ReadUint32(&b, &p, e) - k.PrimaryGroupID = ndr.ReadUint32(&b, &p, e) - k.GroupCount = ndr.ReadUint32(&b, &p, e) - k.pGroupIDs = ndr.ReadUint32(&b, &p, e) - - k.UserFlags = ndr.ReadUint32(&b, &p, e) - k.UserSessionKey = mstypes.ReadUserSessionKey(&b, &p, e) - - if k.LogonServer, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - if k.LogonDomainName, err = mstypes.ReadRPCUnicodeString(&b, &p, e); err != nil { - return - } - - k.pLogonDomainID = ndr.ReadUint32(&b, &p, e) - - k.Reserved1 = []uint32{ - ndr.ReadUint32(&b, &p, e), - ndr.ReadUint32(&b, &p, e), - } - - k.UserAccountControl = ndr.ReadUint32(&b, &p, e) - k.SubAuthStatus = ndr.ReadUint32(&b, &p, e) - k.LastSuccessfulILogon = mstypes.ReadFileTime(&b, &p, e) - k.LastFailedILogon = mstypes.ReadFileTime(&b, &p, e) - k.FailedILogonCount = ndr.ReadUint32(&b, &p, e) - k.Reserved3 = ndr.ReadUint32(&b, &p, e) - - k.SIDCount = ndr.ReadUint32(&b, &p, e) - k.pExtraSIDs = ndr.ReadUint32(&b, &p, e) - - k.pResourceGroupDomainSID = ndr.ReadUint32(&b, &p, e) - k.ResourceGroupCount = ndr.ReadUint32(&b, &p, e) - k.pResourceGroupIDs = ndr.ReadUint32(&b, &p, e) - - // Populate pointers - if err = k.EffectiveName.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.FullName.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.LogonScript.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.ProfilePath.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.HomeDirectory.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.HomeDirectoryDrive.UnmarshalString(&b, &p, e); err != nil { - return - } - var ah ndr.ConformantArrayHeader - if k.GroupCount > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if err != nil { - return - } - if ah.MaxCount != int(k.GroupCount) { - err = errors.New("error with size of group list") - return - } - g := make([]mstypes.GroupMembership, k.GroupCount, k.GroupCount) - for i := range g { - g[i] = mstypes.ReadGroupMembership(&b, &p, e) - } - k.GroupIDs = g - } - - if err = k.LogonServer.UnmarshalString(&b, &p, e); err != nil { - return - } - if err = k.LogonDomainName.UnmarshalString(&b, &p, e); err != nil { - return - } - - if k.pLogonDomainID != 0 { - k.LogonDomainID, err = mstypes.ReadRPCSID(&b, &p, e) - if err != nil { - return fmt.Errorf("error reading LogonDomainID: %v", err) - } - } - - if k.SIDCount > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if err != nil { - return - } - if 
ah.MaxCount != int(k.SIDCount) { - return fmt.Errorf("error with size of ExtraSIDs list. Expected: %d, Actual: %d", k.SIDCount, ah.MaxCount) - } - es := make([]mstypes.KerbSidAndAttributes, k.SIDCount, k.SIDCount) - attr := make([]uint32, k.SIDCount, k.SIDCount) - ptr := make([]uint32, k.SIDCount, k.SIDCount) - for i := range attr { - ptr[i] = ndr.ReadUint32(&b, &p, e) - attr[i] = ndr.ReadUint32(&b, &p, e) - } - for i := range es { - if ptr[i] != 0 { - s, err := mstypes.ReadRPCSID(&b, &p, e) - es[i] = mstypes.KerbSidAndAttributes{SID: s, Attributes: attr[i]} - if err != nil { - return ndr.Malformed{EText: fmt.Sprintf("could not read ExtraSIDs: %v", err)} - } - } - } - k.ExtraSIDs = es - } - - if k.pResourceGroupDomainSID != 0 { - k.ResourceGroupDomainSID, err = mstypes.ReadRPCSID(&b, &p, e) - if err != nil { - return err - } - } - - if k.ResourceGroupCount > 0 { - ah, err = ndr.ReadUniDimensionalConformantArrayHeader(&b, &p, e) - if err != nil { - return - } - if ah.MaxCount != int(k.ResourceGroupCount) { - return fmt.Errorf("error with size of ResourceGroup list. Expected: %d, Actual: %d", k.ResourceGroupCount, ah.MaxCount) - } - g := make([]mstypes.GroupMembership, k.ResourceGroupCount, k.ResourceGroupCount) - for i := range g { - g[i] = mstypes.ReadGroupMembership(&b, &p, e) - } - k.ResourceGroupIDs = g - } - - //Check that there is only zero padding left - if len(b) >= p { - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - } - - return nil -} - -// GetGroupMembershipSIDs returns a slice of strings containing the group membership SIDs found in the PAC. -func (k *KerbValidationInfo) GetGroupMembershipSIDs() []string { - var g []string - lSID := k.LogonDomainID.ToString() - for i := range k.GroupIDs { - g = append(g, fmt.Sprintf("%s-%d", lSID, k.GroupIDs[i].RelativeID)) - } - for _, s := range k.ExtraSIDs { - var exists = false - for _, es := range g { - if es == s.SID.ToString() { - exists = true - break - } - } - if !exists { - g = append(g, s.SID.ToString()) - } - } - for _, r := range k.ResourceGroupIDs { - var exists = false - s := fmt.Sprintf("%s-%d", k.ResourceGroupDomainSID.ToString(), r.RelativeID) - for _, es := range g { - if es == s { - exists = true - break - } - } - if !exists { - g = append(g, s) - } - } - return g -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_info_buffer.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_info_buffer.go deleted file mode 100644 index be6f41b4ec0..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_info_buffer.go +++ /dev/null @@ -1,39 +0,0 @@ -package pac - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -const ( - ulTypeKerbValidationInfo = 1 - ulTypeCredentials = 2 - ulTypePACServerSignatureData = 6 - ulTypePACKDCSignatureData = 7 - ulTypePACClientInfo = 10 - ulTypeS4UDelegationInfo = 11 - ulTypeUPNDNSInfo = 12 - ulTypePACClientClaimsInfo = 13 - ulTypePACDeviceInfo = 14 - ulTypePACDeviceClaimsInfo = 15 -) - -// InfoBuffer implements the PAC Info Buffer: https://msdn.microsoft.com/en-us/library/cc237954.aspx -type InfoBuffer struct { - ULType uint32 // A 32-bit unsigned integer in little-endian format that describes the type of data present in the buffer contained at Offset. - CBBufferSize uint32 // A 32-bit unsigned integer in little-endian format that contains the size, in bytes, of the buffer in the PAC located at Offset. 
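ULType, CBBufferSize, and the Offset field just below jointly locate each buffer inside the raw PACTYPE bytes, and the extraction that ProcessPACInfoBuffers performs further down reduces to an offset/size copy. A reduced sketch with a fabricated buffer table:

package main

import "fmt"

// infoBuffer mirrors the three fields of the PAC info buffer header.
type infoBuffer struct {
	ULType       uint32
	CBBufferSize uint32
	Offset       uint64
}

func main() {
	pacData := []byte{0xAA, 0xBB, 0x01, 0x02, 0x03, 0xCC} // stand-in PACTYPE bytes
	buf := infoBuffer{ULType: 10, CBBufferSize: 3, Offset: 2}
	p := make([]byte, buf.CBBufferSize)
	copy(p, pacData[buf.Offset:buf.Offset+uint64(buf.CBBufferSize)]) // isolate one buffer
	fmt.Printf("type %d -> % x\n", buf.ULType, p)                    // type 10 -> 01 02 03
}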
- Offset uint64 // A 64-bit unsigned integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the PACTYPE structure. The data offset MUST be a multiple of eight. The following sections specify the format of each type of element. -} - -// ReadPACInfoBuffer reads a InfoBuffer from the byte slice. -func ReadPACInfoBuffer(b *[]byte, p *int, e *binary.ByteOrder) InfoBuffer { - u := ndr.ReadUint32(b, p, e) - s := ndr.ReadUint32(b, p, e) - o := ndr.ReadUint64(b, p, e) - return InfoBuffer{ - ULType: u, - CBBufferSize: s, - Offset: o, - } -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_type.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_type.go deleted file mode 100644 index be12e96f598..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/pac_type.go +++ /dev/null @@ -1,209 +0,0 @@ -package pac - -import ( - "encoding/binary" - "errors" - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/crypto" - "gopkg.in/jcmturner/gokrb5.v5/iana/keyusage" - "gopkg.in/jcmturner/gokrb5.v5/types" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// PACType implements: https://msdn.microsoft.com/en-us/library/cc237950.aspx -type PACType struct { - CBuffers uint32 - Version uint32 - Buffers []InfoBuffer - Data []byte - KerbValidationInfo *KerbValidationInfo - CredentialsInfo *CredentialsInfo - ServerChecksum *SignatureData - KDCChecksum *SignatureData - ClientInfo *ClientInfo - S4UDelegationInfo *S4UDelegationInfo - UPNDNSInfo *UPNDNSInfo - ClientClaimsInfo *ClientClaimsInfo - DeviceInfo *DeviceInfo - DeviceClaimsInfo *DeviceClaimsInfo - ZeroSigData []byte -} - -// Unmarshal bytes into the PACType struct -func (pac *PACType) Unmarshal(b []byte) error { - var p int - var e binary.ByteOrder = binary.LittleEndian - pac.Data = b - zb := make([]byte, len(b), len(b)) - copy(zb, b) - pac.ZeroSigData = zb - pac.CBuffers = ndr.ReadUint32(&b, &p, &e) - pac.Version = ndr.ReadUint32(&b, &p, &e) - buf := make([]InfoBuffer, pac.CBuffers, pac.CBuffers) - for i := range buf { - buf[i] = ReadPACInfoBuffer(&b, &p, &e) - } - pac.Buffers = buf - return nil -} - -// ProcessPACInfoBuffers processes the PAC Info Buffers. -// https://msdn.microsoft.com/en-us/library/cc237954.aspx -func (pac *PACType) ProcessPACInfoBuffers(key types.EncryptionKey) error { - for _, buf := range pac.Buffers { - p := make([]byte, buf.CBBufferSize, buf.CBBufferSize) - copy(p, pac.Data[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)]) - switch int(buf.ULType) { - case ulTypeKerbValidationInfo: - if pac.KerbValidationInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k KerbValidationInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing KerbValidationInfo: %v", err) - } - pac.KerbValidationInfo = &k - case ulTypeCredentials: - // Currently PAC parsing is only useful on the service side in gokrb5 - // The CredentialsInfo are only useful when gokrb5 has implemented RFC4556 and only applied on the client side. - // Skipping CredentialsInfo - will be revisited under RFC4556 implementation. - continue - //if pac.CredentialsInfo != nil { - // //Must ignore subsequent buffers of this type - // continue - //} - //var k CredentialsInfo - //err := k.Unmarshal(p, key) // The encryption key used is the AS reply key only available to the client. 
- //if err != nil { - // return fmt.Errorf("error processing CredentialsInfo: %v", err) - //} - //pac.CredentialsInfo = &k - case ulTypePACServerSignatureData: - if pac.ServerChecksum != nil { - //Must ignore subsequent buffers of this type - continue - } - var k SignatureData - zb, err := k.Unmarshal(p) - copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) - if err != nil { - return fmt.Errorf("error processing ServerChecksum: %v", err) - } - pac.ServerChecksum = &k - case ulTypePACKDCSignatureData: - if pac.KDCChecksum != nil { - //Must ignore subsequent buffers of this type - continue - } - var k SignatureData - zb, err := k.Unmarshal(p) - copy(pac.ZeroSigData[int(buf.Offset):int(buf.Offset)+int(buf.CBBufferSize)], zb) - if err != nil { - return fmt.Errorf("error processing KDCChecksum: %v", err) - } - pac.KDCChecksum = &k - case ulTypePACClientInfo: - if pac.ClientInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k ClientInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing ClientInfo: %v", err) - } - pac.ClientInfo = &k - case ulTypeS4UDelegationInfo: - if pac.S4UDelegationInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k S4UDelegationInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing S4U_DelegationInfo: %v", err) - } - pac.S4UDelegationInfo = &k - case ulTypeUPNDNSInfo: - if pac.UPNDNSInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k UPNDNSInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing UPN_DNSInfo: %v", err) - } - pac.UPNDNSInfo = &k - case ulTypePACClientClaimsInfo: - if pac.ClientClaimsInfo != nil || len(p) < 1 { - //Must ignore subsequent buffers of this type - continue - } - var k ClientClaimsInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing ClientClaimsInfo: %v", err) - } - pac.ClientClaimsInfo = &k - case ulTypePACDeviceInfo: - if pac.DeviceInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k DeviceInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing DeviceInfo: %v", err) - } - pac.DeviceInfo = &k - case ulTypePACDeviceClaimsInfo: - if pac.DeviceClaimsInfo != nil { - //Must ignore subsequent buffers of this type - continue - } - var k DeviceClaimsInfo - err := k.Unmarshal(p) - if err != nil { - return fmt.Errorf("error processing DeviceClaimsInfo: %v", err) - } - pac.DeviceClaimsInfo = &k - } - } - - if ok, err := pac.validate(key); !ok { - return err - } - - return nil -} - -func (pac *PACType) validate(key types.EncryptionKey) (bool, error) { - if pac.KerbValidationInfo == nil { - return false, errors.New("PAC Info Buffers does not contain a KerbValidationInfo") - } - if pac.ServerChecksum == nil { - return false, errors.New("PAC Info Buffers does not contain a ServerChecksum") - } - if pac.KDCChecksum == nil { - return false, errors.New("PAC Info Buffers does not contain a KDCChecksum") - } - if pac.ClientInfo == nil { - return false, errors.New("PAC Info Buffers does not contain a ClientInfo") - } - etype, err := crypto.GetChksumEtype(int32(pac.ServerChecksum.SignatureType)) - if err != nil { - return false, err - } - if ok := etype.VerifyChecksum(key.KeyValue, - pac.ZeroSigData, - pac.ServerChecksum.Signature, - keyusage.KERB_NON_KERB_CKSUM_SALT); !ok { - return false, errors.New("PAC service checksum verification failed") - } - - return true, 
nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/s4u_delegation_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/s4u_delegation_info.go deleted file mode 100644 index 82b96878ea2..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/s4u_delegation_info.go +++ /dev/null @@ -1,55 +0,0 @@ -package pac - -import ( - "fmt" - - "gopkg.in/jcmturner/gokrb5.v5/mstypes" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// S4UDelegationInfo implements https://msdn.microsoft.com/en-us/library/cc237944.aspx -type S4UDelegationInfo struct { - S4U2proxyTarget mstypes.RPCUnicodeString // The name of the principal to whom the application can forward the ticket. - TransitedListSize uint32 - S4UTransitedServices []mstypes.RPCUnicodeString // List of all services that have been delegated through by this client and subsequent services or servers.. Size is value of TransitedListSize -} - -// Unmarshal bytes into the S4UDelegationInfo struct -func (k *S4UDelegationInfo) Unmarshal(b []byte) error { - ch, _, p, err := ndr.ReadHeaders(&b) - if err != nil { - return fmt.Errorf("error parsing byte stream headers: %v", err) - } - e := &ch.Endianness - - //The next 4 bytes are an RPC unique pointer referent. We just skip these - p += 4 - - k.S4U2proxyTarget, err = mstypes.ReadRPCUnicodeString(&b, &p, e) - if err != nil { - return err - } - k.TransitedListSize = ndr.ReadUint32(&b, &p, e) - if k.TransitedListSize > 0 { - ts := make([]mstypes.RPCUnicodeString, k.TransitedListSize, k.TransitedListSize) - for i := range ts { - ts[i], err = mstypes.ReadRPCUnicodeString(&b, &p, e) - if err != nil { - return err - } - } - for i := range ts { - ts[i].UnmarshalString(&b, &p, e) - } - k.S4UTransitedServices = ts - } - - //Check that there is only zero padding left - for _, v := range b[p:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/signature_data.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/signature_data.go deleted file mode 100644 index c0bb5cb1d22..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/signature_data.go +++ /dev/null @@ -1,74 +0,0 @@ -package pac - -import ( - "encoding/binary" - - "gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype" - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -/* -https://msdn.microsoft.com/en-us/library/cc237955.aspx - -The Key Usage Value MUST be KERB_NON_KERB_CKSUM_SALT (17) [MS-KILE] (section 3.1.5.9). - -Server Signature (SignatureType = 0x00000006) -https://msdn.microsoft.com/en-us/library/cc237957.aspx -The KDC will use the long-term key that the KDC shares with the server, so that the server can verify this signature on receiving a PAC. -The server signature is a keyed hash [RFC4757] of the entire PAC message, with the Signature fields of both PAC_SIGNATURE_DATA structures set to zero. -The key used to protect the ciphertext part of the response is used. -The checksum type corresponds to the key unless the key is DES, in which case the KERB_CHECKSUM_HMAC_MD5 key is used. -The resulting hash value is then placed in the Signature field of the server's PAC_SIGNATURE_DATA structure. - -KDC Signature (SignatureType = 0x00000007) -https://msdn.microsoft.com/en-us/library/dd357117.aspx -The KDC will use KDC (krbtgt) key [RFC4120], so that other KDCs can verify this signature on receiving a PAC. -The KDC signature is a keyed hash [RFC4757] of the Server Signature field in the PAC message. 
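The verification flow described here re-hashes the PAC with both Signature fields zeroed, which is why SignatureData.Unmarshal below returns a zeroed copy that pac_type.go splices into ZeroSigData. The zeroing step in isolation; zeroSignature is an invented helper, not the library API:

package main

import "fmt"

// zeroSignature returns a copy of data with c signature bytes cleared at offset sp,
// mirroring what SignatureData.Unmarshal does before checksum verification.
func zeroSignature(data []byte, sp, c int) []byte {
	rb := make([]byte, len(data))
	copy(rb, data)
	copy(rb[sp:sp+c], make([]byte, c))
	return rb
}

func main() {
	raw := []byte{0x06, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF} // type field + fake signature
	fmt.Printf("% x\n", zeroSignature(raw, 4, 4))                 // 06 00 00 00 00 00 00 00
}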
-The cryptographic system that is used to calculate the checksum depends on which system the KDC supports, as defined below: -- Supports RC4-HMAC --> KERB_CHECKSUM_HMAC_MD5 -- Does not support RC4-HMAC and supports AES256 --> HMAC_SHA1_96_AES256 -- Does not support RC4-HMAC or AES256-CTS-HMAC-SHA1-96, and supports AES128-CTS-HMAC-SHA1-96 --> HMAC_SHA1_96_AES128 -- Does not support RC4-HMAC, AES128-CTS-HMAC-SHA1-96 or AES256-CTS-HMAC-SHA1-96 --> None. The checksum operation will fail. -*/ - -// SignatureData implements https://msdn.microsoft.com/en-us/library/cc237955.aspx -type SignatureData struct { - SignatureType uint32 // A 32-bit unsigned integer value in little-endian format that defines the cryptographic system used to calculate the checksum. This MUST be one of the following checksum types: KERB_CHECKSUM_HMAC_MD5 (signature size = 16), HMAC_SHA1_96_AES128 (signature size = 12), HMAC_SHA1_96_AES256 (signature size = 12). - Signature []byte // Size depends on the type. See comment above. - RODCIdentifier uint16 // A 16-bit unsigned integer value in little-endian format that contains the first 16 bits of the key version number ([MS-KILE] section 3.1.5.8) when the KDC is an RODC. When the KDC is not an RODC, this field does not exist. -} - -// Unmarshal bytes into the SignatureData struct -func (k *SignatureData) Unmarshal(b []byte) ([]byte, error) { - var p int - var e binary.ByteOrder = binary.LittleEndian - - k.SignatureType = ndr.ReadUint32(&b, &p, &e) - var c int - switch k.SignatureType { - case chksumtype.KERB_CHECKSUM_HMAC_MD5_UNSIGNED: - c = 16 - case uint32(chksumtype.HMAC_SHA1_96_AES128): - c = 12 - case uint32(chksumtype.HMAC_SHA1_96_AES256): - c = 12 - } - sp := p - k.Signature = ndr.ReadBytes(&b, &p, c, &e) - k.RODCIdentifier = ndr.ReadUint16(&b, &p, &e) - - //Check that there is only zero padding left - for _, v := range b[p:] { - if v != 0 { - return []byte{}, ndr.Malformed{EText: "non-zero padding left over at end of data stream"} - } - } - - // Create bytes with zeroed signature needed for checksum verification - rb := make([]byte, len(b), len(b)) - copy(rb, b) - z := make([]byte, len(b), len(b)) - copy(rb[sp:sp+c], z) - - return rb, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/upn_dns_info.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/upn_dns_info.go deleted file mode 100644 index 71f10f056c5..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/pac/upn_dns_info.go +++ /dev/null @@ -1,66 +0,0 @@ -package pac - -import ( - "encoding/binary" - "sort" - - "gopkg.in/jcmturner/rpc.v0/ndr" -) - -// UPNDNSInfo implements https://msdn.microsoft.com/en-us/library/dd240468.aspx -type UPNDNSInfo struct { - UPNLength uint16 // An unsigned 16-bit integer in little-endian format that specifies the length, in bytes, of the UPN field. - UPNOffset uint16 // An unsigned 16-bit integer in little-endian format that contains the offset to the beginning of the buffer, in bytes, from the beginning of the UPN_DNS_INFO structure. - DNSDomainNameLength uint16 - DNSDomainNameOffset uint16 - Flags uint32 - UPN string - DNSDomain string -} - -const ( - upnNoUPNAttr = 31 // The user account object does not have the userPrincipalName attribute ([MS-ADA3] section 2.349) set. A UPN constructed by concatenating the user name with the DNS domain name of the account domain is provided. -) - -// Unmarshal bytes into the UPN_DNSInfo struct -func (k *UPNDNSInfo) Unmarshal(b []byte) error { - //The UPN_DNS_INFO structure is a simple structure that is not NDR-encoded. 
- var p int - var e binary.ByteOrder = binary.LittleEndian - - k.UPNLength = ndr.ReadUint16(&b, &p, &e) - k.UPNOffset = ndr.ReadUint16(&b, &p, &e) - k.DNSDomainNameLength = ndr.ReadUint16(&b, &p, &e) - k.DNSDomainNameOffset = ndr.ReadUint16(&b, &p, &e) - k.Flags = ndr.ReadUint32(&b, &p, &e) - ub := b[k.UPNOffset : k.UPNOffset+k.UPNLength] - db := b[k.DNSDomainNameOffset : k.DNSDomainNameOffset+k.DNSDomainNameLength] - - u := make([]rune, k.UPNLength/2, k.UPNLength/2) - for i := 0; i < len(u); i++ { - q := i * 2 - u[i] = rune(ndr.ReadUint16(&ub, &q, &e)) - } - k.UPN = string(u) - d := make([]rune, k.DNSDomainNameLength/2, k.DNSDomainNameLength/2) - for i := 0; i < len(d); i++ { - q := i * 2 - d[i] = rune(ndr.ReadUint16(&db, &q, &e)) - } - k.DNSDomain = string(d) - - l := []int{ - p, - int(k.UPNOffset + k.UPNLength), - int(k.DNSDomainNameOffset + k.DNSDomainNameLength), - } - sort.Ints(l) - //Check that there is only zero padding left - for _, v := range b[l[2]:] { - if v != 0 { - return ndr.Malformed{EText: "non-zero padding left over at end of data stream."} - } - } - - return nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Authenticator.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Authenticator.go deleted file mode 100644 index 0395922f123..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Authenticator.go +++ /dev/null @@ -1,100 +0,0 @@ -// Package types provides Kerberos 5 data types. -package types - -import ( - "crypto/rand" - "fmt" - "math" - "math/big" - "time" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/asn1tools" - "gopkg.in/jcmturner/gokrb5.v5/iana" - "gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag" -) - -/*Authenticator ::= [APPLICATION 2] SEQUENCE { -authenticator-vno [0] INTEGER (5), -crealm [1] Realm, -cname [2] PrincipalName, -cksum [3] Checksum OPTIONAL, -cusec [4] Microseconds, -ctime [5] KerberosTime, -subkey [6] EncryptionKey OPTIONAL, -seq-number [7] UInt32 OPTIONAL, -authorization-data [8] AuthorizationData OPTIONAL -} - - cksum - This field contains a checksum of the application data that - accompanies the KRB_AP_REQ, computed using a key usage value of 10 - in normal application exchanges, or 6 when used in the TGS-REQ - PA-TGS-REQ AP-DATA field. - -*/ - -// Authenticator - A record containing information that can be shown to have been recently generated using the session key known only by the client and server. -// https://tools.ietf.org/html/rfc4120#section-5.5.1 -type Authenticator struct { - AVNO int `asn1:"explicit,tag:0"` - CRealm string `asn1:"generalstring,explicit,tag:1"` - CName PrincipalName `asn1:"explicit,tag:2"` - Cksum Checksum `asn1:"explicit,optional,tag:3"` - Cusec int `asn1:"explicit,tag:4"` - CTime time.Time `asn1:"generalized,explicit,tag:5"` - SubKey EncryptionKey `asn1:"explicit,optional,tag:6"` - SeqNumber int64 `asn1:"explicit,optional,tag:7"` - AuthorizationData AuthorizationData `asn1:"explicit,optional,tag:8"` -} - -// NewAuthenticator creates a new Authenticator. 
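The UPN and DNS names above are raw UTF-16LE code units located by offset, decoded one rune per 16-bit unit. For reference, the standard library can do the same decoding, and utf16.Decode additionally handles surrogate pairs, which the per-unit rune conversion in the original does not. A standalone sketch with a made-up buffer:

package main

import (
	"encoding/binary"
	"fmt"
	"unicode/utf16"
)

func main() {
	// "user@" laid out as UTF-16LE bytes, the encoding UPN_DNS_INFO stores.
	b := []byte{'u', 0, 's', 0, 'e', 0, 'r', 0, '@', 0}
	u := make([]uint16, len(b)/2)
	for i := range u {
		u[i] = binary.LittleEndian.Uint16(b[2*i:])
	}
	fmt.Println(string(utf16.Decode(u))) // user@
}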
-func NewAuthenticator(realm string, cname PrincipalName) (Authenticator, error) { - seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) - if err != nil { - return Authenticator{}, err - } - t := time.Now().UTC() - return Authenticator{ - AVNO: iana.PVNO, - CRealm: realm, - CName: cname, - Cksum: Checksum{}, - Cusec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)), - CTime: t, - SeqNumber: seq.Int64(), - }, nil -} - -// GenerateSeqNumberAndSubKey sets the Authenticator's sequence number and subkey. -func (a *Authenticator) GenerateSeqNumberAndSubKey(keyType int32, keySize int) error { - seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32)) - if err != nil { - return err - } - a.SeqNumber = seq.Int64() - //Generate subkey value - sk := make([]byte, keySize, keySize) - rand.Read(sk) - a.SubKey = EncryptionKey{ - KeyType: keyType, - KeyValue: sk, - } - return nil -} - -// Unmarshal bytes into the Authenticator. -func (a *Authenticator) Unmarshal(b []byte) error { - _, err := asn1.UnmarshalWithParams(b, a, fmt.Sprintf("application,explicit,tag:%v", asnAppTag.Authenticator)) - return err -} - -// Marshal the Authenticator. -func (a *Authenticator) Marshal() ([]byte, error) { - b, err := asn1.Marshal(*a) - if err != nil { - return nil, err - } - b = asn1tools.AddASNAppTag(b, asnAppTag.Authenticator) - return b, nil -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/AuthorizationData.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/AuthorizationData.go deleted file mode 100644 index c9448008b55..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/AuthorizationData.go +++ /dev/null @@ -1,123 +0,0 @@ -package types - -import ( - "github.com/jcmturner/gofork/encoding/asn1" -) - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.2.6 - -/* -AuthorizationData - --- NOTE: AuthorizationData is always used as an OPTIONAL field and --- should not be empty. -AuthorizationData ::= SEQUENCE OF SEQUENCE { -ad-type [0] Int32, -ad-data [1] OCTET STRING -} - -ad-data -This field contains authorization data to be interpreted according -to the value of the corresponding ad-type field. - -ad-type - This field specifies the format for the ad-data subfield. All -negative values are reserved for local use. Non-negative values -are reserved for registered use. - -Each sequence of type and data is referred to as an authorization -element. Elements MAY be application specific; however, there is a -common set of recursive elements that should be understood by all -implementations. These elements contain other elements embedded -within them, and the interpretation of the encapsulating element -determines which of the embedded elements must be interpreted, and -which may be ignored. - -These common authorization data elements are recursively defined, -meaning that the ad-data for these types will itself contain a -sequence of authorization data whose interpretation is affected by -the encapsulating element. Depending on the meaning of the -encapsulating element, the encapsulated elements may be ignored, -might be interpreted as issued directly by the KDC, or might be -stored in a separate plaintext part of the ticket. The types of the -encapsulating elements are specified as part of the Kerberos -specification because the behavior based on these values should be -understood across implementations, whereas other elements need only -be understood by the applications that they affect. 
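One detail in GenerateSeqNumberAndSubKey above is easy to overlook: the error from rand.Read is discarded, so a failed entropy read could leave the subkey partially or fully zeroed. A standalone sketch of the same generation with the error handled; the 32-byte key size here is arbitrary:

package main

import (
	"crypto/rand"
	"fmt"
	"math"
	"math/big"
)

func main() {
	// Sequence number in [0, MaxUint32), as NewAuthenticator draws it.
	seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
	if err != nil {
		panic(err)
	}
	// Subkey material; unlike the original, the rand.Read error is checked.
	sk := make([]byte, 32)
	if _, err := rand.Read(sk); err != nil {
		panic(err)
	}
	fmt.Println(seq.Int64(), len(sk))
}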
- -Authorization data elements are considered critical if present in a -ticket or authenticator. If an unknown authorization data element -type is received by a server either in an AP-REQ or in a ticket -contained in an AP-REQ, then, unless it is encapsulated in a known -authorization data element amending the criticality of the elements -it contains, authentication MUST fail. Authorization data is -intended to restrict the use of a ticket. If the service cannot -determine whether the restriction applies to that service, then a -security weakness may result if the ticket can be used for that -service. Authorization elements that are optional can be enclosed in -an AD-IF-RELEVANT element. - -In the definitions that follow, the value of the ad-type for the -element will be specified as the least significant part of the -subsection number, and the value of the ad-data will be as shown in -the ASN.1 structure that follows the subsection heading. - - Contents of ad-data ad-type - - DER encoding of AD-IF-RELEVANT 1 - - DER encoding of AD-KDCIssued 4 - - DER encoding of AD-AND-OR 5 - - DER encoding of AD-MANDATORY-FOR-KDC 8 - -*/ - -// AuthorizationData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 -type AuthorizationData []AuthorizationDataEntry - -// AuthorizationDataEntry implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6 -type AuthorizationDataEntry struct { - ADType int32 `asn1:"explicit,tag:0"` - ADData []byte `asn1:"explicit,tag:1"` -} - -// ADIfRelevant implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.1 -type ADIfRelevant AuthorizationData - -// ADKDCIssued implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.2 -type ADKDCIssued struct { - ADChecksum Checksum `asn1:"explicit,tag:0"` - IRealm string `asn1:"optional,generalstring,explicit,tag:1"` - Isname PrincipalName `asn1:"optional,explicit,tag:2"` - Elements AuthorizationData `asn1:"explicit,tag:3"` -} - -// ADAndOr implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.3 -type ADAndOr struct { - ConditionCount int32 `asn1:"explicit,tag:0"` - Elements AuthorizationData `asn1:"explicit,tag:1"` -} - -// ADMandatoryForKDC implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.6.4 -type ADMandatoryForKDC AuthorizationData - -// Unmarshal bytes into the ADKDCIssued. -func (a *ADKDCIssued) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} - -// Unmarshal bytes into the AuthorizationData. -func (a *AuthorizationData) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} - -// Unmarshal bytes into the AuthorizationDataEntry. 
-func (a *AuthorizationDataEntry) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Cryptosystem.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Cryptosystem.go deleted file mode 100644 index 7e8b4ab2969..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/Cryptosystem.go +++ /dev/null @@ -1,55 +0,0 @@ -package types - -import ( - "github.com/jcmturner/gofork/encoding/asn1" -) - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.2.9 - -// EncryptedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 -type EncryptedData struct { - EType int32 `asn1:"explicit,tag:0"` - KVNO int `asn1:"explicit,optional,tag:1"` - Cipher []byte `asn1:"explicit,tag:2"` -} - -// EncryptionKey implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 -// AKA KeyBlock -type EncryptionKey struct { - KeyType int32 `asn1:"explicit,tag:0"` - KeyValue []byte `asn1:"explicit,tag:1"` -} - -// Checksum implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.9 -type Checksum struct { - CksumType int32 `asn1:"explicit,tag:0"` - Checksum []byte `asn1:"explicit,tag:1"` -} - -// Unmarshal bytes into the EncryptedData. -func (a *EncryptedData) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} - -// Marshal the EncryptedData. -func (a *EncryptedData) Marshal() ([]byte, error) { - edb, err := asn1.Marshal(*a) - if err != nil { - return edb, err - } - return edb, nil -} - -// Unmarshal bytes into the EncryptionKey. -func (a *EncryptionKey) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} - -// Unmarshal bytes into the Checksum. -func (a *Checksum) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/HostAddress.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/HostAddress.go deleted file mode 100644 index 3ec0d3f5ba6..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/HostAddress.go +++ /dev/null @@ -1,206 +0,0 @@ -package types - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.2.5 - -import ( - "bytes" - "fmt" - "net" - - "github.com/jcmturner/gofork/encoding/asn1" - "gopkg.in/jcmturner/gokrb5.v5/iana/addrtype" -) - -/* -HostAddress and HostAddresses - -HostAddress ::= SEQUENCE { - addr-type [0] Int32, - address [1] OCTET STRING -} - --- NOTE: HostAddresses is always used as an OPTIONAL field and --- should not be empty. -HostAddresses -- NOTE: subtly different from rfc1510, - -- but has a value mapping and encodes the same - ::= SEQUENCE OF HostAddress - -The host address encodings consist of two fields: - -addr-type - This field specifies the type of address that follows. Pre- - defined values for this field are specified in Section 7.5.3. - -address - This field encodes a single address of type addr-type. 
-*/ - -// HostAddresses implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5 -type HostAddresses []HostAddress - -// HostAddress implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.2.5 -type HostAddress struct { - AddrType int32 `asn1:"explicit,tag:0"` - Address []byte `asn1:"explicit,tag:1"` -} - -// GetHostAddress returns a HostAddress struct from a string in the format <host>:<port> -func GetHostAddress(s string) (HostAddress, error) { - var h HostAddress - cAddr, _, err := net.SplitHostPort(s) - if err != nil { - return h, fmt.Errorf("invalid format of client address: %v", err) - } - ip := net.ParseIP(cAddr) - hb, err := ip.MarshalText() - if err != nil { - return h, fmt.Errorf("could not marshal client's address into bytes: %v", err) - } - var ht int32 - if ip.To4() != nil { - ht = addrtype.IPv4 - } else if ip.To16() != nil { - ht = addrtype.IPv6 - } else { - return h, fmt.Errorf("could not determine client's address types: %v", err) - } - h = HostAddress{ - AddrType: ht, - Address: hb, - } - return h, nil -} - -// GetAddress returns a string representation of the HostAddress. -func (h *HostAddress) GetAddress() (string, error) { - var b []byte - _, err := asn1.Unmarshal(h.Address, &b) - return string(b), err -} - -// LocalHostAddresses returns a HostAddresses struct for the local machines interface IP addresses. -func LocalHostAddresses() (ha HostAddresses, err error) { - ifs, err := net.Interfaces() - if err != nil { - return - } - for _, iface := range ifs { - if iface.Flags&net.FlagLoopback != 0 || iface.Flags&net.FlagUp == 0 { - // Interface is either loopback of not up - continue - } - addrs, err := iface.Addrs() - if err != nil { - continue - } - for _, addr := range addrs { - var ip net.IP - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - var a HostAddress - if ip.To16() == nil { - //neither IPv4 or IPv6 - continue - } - if ip.To4() != nil { - //Is IPv4 - a.AddrType = addrtype.IPv4 - a.Address = ip.To4() - } else { - a.AddrType = addrtype.IPv6 - a.Address = ip.To16() - } - ha = append(ha, a) - } - } - return ha, nil -} - -// HostAddressesFromNetIPs returns a HostAddresses type from a slice of net.IP -func HostAddressesFromNetIPs(ips []net.IP) (ha HostAddresses) { - for _, ip := range ips { - ha = append(ha, HostAddressFromNetIP(ip)) - } - return ha -} - -// HostAddressFromNetIP returns a HostAddress type from a net.IP -func HostAddressFromNetIP(ip net.IP) HostAddress { - if ip.To4() != nil { - //Is IPv4 - return HostAddress{ - AddrType: addrtype.IPv4, - Address: ip.To4(), - } - } - return HostAddress{ - AddrType: addrtype.IPv6, - Address: ip.To16(), - } -} - -// HostAddressesEqual tests if two HostAddress slices are equal. -func HostAddressesEqual(h, a []HostAddress) bool { - if len(h) != len(a) { - return false - } - for _, e := range a { - var found bool - for _, i := range h { - if e.Equal(i) { - found = true - break - } - } - if !found { - return false - } - } - return true -} - -// HostAddressesContains tests if a HostAddress is contained in a HostAddress slice. -func HostAddressesContains(h []HostAddress, a HostAddress) bool { - for _, e := range h { - if e.Equal(a) { - return true - } - } - return false -} - -// Equal tests if the HostAddress is equal to another HostAddress provided.
-func (h *HostAddress) Equal(a HostAddress) bool { - if h.AddrType != a.AddrType { - return false - } - return bytes.Equal(h.Address, a.Address) -} - -// Contains tests if a HostAddress is contained within the HostAddresses struct. -func (h *HostAddresses) Contains(a HostAddress) bool { - for _, e := range *h { - if e.Equal(a) { - return true - } - } - return false -} - -// Equal tests if a HostAddress slice is equal to the HostAddresses struct. -func (h *HostAddresses) Equal(a []HostAddress) bool { - if len(*h) != len(a) { - return false - } - for _, e := range a { - if !h.Contains(e) { - return false - } - } - return true -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/KerberosFlags.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/KerberosFlags.go deleted file mode 100644 index 06c3a17090e..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/KerberosFlags.go +++ /dev/null @@ -1,124 +0,0 @@ -package types - -// Reference: https://www.ietf.org/rfc/rfc4120.txt -// Section: 5.2.8 - -import ( - "github.com/jcmturner/gofork/encoding/asn1" -) - -/* -KerberosFlags - -For several message types, a specific constrained bit string type, -KerberosFlags, is used. - -KerberosFlags ::= BIT STRING (SIZE (32..MAX)) --- minimum number of bits shall be sent, --- but no fewer than 32 - -Compatibility note: The following paragraphs describe a change from -the RFC 1510 description of bit strings that would result in -incompatility in the case of an implementation that strictly -conformed to ASN.1 DER and RFC 1510. - -ASN.1 bit strings have multiple uses. The simplest use of a bit -string is to contain a vector of bits, with no particular meaning -attached to individual bits. This vector of bits is not necessarily -a multiple of eight bits long. The use in Kerberos of a bit string -as a compact boolean vector wherein each element has a distinct -meaning poses some problems. The natural notation for a compact -boolean vector is the ASN.1 "NamedBit" notation, and the DER require -that encodings of a bit string using "NamedBit" notation exclude any -trailing zero bits. This truncation is easy to neglect, especially -given C language implementations that naturally choose to store -boolean vectors as 32-bit integers. - -For example, if the notation for KDCOptions were to include the -"NamedBit" notation, as in RFC 1510, and a KDCOptions value to be -encoded had only the "forwardable" (bit number one) bit set, the DER -encoding MUST include only two bits: the first reserved bit -("reserved", bit number zero, value zero) and the one-valued bit (bit -number one) for "forwardable". - -Most existing implementations of Kerberos unconditionally send 32 -bits on the wire when encoding bit strings used as boolean vectors. -This behavior violates the ASN.1 syntax used for flag values in RFC -1510, but it occurs on such a widely installed base that the protocol -description is being modified to accommodate it. - -Consequently, this document removes the "NamedBit" notations for -individual bits, relegating them to comments. The size constraint on -the KerberosFlags type requires that at least 32 bits be encoded at -all times, though a lenient implementation MAY choose to accept fewer -than 32 bits and to treat the missing bits as set to zero. - -Currently, no uses of KerberosFlags specify more than 32 bits' worth -of flags, although future revisions of this document may do so. 
When -more than 32 bits are to be transmitted in a KerberosFlags value, -future revisions to this document will likely specify that the -smallest number of bits needed to encode the highest-numbered one- -valued bit should be sent. This is somewhat similar to the DER -encoding of a bit string that is declared with the "NamedBit" -notation. -*/ - -// NewKrbFlags returns an ASN1 BitString struct of the right size for KrbFlags. -func NewKrbFlags() asn1.BitString { - f := asn1.BitString{} - f.Bytes = make([]byte, 4) - f.BitLength = len(f.Bytes) * 8 - return f -} - -// SetFlags sets the flags of an ASN1 BitString. -func SetFlags(f *asn1.BitString, j []int) { - for _, i := range j { - SetFlag(f, i) - } -} - -// SetFlag sets a flag in an ASN1 BitString. -func SetFlag(f *asn1.BitString, i int) { - for l := len(f.Bytes); l < 4; l++ { - (*f).Bytes = append((*f).Bytes, byte(0)) - (*f).BitLength = len((*f).Bytes) * 8 - } - //Which byte? - b := int(i / 8) - //Which bit in byte - p := uint(7 - (i - 8*b)) - (*f).Bytes[b] = (*f).Bytes[b] | (1 << p) -} - -// UnsetFlags unsets flags in an ASN1 BitString. -func UnsetFlags(f *asn1.BitString, j []int) { - for _, i := range j { - UnsetFlag(f, i) - } -} - -// UnsetFlag unsets a flag in an ASN1 BitString. -func UnsetFlag(f *asn1.BitString, i int) { - for l := len(f.Bytes); l < 4; l++ { - (*f).Bytes = append((*f).Bytes, byte(0)) - (*f).BitLength = len((*f).Bytes) * 8 - } - //Which byte? - b := int(i / 8) - //Which bit in byte - p := uint(7 - (i - 8*b)) - (*f).Bytes[b] = (*f).Bytes[b] &^ (1 << p) -} - -// IsFlagSet tests if a flag is set in the ASN1 BitString. -func IsFlagSet(f *asn1.BitString, i int) bool { - //Which byte? - b := int(i / 8) - //Which bit in byte - p := uint(7 - (i - 8*b)) - if (*f).Bytes[b]&(1<<p) != 0 { - return true - } - return false -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/PrincipalName.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/PrincipalName.go deleted file mode 100644 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/PrincipalName.go +++ /dev/null -// ParseSPNString will parse a string in the format <service>/<name>@<realm> -// a PrincipalName type will be returned with the name type set to KRB_NT_PRINCIPAL(1) -// and the realm will be returned as a string. If the "@" suffix -// is not included in the SPN then the value of realm string returned will be "" -func ParseSPNString(spn string) (pn PrincipalName, realm string) { - if strings.Contains(spn, "@") { - s := strings.Split(spn, "@") - realm = s[len(s)-1] - spn = strings.TrimSuffix(spn, "@"+realm) - } - pn = NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn) - return -} diff --git a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/TypedData.go b/vendor/gopkg.in/jcmturner/gokrb5.v5/types/TypedData.go deleted file mode 100644 index 19e9f4961f4..00000000000 --- a/vendor/gopkg.in/jcmturner/gokrb5.v5/types/TypedData.go +++ /dev/null @@ -1,18 +0,0 @@ -package types - -import "github.com/jcmturner/gofork/encoding/asn1" - -// TypedData implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 -type TypedData struct { - DataType int32 `asn1:"explicit,tag:0"` - DataValue []byte `asn1:"optional,explicit,tag:1"` -} - -// TypedDataSequence implements RFC 4120 type: https://tools.ietf.org/html/rfc4120#section-5.9.1 -type TypedDataSequence []TypedData - -// Unmarshal bytes into the TypedDataSequence. -func (a *TypedDataSequence) Unmarshal(b []byte) error { - _, err := asn1.Unmarshal(b, a) - return err -} diff --git a/vendor/gopkg.in/jcmturner/rpc.v0/LICENSE b/vendor/gopkg.in/jcmturner/rpc.v0/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/vendor/gopkg.in/jcmturner/rpc.v0/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
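For reference, the KerberosFlags helpers deleted above treat the ASN.1 BIT STRING as an MSB-first boolean vector: flag i lives in byte i/8, at bit position 7-(i mod 8) within that byte. A minimal, self-contained sketch of that indexing follows, using a plain []byte rather than the deleted gofork asn1.BitString type, so the names here are illustrative and not part of the patch:

package main

import "fmt"

// setFlag sets flag i in an MSB-first boolean vector, mirroring the
// byte/bit arithmetic of the deleted SetFlag helper.
func setFlag(v []byte, i int) {
	b := i / 8             // which byte holds the flag
	p := uint(7 - (i % 8)) // which bit within that byte, MSB first
	v[b] |= 1 << p
}

// isFlagSet mirrors the test in the deleted IsFlagSet helper.
func isFlagSet(v []byte, i int) bool {
	b := i / 8
	p := uint(7 - (i % 8))
	return v[b]&(1<<p) != 0
}

func main() {
	v := make([]byte, 4) // KerberosFlags must encode at least 32 bits
	setFlag(v, 1)        // e.g. bit 1 is the "forwardable" KDC option
	fmt.Printf("%08b flag1=%v flag2=%v\n", v[0], isFlagSet(v, 1), isFlagSet(v, 2))
	// Output: 01000000 flag1=true flag2=false
}

Allocating four bytes up front matches the RFC 4120 size constraint quoted above: at least 32 bits are always encoded, even when only low-numbered flags are set.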
diff --git a/vendor/gopkg.in/jcmturner/rpc.v0/ndr/error.go b/vendor/gopkg.in/jcmturner/rpc.v0/ndr/error.go deleted file mode 100644 index 3b44ee45e93..00000000000 --- a/vendor/gopkg.in/jcmturner/rpc.v0/ndr/error.go +++ /dev/null @@ -1,13 +0,0 @@ -package ndr - -import "fmt" - -// Malformed implements the error interface for malformed NDR encoding errors. -type Malformed struct { - EText string -} - -// Error implements the error interface on the Malformed struct. -func (e Malformed) Error() string { - return fmt.Sprintf("malformed NDR steam: %s", e.EText) -} diff --git a/vendor/gopkg.in/jcmturner/rpc.v0/ndr/ndr.go b/vendor/gopkg.in/jcmturner/rpc.v0/ndr/ndr.go deleted file mode 100644 index 5b3e87ae9b3..00000000000 --- a/vendor/gopkg.in/jcmturner/rpc.v0/ndr/ndr.go +++ /dev/null @@ -1,444 +0,0 @@ -// Package ndr is a partial implementation of NDR encoding: http://pubs.opengroup.org/onlinepubs/9629399/chap14.htm -package ndr - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" -) - -// Useful reference: https://docs.microsoft.com/en-us/windows/desktop/Rpc/rpc-ndr-engine - -/* -Serialization Version 1 -https://msdn.microsoft.com/en-us/library/cc243563.aspx - -Common Header - https://msdn.microsoft.com/en-us/library/cc243890.aspx -8 bytes in total: -- First byte - Version: Must equal 1 -- Second byte - 1st 4 bits: Endianess (0=Big; 1=Little); 2nd 4 bits: Character Encoding (0=ASCII; 1=EBCDIC) -- 3rd - Floating point representation -- 4th - Common Header Length: Must equal 8 -- 5th - 8th - Filler: MUST be set to 0xcccccccc on marshaling, and SHOULD be ignored during unmarshaling. - -Private Header - https://msdn.microsoft.com/en-us/library/cc243919.aspx -8 bytes in total: -- First 4 bytes - Indicates the length of a serialized top-level type in the octet stream. It MUST include the padding length and exclude the header itself. -- Second 4 bytes - Filler: MUST be set to 0 (zero) during marshaling, and SHOULD be ignored during unmarshaling. -*/ - -const ( - protocolVersion = 1 - commonHeaderBytes = 8 - privateHeaderBytes = 8 - bigEndian = 0 - littleEndian = 1 - ascii = 0 - ebcdic = 1 - ieee = 0 - vax = 1 - cray = 2 - ibm = 3 -) - -// CommonHeader implements the NDR common header: https://msdn.microsoft.com/en-us/library/cc243889.aspx -type CommonHeader struct { - Version uint8 - Endianness binary.ByteOrder - CharacterEncoding uint8 - //FloatRepresentation uint8 - HeaderLength uint16 - Filler []byte -} - -// PrivateHeader implements the NDR private header: https://msdn.microsoft.com/en-us/library/cc243919.aspx -type PrivateHeader struct { - ObjectBufferLength uint32 - Filler []byte -} - -// ReadHeaders processes the bytes to return the NDR Common and Private headers. -func ReadHeaders(b *[]byte) (CommonHeader, PrivateHeader, int, error) { - ch, p, err := GetCommonHeader(b) - if err != nil { - return CommonHeader{}, PrivateHeader{}, 0, err - } - ph, err := GetPrivateHeader(b, &p, &ch.Endianness) - if err != nil { - return CommonHeader{}, PrivateHeader{}, 0, err - } - return ch, ph, p, err -} - -// GetCommonHeader processes the bytes to return the NDR Common header. -func GetCommonHeader(b *[]byte) (CommonHeader, int, error) { - //The first 8 bytes comprise the Common RPC Header for type marshalling. 
- if len(*b) < commonHeaderBytes { - return CommonHeader{}, 0, Malformed{EText: "Not enough bytes."} - } - if (*b)[0] != protocolVersion { - return CommonHeader{}, 0, Malformed{EText: fmt.Sprintf("Stream does not indicate a RPC Type serialization of version %v", protocolVersion)} - } - endian := int((*b)[1] >> 4 & 0xF) - if endian != 0 && endian != 1 { - return CommonHeader{}, 1, Malformed{EText: "Common header does not indicate a valid endianness"} - } - charEncoding := uint8((*b)[1] & 0xF) - if charEncoding != 0 && charEncoding != 1 { - return CommonHeader{}, 1, Malformed{EText: "Common header does not indicate a valid charater encoding"} - } - var bo binary.ByteOrder - switch endian { - case littleEndian: - bo = binary.LittleEndian - case bigEndian: - bo = binary.BigEndian - } - l := bo.Uint16((*b)[2:4]) - if l != commonHeaderBytes { - return CommonHeader{}, 4, Malformed{EText: fmt.Sprintf("Common header does not indicate a valid length: %v instead of %v", uint8((*b)[3]), commonHeaderBytes)} - } - - return CommonHeader{ - Version: uint8((*b)[0]), - Endianness: bo, - CharacterEncoding: charEncoding, - //FloatRepresentation: uint8(b[2]), - HeaderLength: l, - Filler: (*b)[4:8], - }, 8, nil -} - -// GetPrivateHeader processes the bytes to return the NDR Private header. -func GetPrivateHeader(b *[]byte, p *int, bo *binary.ByteOrder) (PrivateHeader, error) { - //The next 8 bytes comprise the RPC type marshalling private header for constructed types. - if len(*b) < (privateHeaderBytes) { - return PrivateHeader{}, Malformed{EText: "Not enough bytes."} - } - var l uint32 - buf := bytes.NewBuffer((*b)[*p : *p+4]) - binary.Read(buf, *bo, &l) - if l%8 != 0 { - return PrivateHeader{}, Malformed{EText: "Object buffer length not a multiple of 8"} - } - *p += 8 - return PrivateHeader{ - ObjectBufferLength: l, - Filler: (*b)[4:8], - }, nil -} - -// ReadUint8 reads bytes representing a thirty two bit integer. -func ReadUint8(b *[]byte, p *int) (i uint8) { - if len((*b)[*p:]) < 1 { - return - } - ensureAlignment(p, 1) - i = uint8((*b)[*p]) - *p++ - return -} - -// ReadUint16 reads bytes representing a thirty two bit integer. -func ReadUint16(b *[]byte, p *int, e *binary.ByteOrder) (i uint16) { - if len((*b)[*p:]) < 2 { - return - } - ensureAlignment(p, 2) - i = (*e).Uint16((*b)[*p : *p+2]) - *p += 2 - return -} - -// ReadUint32 reads bytes representing a thirty two bit integer. -func ReadUint32(b *[]byte, p *int, e *binary.ByteOrder) (i uint32) { - if len((*b)[*p:]) < 4 { - return - } - ensureAlignment(p, 4) - i = (*e).Uint32((*b)[*p : *p+4]) - *p += 4 - return -} - -// ReadUint64 reads bytes representing a thirty two bit integer. -func ReadUint64(b *[]byte, p *int, e *binary.ByteOrder) (i uint64) { - if len((*b)[*p:]) < 8 { - return - } - ensureAlignment(p, 8) - i = (*e).Uint64((*b)[*p : *p+8]) - *p += 8 - return -} - -// ReadBytes reads the number of bytes specified. -func ReadBytes(b *[]byte, p *int, s int, e *binary.ByteOrder) (r []byte) { - if len((*b)[*p:]) < s { - return - } - buf := bytes.NewBuffer((*b)[*p : *p+s]) - r = make([]byte, s) - binary.Read(buf, *e, &r) - *p += s - return r -} - -// ReadBool reads bytes representing a boolean. -func ReadBool(b *[]byte, p *int) bool { - if len((*b)[*p:]) < 1 { - return false - } - if ReadUint8(b, p) != 0 { - return true - } - return false -} - -// ReadIEEEfloat32 reads bytes representing a IEEE formatted 32 bit float. 
-func ReadIEEEfloat32(b *[]byte, p *int, e *binary.ByteOrder) float32 { - ensureAlignment(p, 4) - return math.Float32frombits(ReadUint32(b, p, e)) -} - -// ReadIEEEfloat64 reads bytes representing a IEEE formatted 64 bit float. -func ReadIEEEfloat64(b *[]byte, p *int, e *binary.ByteOrder) float64 { - ensureAlignment(p, 8) - return math.Float64frombits(ReadUint64(b, p, e)) -} - -// Conformant - don't know the max count in advance -// Varying - don't know the actual count in advance - -// ReadConformantVaryingString reads a Conformant and Varying String from the bytes slice. -// A conformant and varying string is a string in which the maximum number of elements is not known beforehand and therefore is included in the representation of the string. -// NDR represents a conformant and varying string as an ordered sequence of representations of the string elements, preceded by three unsigned long integers. -// The first integer gives the maximum number of elements in the string, including the terminator. -// The second integer gives the offset from the first index of the string to the first index of the actual subset being passed. -// The third integer gives the actual number of elements being passed, including the terminator. -func ReadConformantVaryingString(b *[]byte, p *int, e *binary.ByteOrder) (string, error) { - m := ReadUint32(b, p, e) // Max element count - o := ReadUint32(b, p, e) // Offset - a := ReadUint32(b, p, e) // Actual count - if a > (m-o) || o > m { - return "", Malformed{EText: fmt.Sprintf("Not enough bytes to read conformant varying string. Max: %d, Offset: %d, Actual: %d", m, o, a)} - } - //Unicode string so each element is 2 bytes - //move position based on the offset - if o > 0 { - *p += int(o * 2) - } - s := make([]rune, a, a) - for i := 0; i < len(s); i++ { - s[i] = rune(ReadUint16(b, p, e)) - } - ensureAlignment(p, 4) - if len(s) > 0 { - // Remove any null terminator - if s[len(s)-1] == rune(0) { - s = s[:len(s)-1] - } - } - return string(s), nil -} - -// NDR defines a special representation for an array whose elements are strings. -// In the NDR representation of an array of strings, any conformance information (maximum element counts) -// for the strings is removed from the string representations and included in the conformance information for the array, -// but any variance information (offsets and actual element counts) for the strings remains with the string representations. -// -// If the strings are conformant or if any dimension of the array is conformant, then the representation contains maximum element counts for all dimensions of the array and for the strings. -// -// If the strings are non-conformant and the array is non-conformant, then the representation does not contain any maximum element counts. -// -// If any dimension of the array is varying, then the representation contains offsets and actual counts for all dimensions of the array. -// -// If the array is non-varying, then the representation does not contain any offsets or actual counts for the array, although it does contain offsets and actual counts for the strings. 
-func ReadConformantVaryingStringArray(b *[]byte, p *int, e *binary.ByteOrder, n int) ([]string, error) { - // Read Max count for each dimension - sm := make([]int, n, n) - for i := range sm { - sm[i] = int(ReadUint32(b, p, e)) - } - // max count for all the strings - m := int(ReadUint32(b, p, e)) - // Read each elements header - h := make([]VaryingArrayHeader, n, n) - for i := range h { - // Offset for the dimension - h[i].Offset = int(ReadUint32(b, p, e)) - // Actual count for the dimension - h[i].ActualCount = int(ReadUint32(b, p, e)) - } - sa := make([]string, n, n) - for i := range sa { - o := int(ReadUint32(b, p, e)) // Offset - a := int(ReadUint32(b, p, e)) // Actual count - if a > (m-h[i].Offset) || h[i].Offset > m { - return sa, Malformed{EText: fmt.Sprintf("Not enough bytes to read conformant varying string. Max: %d, Offset: %d, Actual: %d", m, o, a)} - } - //Unicode string so each element is 2 bytes - //move position based on the offset - if o > 0 { - *p += int(o * 2) - } - s := make([]rune, a, a) - for i := 0; i < len(s); i++ { - s[i] = rune(ReadUint16(b, p, e)) - } - ensureAlignment(p, 4) - if len(s) > 0 { - // Remove any null terminator - if s[len(s)-1] == rune(0) { - s = s[:len(s)-1] - } - } - sa[i] = string(s) - } - return sa, nil -} - -type ConformantArrayHeader struct { - MaxCount int -} - -type VaryingArrayHeader struct { - Offset int - ActualCount int -} - -type ConformantVaryingArrayHeader struct { - ConformantArrayHeader - VaryingArrayHeader -} - -// ReadUniDimensionalConformantArrayHeader reads a UniDimensionalConformantArrayHeader from the bytes slice. -func ReadUniDimensionalConformantArrayHeader(b *[]byte, p *int, e *binary.ByteOrder) (h ConformantArrayHeader, err error) { - if len((*b)[*p:]) < 4 { - err = Malformed{EText: "Not enough bytes to read uni-dimensional conformant array"} - return - } - // Max count int - h.MaxCount = int(ReadUint32(b, p, e)) - return -} - -// ReadMultiDimensionalConformantArrayHeader reads a MultiDimensionalConformantArrayHeader of n dimensions from the bytes slice. -func ReadMultiDimensionalConformantArrayHeader(b *[]byte, p *int, e *binary.ByteOrder, n int) ([]ConformantArrayHeader, error) { - if len((*b)[*p:]) < n*4 { - return []ConformantArrayHeader{}, Malformed{EText: "Not enough bytes to read conformant array"} - } - h := make([]ConformantArrayHeader, n, n) - for i := range h { - // Max count int for that dimension - h[i].MaxCount = int(ReadUint32(b, p, e)) - } - return h, nil -} - -// ReadUniDimensionalVaryingArrayHeader reads a UniDimensionalVaryingArrayHeader from the bytes slice. -func ReadUniDimensionalVaryingArrayHeader(b *[]byte, p *int, e *binary.ByteOrder) (h VaryingArrayHeader, err error) { - if len((*b)[*p:]) < 8 { - err = Malformed{EText: "Not enough bytes to read uni-dimensional varying array"} - return - } - h.Offset = int(ReadUint32(b, p, e)) - h.ActualCount = int(ReadUint32(b, p, e)) - return -} - -// ReadMultiDimensionalVaryingArrayHeader reads a MultiDimensionalVaryingArrayHeader of n dimensions from the bytes slice. 
-func ReadMultiDimensionalVaryingArrayHeader(b *[]byte, p *int, e *binary.ByteOrder, n int) ([]VaryingArrayHeader, error) { - if len((*b)[*p:]) < n*4*2 { - return []VaryingArrayHeader{}, Malformed{EText: "Not enough bytes to read varying array"} - } - h := make([]VaryingArrayHeader, n, n) - for i := range h { - // Offset for the dimension - h[i].Offset = int(ReadUint32(b, p, e)) - // Actual count for the dimension - h[i].ActualCount = int(ReadUint32(b, p, e)) - } - return h, nil -} - -// ReadUniDimensionalConformantVaryingArrayHeader reads a UniDimensionalConformantVaryingArrayHeader from the bytes slice. -func ReadUniDimensionalConformantVaryingArrayHeader(b *[]byte, p *int, e *binary.ByteOrder) (h ConformantVaryingArrayHeader, err error) { - if len((*b)[*p:]) < 12 { - err = Malformed{EText: "Not enough bytes to read uni-dimensional conformant varying array"} - return - } - h.MaxCount = int(ReadUint32(b, p, e)) - h.Offset = int(ReadUint32(b, p, e)) - h.ActualCount = int(ReadUint32(b, p, e)) - if h.ActualCount > (h.MaxCount-h.Offset) || h.Offset > h.MaxCount { - err = Malformed{EText: fmt.Sprintf("Not enough bytes to read uni-dimensional conformant varying array. Max: %d, Offset: %d, Actual: %d", h.MaxCount, h.Offset, h.ActualCount)} - } - return -} - -// ReadMultiDimensionalConformantVaryingArrayHeader reads a MultiDimensionalConformantVaryingArrayHeader of n dimensions from the bytes slice. -func ReadMultiDimensionalConformantVaryingArrayHeader(b *[]byte, p *int, e *binary.ByteOrder, n int) ([]ConformantVaryingArrayHeader, error) { - if len((*b)[*p:]) < n*4*3 { - return []ConformantVaryingArrayHeader{}, Malformed{EText: "Not enough bytes to read conformant varying array"} - } - h := make([]ConformantVaryingArrayHeader, n, n) - for i := range h { - h[i].MaxCount = int(ReadUint32(b, p, e)) - } - for i := range h { - h[i].Offset = int(ReadUint32(b, p, e)) - h[i].ActualCount = int(ReadUint32(b, p, e)) - } - return h, nil -} - -func ensureAlignment(p *int, byteSize int) { - if byteSize > 0 { - if s := *p % byteSize; s != 0 { - *p += byteSize - s - } - } -} - -// ReadUTF16String returns a string that is UTF16 encoded in a byte slice. n is the number of bytes representing the string -func ReadUTF16String(n int, b *[]byte, p *int, e *binary.ByteOrder) string { - //Length divided by 2 as each run is 16bits = 2bytes - s := make([]rune, n/2, n/2) - for i := 0; i < len(s); i++ { - s[i] = rune(ReadUint16(b, p, e)) - } - return string(s) -} - -//func DebugByteSteamView(p int, b []byte) { -// fmt.Fprintf(os.Stderr, "Full %v\n", b) -// fmt.Fprintf(os.Stderr, "At pos %v\n", b[p:]) -// fmt.Fprintln(os.Stderr, "uint32 view:") -// var e binary.ByteOrder = binary.LittleEndian -// var sl []int -// for p < len(b) { -// l := p -// i := ReadUint32(&b, &p, &e) -// if l+4 <= len(b) { -// fmt.Fprintf(os.Stderr, "%d:\t%v\t\t%d\n", l, b[l:l+4], i) -// } else { -// fmt.Fprintf(os.Stderr, "%d:\t%v\t\t%d\n", l, b[l:], i) -// } -// -// sc := l - 8 -// if ReadUint32(&b, &sc, &e) == i { -// //Possible str -// sc -= 4 -// sl = append(sl, sc) -// } -// } -// for _, i := range sl { -// sc := i -// s, e := ReadConformantVaryingString(&b, &i, &e) -// if e == nil { -// fmt.Fprintf(os.Stderr, "Potential string at %d: %s\n", sc, s) -// } -// } -//} diff --git a/vendor/k8s.io/utils/env/env.go b/vendor/k8s.io/utils/env/env.go deleted file mode 100644 index bf4cc1a139b..00000000000 --- a/vendor/k8s.io/utils/env/env.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package env - -import ( - "os" - "strconv" -) - -// GetString returns the env variable for the given key -// and falls back to the given defaultValue if not set -func GetString(key, defaultValue string) string { - v, ok := os.LookupEnv(key) - if ok { - return v - } - return defaultValue -} - -// GetInt returns the env variable (parsed as integer) for -// the given key and falls back to the given defaultValue if not set -func GetInt(key string, defaultValue int) (int, error) { - v, ok := os.LookupEnv(key) - if ok { - value, err := strconv.Atoi(v) - if err != nil { - return defaultValue, err - } - return value, nil - } - return defaultValue, nil -} - -// GetFloat64 returns the env variable (parsed as float64) for -// the given key and falls back to the given defaultValue if not set -func GetFloat64(key string, defaultValue float64) (float64, error) { - v, ok := os.LookupEnv(key) - if ok { - value, err := strconv.ParseFloat(v, 64) - if err != nil { - return defaultValue, err - } - return value, nil - } - return defaultValue, nil -} - -// GetBool returns the env variable (parsed as bool) for -// the given key and falls back to the given defaultValue if not set -func GetBool(key string, defaultValue bool) (bool, error) { - v, ok := os.LookupEnv(key) - if ok { - value, err := strconv.ParseBool(v) - if err != nil { - return defaultValue, err - } - return value, nil - } - return defaultValue, nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index b9a17ac4908..b184ab403e4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -59,18 +59,12 @@ github.com/Knetic/govaluate # github.com/MakeNowJust/heredoc v1.0.0 ## explicit; go 1.12 github.com/MakeNowJust/heredoc -# github.com/Masterminds/goutils v1.1.1 -## explicit -github.com/Masterminds/goutils # github.com/Masterminds/semver v1.5.0 ## explicit github.com/Masterminds/semver # github.com/Masterminds/semver/v3 v3.2.1 ## explicit; go 1.18 github.com/Masterminds/semver/v3 -# github.com/Masterminds/sprig/v3 v3.2.3 -## explicit; go 1.13 -github.com/Masterminds/sprig/v3 # github.com/Microsoft/go-winio v0.6.1 ## explicit; go 1.17 github.com/Microsoft/go-winio @@ -107,20 +101,6 @@ github.com/agext/levenshtein # github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df ## explicit; go 1.18 github.com/antlr/antlr4/runtime/Go/antlr/v4 -# github.com/antonmedv/expr v1.12.5 -## explicit; go 1.13 -github.com/antonmedv/expr -github.com/antonmedv/expr/ast -github.com/antonmedv/expr/builtin -github.com/antonmedv/expr/checker -github.com/antonmedv/expr/compiler -github.com/antonmedv/expr/conf -github.com/antonmedv/expr/file -github.com/antonmedv/expr/optimizer -github.com/antonmedv/expr/parser -github.com/antonmedv/expr/parser/lexer -github.com/antonmedv/expr/vm -github.com/antonmedv/expr/vm/runtime # github.com/apparentlymart/go-textseg v1.0.0 ## explicit github.com/apparentlymart/go-textseg/textseg @@ -164,46 +144,22 @@ github.com/argoproj/argo-cd/v2/util/proxy 
github.com/argoproj/argo-cd/v2/util/security github.com/argoproj/argo-cd/v2/util/settings github.com/argoproj/argo-cd/v2/util/tls -# github.com/argoproj/argo-workflows/v3 v3.4.3 -## explicit; go 1.18 -github.com/argoproj/argo-workflows/v3 -github.com/argoproj/argo-workflows/v3/config +# github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 +## explicit; go 1.21 github.com/argoproj/argo-workflows/v3/errors -github.com/argoproj/argo-workflows/v3/persist/sqldb github.com/argoproj/argo-workflows/v3/pkg/apis/workflow github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/scheme github.com/argoproj/argo-workflows/v3/pkg/client/clientset/versioned/typed/workflow/v1alpha1 -github.com/argoproj/argo-workflows/v3/util -github.com/argoproj/argo-workflows/v3/util/cmd github.com/argoproj/argo-workflows/v3/util/env github.com/argoproj/argo-workflows/v3/util/errors -github.com/argoproj/argo-workflows/v3/util/expand -github.com/argoproj/argo-workflows/v3/util/expr/env -github.com/argoproj/argo-workflows/v3/util/file -github.com/argoproj/argo-workflows/v3/util/instanceid -github.com/argoproj/argo-workflows/v3/util/intstr github.com/argoproj/argo-workflows/v3/util/json -github.com/argoproj/argo-workflows/v3/util/k8s -github.com/argoproj/argo-workflows/v3/util/labels github.com/argoproj/argo-workflows/v3/util/retry github.com/argoproj/argo-workflows/v3/util/slice -github.com/argoproj/argo-workflows/v3/util/sorting -github.com/argoproj/argo-workflows/v3/util/template -github.com/argoproj/argo-workflows/v3/util/tls -github.com/argoproj/argo-workflows/v3/util/unstructured github.com/argoproj/argo-workflows/v3/util/wait -github.com/argoproj/argo-workflows/v3/workflow/artifacts/common -github.com/argoproj/argo-workflows/v3/workflow/artifacts/hdfs -github.com/argoproj/argo-workflows/v3/workflow/artifacts/resource github.com/argoproj/argo-workflows/v3/workflow/common -github.com/argoproj/argo-workflows/v3/workflow/hydrator -github.com/argoproj/argo-workflows/v3/workflow/metrics -github.com/argoproj/argo-workflows/v3/workflow/packer -github.com/argoproj/argo-workflows/v3/workflow/templateresolution github.com/argoproj/argo-workflows/v3/workflow/util -github.com/argoproj/argo-workflows/v3/workflow/validate # github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 ## explicit; go 1.19 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/api/v1/endpoints @@ -222,10 +178,7 @@ github.com/argoproj/gitops-engine/pkg/utils/tracing # github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e ## explicit; go 1.14 github.com/argoproj/pkg/exec -github.com/argoproj/pkg/expr -github.com/argoproj/pkg/file github.com/argoproj/pkg/grpc/http -github.com/argoproj/pkg/json github.com/argoproj/pkg/rand github.com/argoproj/pkg/sync github.com/argoproj/pkg/time @@ -375,19 +328,13 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 -# github.com/colinmarc/hdfs v1.1.4-0.20180805212432-9746310a4d31 -## explicit -github.com/colinmarc/hdfs -github.com/colinmarc/hdfs/protocol/hadoop_common -github.com/colinmarc/hdfs/protocol/hadoop_hdfs -github.com/colinmarc/hdfs/rpc # github.com/coreos/go-oidc/v3 v3.11.0 ## explicit; go 1.21 github.com/coreos/go-oidc/v3/oidc # github.com/cyphar/filepath-securejoin v0.2.4 ## explicit; go 
1.13 github.com/cyphar/filepath-securejoin -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/deckarep/golang-set v1.8.0 @@ -439,9 +386,6 @@ github.com/dgryski/go-rendezvous ## explicit github.com/docker/distribution/digestset github.com/docker/distribution/reference -# github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 -## explicit -github.com/doublerebel/bellows # github.com/emicklei/go-restful/v3 v3.11.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 @@ -548,7 +492,7 @@ github.com/go-git/go-git/v5/utils/trace github.com/go-jose/go-jose/v4 github.com/go-jose/go-jose/v4/cipher github.com/go-jose/go-jose/v4/json -# github.com/go-logr/logr v1.3.0 +# github.com/go-logr/logr v1.4.1 ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr @@ -556,15 +500,15 @@ github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/jsonpointer v0.19.6 -## explicit; go 1.13 +# github.com/go-openapi/jsonpointer v0.20.2 +## explicit; go 1.19 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.2 -## explicit; go 1.13 +# github.com/go-openapi/jsonreference v0.20.4 +## explicit; go 1.19 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.22.3 -## explicit; go 1.18 +# github.com/go-openapi/swag v0.22.6 +## explicit; go 1.19 github.com/go-openapi/swag # github.com/go-pg/pg v6.15.1+incompatible ## explicit @@ -589,7 +533,6 @@ github.com/go-redis/cache/v9 github.com/go-resty/resty/v2 # github.com/go-sql-driver/mysql v1.6.0 ## explicit; go 1.10 -github.com/go-sql-driver/mysql # github.com/go-xorm/xorm v0.7.9 ## explicit; go 1.11 github.com/go-xorm/xorm @@ -710,7 +653,7 @@ github.com/google/s2a-go/stream # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 github.com/google/shlex -# github.com/google/uuid v1.3.1 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/google/wire v0.6.0 @@ -779,16 +722,10 @@ github.com/hashicorp/go-multierror # github.com/hashicorp/go-retryablehttp v0.7.7 ## explicit; go 1.19 github.com/hashicorp/go-retryablehttp -# github.com/hashicorp/go-uuid v1.0.2 -## explicit -github.com/hashicorp/go-uuid # github.com/hashicorp/hcl2 v0.0.0-20191002203319-fb75b3253c80 ## explicit github.com/hashicorp/hcl2/hcl github.com/hashicorp/hcl2/hcl/hclsyntax -# github.com/huandu/xstrings v1.4.0 -## explicit; go 1.12 -github.com/huandu/xstrings # github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 ## explicit github.com/iancoleman/orderedmap @@ -806,10 +743,6 @@ github.com/invopop/jsonschema # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io -# github.com/jcmturner/gofork v1.0.0 -## explicit -github.com/jcmturner/gofork/encoding/asn1 -github.com/jcmturner/gofork/x/crypto/pbkdf2 # github.com/jinzhu/inflection v1.0.0 ## explicit github.com/jinzhu/inflection @@ -840,9 +773,6 @@ github.com/kevinburke/ssh_config ## explicit; go 1.18 github.com/klauspost/compress/flate github.com/klauspost/compress/s2 -# github.com/klauspost/pgzip v1.2.5 -## explicit -github.com/klauspost/pgzip # github.com/leodido/go-urn v1.2.0 ## explicit; go 1.13 github.com/leodido/go-urn @@ -924,9 +854,6 @@ github.com/nats-io/nkeys # github.com/nats-io/nuid v1.0.1 ## explicit github.com/nats-io/nuid -# 
github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 -## explicit -github.com/oliveagle/jsonpath # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -1001,18 +928,12 @@ github.com/satori/go.uuid # github.com/sergi/go-diff v1.1.0 ## explicit; go 1.12 github.com/sergi/go-diff/diffmatchpatch -# github.com/shopspring/decimal v1.3.1 -## explicit; go 1.13 -github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus # github.com/skeema/knownhosts v1.2.2 ## explicit; go 1.17 github.com/skeema/knownhosts -# github.com/spf13/cast v1.5.0 -## explicit; go 1.18 -github.com/spf13/cast # github.com/spf13/cobra v1.8.0 ## explicit; go 1.15 github.com/spf13/cobra @@ -1055,12 +976,6 @@ github.com/tidwall/pretty # github.com/tidwall/sjson v1.2.4 ## explicit; go 1.14 github.com/tidwall/sjson -# github.com/valyala/bytebufferpool v1.0.0 -## explicit -github.com/valyala/bytebufferpool -# github.com/valyala/fasttemplate v1.2.2 -## explicit; go 1.12 -github.com/valyala/fasttemplate # github.com/vmihailenco/go-tinylfu v0.2.2 ## explicit; go 1.15 github.com/vmihailenco/go-tinylfu @@ -1228,7 +1143,6 @@ golang.org/x/crypto/ed25519 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -golang.org/x/crypto/md4 golang.org/x/crypto/nacl/box golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/openpgp @@ -1312,8 +1226,8 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d ## explicit; go 1.19 @@ -1357,7 +1271,7 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -1506,50 +1420,6 @@ gopkg.in/igm/sockjs-go.v3/sockjs # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 -# gopkg.in/jcmturner/aescts.v1 v1.0.1 -## explicit -gopkg.in/jcmturner/aescts.v1 -# gopkg.in/jcmturner/dnsutils.v1 v1.0.1 -## explicit -gopkg.in/jcmturner/dnsutils.v1 -# gopkg.in/jcmturner/goidentity.v2 v2.0.0 -## explicit -# gopkg.in/jcmturner/gokrb5.v5 v5.3.0 -## explicit -gopkg.in/jcmturner/gokrb5.v5/asn1tools -gopkg.in/jcmturner/gokrb5.v5/client -gopkg.in/jcmturner/gokrb5.v5/config -gopkg.in/jcmturner/gokrb5.v5/credentials -gopkg.in/jcmturner/gokrb5.v5/crypto -gopkg.in/jcmturner/gokrb5.v5/crypto/common -gopkg.in/jcmturner/gokrb5.v5/crypto/etype -gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3961 -gopkg.in/jcmturner/gokrb5.v5/crypto/rfc3962 -gopkg.in/jcmturner/gokrb5.v5/crypto/rfc4757 -gopkg.in/jcmturner/gokrb5.v5/crypto/rfc8009 -gopkg.in/jcmturner/gokrb5.v5/gssapi -gopkg.in/jcmturner/gokrb5.v5/iana -gopkg.in/jcmturner/gokrb5.v5/iana/addrtype -gopkg.in/jcmturner/gokrb5.v5/iana/adtype -gopkg.in/jcmturner/gokrb5.v5/iana/asnAppTag -gopkg.in/jcmturner/gokrb5.v5/iana/chksumtype -gopkg.in/jcmturner/gokrb5.v5/iana/errorcode -gopkg.in/jcmturner/gokrb5.v5/iana/etypeID -gopkg.in/jcmturner/gokrb5.v5/iana/flags -gopkg.in/jcmturner/gokrb5.v5/iana/keyusage -gopkg.in/jcmturner/gokrb5.v5/iana/msgtype -gopkg.in/jcmturner/gokrb5.v5/iana/nametype -gopkg.in/jcmturner/gokrb5.v5/iana/patype 
-gopkg.in/jcmturner/gokrb5.v5/kadmin -gopkg.in/jcmturner/gokrb5.v5/keytab -gopkg.in/jcmturner/gokrb5.v5/krberror -gopkg.in/jcmturner/gokrb5.v5/messages -gopkg.in/jcmturner/gokrb5.v5/mstypes -gopkg.in/jcmturner/gokrb5.v5/pac -gopkg.in/jcmturner/gokrb5.v5/types -# gopkg.in/jcmturner/rpc.v0 v0.0.2 -## explicit -gopkg.in/jcmturner/rpc.v0/ndr # gopkg.in/warnings.v0 v0.1.2 ## explicit gopkg.in/warnings.v0 @@ -2165,7 +2035,6 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing -k8s.io/utils/env k8s.io/utils/exec k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net @@ -2290,22 +2159,10 @@ sigs.k8s.io/structured-merge-diff/v4/merge sigs.k8s.io/structured-merge-diff/v4/schema sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value -# sigs.k8s.io/yaml v1.3.0 +# sigs.k8s.io/yaml v1.4.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# upper.io/db.v3 v3.8.0+incompatible -## explicit -upper.io/db.v3 -upper.io/db.v3/internal/cache -upper.io/db.v3/internal/cache/hashstructure -upper.io/db.v3/internal/immutable -upper.io/db.v3/internal/sqladapter -upper.io/db.v3/internal/sqladapter/compat -upper.io/db.v3/internal/sqladapter/exql -upper.io/db.v3/lib/reflectx -upper.io/db.v3/lib/sqlbuilder -upper.io/db.v3/mysql -upper.io/db.v3/postgresql +sigs.k8s.io/yaml/goyaml.v2 # xorm.io/builder v0.3.7 ## explicit; go 1.11 xorm.io/builder diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE index 7805d36de73..093d6d3edf3 100644 --- a/vendor/sigs.k8s.io/yaml/LICENSE +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -48,3 +48,259 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# The forked go-yaml.v3 library under this project is covered by two +different licenses (MIT and Apache): + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +# The forked go-yaml.v2 library under the project is covered by an +Apache license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS index 325b40b0763..003a149e151 100644 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -2,26 +2,22 @@ approvers: - dims -- lavalamp +- jpbetz - smarterclayton - deads2k - sttts - liggitt -- caesarxuchao reviewers: - dims - thockin -- lavalamp +- jpbetz - smarterclayton - wojtek-t - deads2k - derekwaynecarr -- caesarxuchao - mikedanese - liggitt -- gmarek - sttts -- ncdc - tallclair labels: - sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go index 235b7f2cf61..0ea28bd0307 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -16,53 +16,53 @@ import ( "unicode/utf8" ) -// indirect walks down v allocating pointers as needed, +// indirect walks down 'value' allocating pointers as needed, // until it gets to a non-pointer. // if it encounters an Unmarshaler, indirect stops and returns that. // if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, +func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If 'value' is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() + if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() { + value = value.Addr() } for { // Load value from interface, but only if the result will be // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e + if value.Kind() == reflect.Interface && !value.IsNil() { + element := value.Elem() + if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) { + value = element continue } } - if v.Kind() != reflect.Ptr { + if value.Kind() != reflect.Ptr { break } - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() { break } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) + if value.IsNil() { + if value.CanSet() { + value.Set(reflect.New(value.Type().Elem())) } else { - v = reflect.New(v.Type().Elem()) + value = reflect.New(value.Type().Elem()) } } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { + if value.Type().NumMethod() > 0 { + if u, ok := value.Interface().(json.Unmarshaler); ok { return u, nil, reflect.Value{} } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + if u, ok := value.Interface().(encoding.TextUnmarshaler); ok { return nil, u, reflect.Value{} } } - v = v.Elem() + value = value.Elem() } - return nil, nil, v + return nil, nil, value } // A field represents a single field found in a struct. @@ -134,8 +134,8 @@ func typeFields(t reflect.Type) []field { next := []field{{typ: t}} // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} + var count map[reflect.Type]int + var nextCount map[reflect.Type]int // Types already visited at an earlier level. visited := map[reflect.Type]bool{} @@ -348,8 +348,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. 
// // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See http://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and @@ -420,10 +421,8 @@ func equalFoldRight(s, t []byte) bool { t = t[size:] } - if len(t) > 0 { - return false - } - return true + + return len(t) <= 0 } // asciiEqualFold is a specialization of bytes.EqualFold for use when diff --git a/vendor/gopkg.in/jcmturner/aescts.v1/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE similarity index 100% rename from vendor/gopkg.in/jcmturner/aescts.v1/LICENSE rename to vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE diff --git a/vendor/github.com/oliveagle/jsonpath/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml similarity index 50% rename from vendor/github.com/oliveagle/jsonpath/LICENSE rename to vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml index 530afca3d60..8da58fbf6f8 100644 --- a/vendor/github.com/oliveagle/jsonpath/LICENSE +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml @@ -1,13 +1,23 @@ -The MIT License (MIT) +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: -Copyright (c) 2015 oliver + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -19,4 +29,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/k8s.io/utils/env/doc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE similarity index 76% rename from vendor/k8s.io/utils/env/doc.go rename to vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE index 092ba673656..866d74a7ad7 100644 --- a/vendor/k8s.io/utils/env/doc.go +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE @@ -1,5 +1,4 @@ -/* -Copyright 2020 The Kubernetes Authors. +Copyright 2011-2016 Canonical Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -12,7 +11,3 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/ - -// Package env provides utility functions for using environment variables. -package env // import "k8s.io/utils/env" diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS new file mode 100644 index 00000000000..73be0a3a9bd --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS @@ -0,0 +1,24 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- jpbetz +- smarterclayton +- deads2k +- sttts +- liggitt +- natasha41575 +- knverey +reviewers: +- dims +- thockin +- jpbetz +- smarterclayton +- deads2k +- derekwaynecarr +- mikedanese +- liggitt +- sttts +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md new file mode 100644 index 00000000000..53f4139dc31 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md @@ -0,0 +1,143 @@ +# go-yaml fork + +This package is a fork of the go-yaml library and is intended solely for consumption +by kubernetes projects. In this fork, we plan to support only critical changes required for +kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests +should be made in the upstream go-yaml library, and we will reject such changes in this fork +unless we are pulling them from upstream. + +This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0 + +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. 
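+// In this example, untagged exported fields (A, B) are matched against the
+// lowercased field name, the `yaml:"c"` tag maps RenamedC to the key "c",
+// and ",flow" asks the marshaller to emit D in inline flow style.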
+type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go new file mode 100644 index 00000000000..acf71402cf3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. 
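+// The encoding can be set at most once, before parsing begins; a second
+// call on the same parser panics.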
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go new file mode 100644 index 00000000000..129bc2a97d3 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
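+//
+// The parser type below wraps the low-level state from the libyaml port:
+// it holds the current event, the document node being built, and a flag
+// recording whether the STREAM-START event has been consumed yet.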
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
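+		// A self-referential anchor would expand forever, so fail fast
+		// instead of recursing.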
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
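+		// Remember the destination, decode into a fresh []interface{},
+		// and assign the result back to the interface afterwards.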
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go new file mode 100644 index 00000000000..a1c2cc52627 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
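+// "Character" here means one UTF-8 encoded rune of one to four bytes:
+// width() derives the byte count from the leading byte, every byte of
+// the rune is copied, and *i advances by that count (two bytes for 'é').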
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
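+ // Copying is still useful here: the emitter keeps its own handle and
+ // prefix buffers even if the caller later mutates the event's slices.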
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
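+//
+// For orientation, the event stream for the single document "a: 1" is
+//
+//	STREAM-START DOCUMENT-START MAPPING-START
+//	SCALAR("a") SCALAR("1") MAPPING-END
+//	DOCUMENT-END STREAM-END
+//
+// so this state runs exactly once per emitter; it settles defaults
+// (encoding, indent, width, line breaks) and writes a BOM if one is needed.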
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
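+//
+// Flow mappings are the single-line "{k1: v1, k2: v2}" form: '{' is
+// written on the first key, ',' between entries, and '}' on MAPPING-END.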
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
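+//
+// Simple keys emit as "key: value"; keys rejected by the simple-key
+// check above (multiline, or longer than 128 bytes) fall back to the
+// explicit "? key" / ": value" form.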
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
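+//
+// This is where "[a, b]" (flow) versus "- a\n- b" (block) is decided:
+// flow wins when already inside a flow collection, in canonical mode,
+// when the event requests flow style, or when the sequence is empty.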
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
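+//
+// The rules below only ever downgrade: a requested plain style may be
+// forced to single quotes (disallowed indicators, empty value), single
+// quotes to double quotes (special characters), and the block styles to
+// double quotes (flow context or simple key). Canonical mode always
+// double-quotes.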
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
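+//
+// One writer per style: plain (foo), single quoted ('foo'), double
+// quoted ("foo"), literal (|) and folded (>).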
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
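+//
+// Despite the name, this pass never fails; it classifies the value
+// (line breaks, leading/trailing blanks, indicator characters, ...)
+// to record which of the five scalar styles are permitted for it.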
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
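+ // In practice i always indexes a real byte here, so the end-of-input
+ // half of is_blankz can only fire on a literal NUL in the value; the
+ // check effectively asks "was the current character a blank or a break?".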
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
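+//
+// The three bytes below are the UTF-8 encoding of U+FEFF.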
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go new file mode 100644 index 00000000000..0ee738e11b6 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + 
"encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go new file mode 100644 index 00000000000..81d05dfe573 --- 
/dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
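+// Editor's note (illustrative sketch, not part of upstream goyaml.v2): for
+// the single-document stream "a: 1", the dispatcher below walks roughly:
+//
+//      yaml_PARSE_STREAM_START_STATE            -> STREAM-START event
+//      yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE -> DOCUMENT-START event
+//      yaml_PARSE_BLOCK_NODE_STATE              -> MAPPING-START event
+//      yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE -> SCALAR("a") event
+//      yaml_PARSE_BLOCK_MAPPING_VALUE_STATE     -> SCALAR("1") event
+//      yaml_PARSE_BLOCK_MAPPING_KEY_STATE       -> MAPPING-END event
+//      yaml_PARSE_DOCUMENT_END_STATE            -> DOCUMENT-END event
+//      yaml_PARSE_DOCUMENT_START_STATE          -> STREAM-END event
+//
+// Each successful yaml_parser_parse call emits exactly one event and leaves
+// the follow-up state in parser.state, so a caller drives the machine with:
+//
+//      var event yaml_event_t
+//      for {
+//          if !yaml_parser_parse(&parser, &event) {
+//              // inspect parser.problem / parser.problem_mark
+//              break
+//          }
+//          if event.typ == yaml_STREAM_END_EVENT {
+//              break
+//          }
+//          // ... consume event ...
+//      }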
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
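+					// Editor's note (illustrative, not upstream code): with the
+					// directive "%TAG !e! tag:example.com,2000:app/" in force, a
+					// node written "!e!foo bar" matches here and resolves to the
+					// full tag "tag:example.com,2000:app/foo" before the break.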
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
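+//
+// Editor's note (illustrative, not upstream): empty scalars give omitted keys
+// and values a concrete representation. The flow mapping "{a: , : b}" parses
+// as if written "{a: null, null: b}": each missing side becomes a zero-length
+// plain SCALAR event anchored at the indicator's mark, and later resolves to
+// null.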
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go new file mode 100644 index 00000000000..7c1f5fac3db --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
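+//
+// Editor's note (illustrative, not upstream): the BOM is consumed here and
+// never handed to the scanner, and the whole stream is transparently
+// re-encoded to UTF-8, so the UTF-16LE input {0xFF, 0xFE, 'a', 0x00} scans
+// exactly like the plain UTF-8 input "a".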
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
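+	// (Editor's note, illustrative: on the first call parser.encoding is still
+	// yaml_ANY_ENCODING, so the BOM sniffing below runs at most once per
+	// stream; every later call takes the fast path straight to decoding.)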
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
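+				// (Editor's note, illustrative: this switch rejects overlong
+				// encodings. The 2-byte sequence 0xC0 0x80 decodes to value 0
+				// with width 2, fails the "width == 2 && value >= 0x80" arm,
+				// and is reported as an invalid-length sequence.)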
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
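+			// (Editor's note, illustrative: after the pointers move, the code
+			// below stores the decoded rune back as UTF-8, so U+00E9 read from
+			// UTF-16LE as the bytes 0xE9 0x00 is written to parser.buffer as
+			// the 2-byte UTF-8 sequence 0xC3 0xA9.)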
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go new file mode 100644 index 00000000000..4120e0c9160 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
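+			// (Editor's note, illustrative: this is why "when: 2001-12-14"
+			// decodes into an interface{} as a time.Time, while the quoted form
+			// "when: '2001-12-14'" is never resolved and stays a string.)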
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
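+	// (Editor's note, illustrative: the loop below requires exactly four
+	// leading digits followed by '-', so "2001-12-14" goes on to time.Parse
+	// while "20011214" and "12-14-2001" are rejected before any format is
+	// tried.)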
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go new file mode 100644 index 00000000000..0b9bb6030a0 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+// }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require starting a new block collection from a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ?
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
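+	// (E.g. given the input "   # a comment\nkey: value", everything up
+	// to the 'k' of "key" is consumed here before any token is produced.)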
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
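+	// (E.g. 'it''s'; the doubled quote inside is an escape handled later
+	// by yaml_parser_scan_flow_scalar.)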
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
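+//
+// For example, while scanning "foo: bar" the position of "foo" is saved
+// here as a candidate; its KEY token is only inserted retroactively once
+// the ':' is actually seen (see yaml_parser_fetch_value).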
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
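+		// (number is made relative to the queue head below so that the
+		// token can be inserted retroactively at a saved simple key
+		// position rather than appended.)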
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
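+	// ('---' and '...' are both exactly three characters, hence the three
+	// skips below.)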
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
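+	// (A '-' cannot complete a pending simple key, so any saved candidate
+	// is discarded before the entry is consumed.)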
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
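+	// (Both branches above converge here: whether the key was simple or
+	// complex, the ':' indicator itself always yields a VALUE token.)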
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
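+		// (E.g. inside a flow collection such as "[a,\tb]" the tab is
+		// eaten just like a space, because parser.flow_level > 0.)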
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
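+	// (For "%TAG !yaml! tag:yaml.org,2002:" the handle scanned below is
+	// "!yaml!" and the prefix is "tag:yaml.org,2002:".)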
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
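+		// (A verbatim tag has the form "!<tag:yaml.org,2002:str>"; the
+		// URI between '!<' and '>' was consumed just above.)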
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
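+	//
+	// A '%' introduces a URI-escaped octet such as "%20", which is decoded
+	// by yaml_parser_scan_uri_escapes inside the loop below.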
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
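+		// ('+' keeps all trailing line breaks, '-' strips them; see the
+		// "Chomp the tail" step at the end of this function.)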
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
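+		// (Roughly: a single break between two non-blank lines has just
+		// been folded to a space above; blank lines are kept as '\n'.)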
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&&
+			parser.buffer[parser.buffer_pos+2] == '.')) &&
+		is_blankz(parser.buffer, parser.buffer_pos+3) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected document indicator")
+		return false
+	}
+
+	// Check for EOF.
+	if is_z(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+			start_mark, "found unexpected end of stream")
+		return false
+	}
+
+	// Consume non-blank characters.
+	leading_blanks := false
+	for !is_blankz(parser.buffer, parser.buffer_pos) {
+		if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+			// It is an escaped single quote.
+			s = append(s, '\'')
+			skip(parser)
+			skip(parser)
+
+		} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+			// It is a right single quote.
+			break
+		} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+			// It is a right double quote.
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+			// It is an escaped line break.
+			if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+				return false
+			}
+			skip(parser)
+			skip_line(parser)
+			leading_blanks = true
+			break
+
+		} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+			// It is an escape sequence.
+			code_length := 0
+
+			// Check the escape character.
+			switch parser.buffer[parser.buffer_pos+1] {
+			case '0':
+				s = append(s, 0)
+			case 'a':
+				s = append(s, '\x07')
+			case 'b':
+				s = append(s, '\x08')
+			case 't', '\t':
+				s = append(s, '\x09')
+			case 'n':
+				s = append(s, '\x0A')
+			case 'v':
+				s = append(s, '\x0B')
+			case 'f':
+				s = append(s, '\x0C')
+			case 'r':
+				s = append(s, '\x0D')
+			case 'e':
+				s = append(s, '\x1B')
+			case ' ':
+				s = append(s, '\x20')
+			case '"':
+				s = append(s, '"')
+			case '\'':
+				s = append(s, '\'')
+			case '\\':
+				s = append(s, '\\')
+			case 'N': // NEL (#x85)
+				s = append(s, '\xC2')
+				s = append(s, '\x85')
+			case '_': // #xA0
+				s = append(s, '\xC2')
+				s = append(s, '\xA0')
+			case 'L': // LS (#x2028)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA8')
+			case 'P': // PS (#x2029)
+				s = append(s, '\xE2')
+				s = append(s, '\x80')
+				s = append(s, '\xA9')
+			case 'x':
+				code_length = 2
+			case 'u':
+				code_length = 4
+			case 'U':
+				code_length = 8
+			default:
+				yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+					start_mark, "found unknown escape character")
+				return false
+			}
+
+			skip(parser)
+			skip(parser)
+
+			// Consume an arbitrary escape code.
+			if code_length > 0 {
+				var value int
+
+				// Scan the character value.
+				if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+					return false
+				}
+				for k := 0; k < code_length; k++ {
+					if !is_hex(parser.buffer, parser.buffer_pos+k) {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "did not find expected hexadecimal number")
+						return false
+					}
+					value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+				}
+
+				// Check the value and write the character.
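+				// [Editor's note, not upstream code] A hedged illustration of
+				// the escape handling above, assuming a client of this
+				// package's public Unmarshal:
+				//
+				//	var m map[string]string
+				//	yaml.Unmarshal([]byte(`v: "tab:\t nel:\N pi:\u03C0"`), &m)
+				//	// m["v"] == "tab:\t nel:\u0085 pi:π" (single-letter escapes,
+				//	// multi-byte escapes such as NEL, and \u hex escapes are all
+				//	// decoded to UTF-8 bytes).
+				//
+				// In single-quoted scalars, only '' is special:
+				//
+				//	yaml.Unmarshal([]byte(`v: 'it''s'`), &m) // m["v"] == "it's"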
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
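+	// [Editor's note, not upstream code] A hedged sketch of the termination
+	// rules implemented above, assuming a client of this package's public
+	// Unmarshal:
+	//
+	//	var m map[string]interface{}
+	//	yaml.Unmarshal([]byte("v: a:b c#d # tail"), &m)
+	//	// m["v"] == "a:b c#d": ':' only ends a plain scalar when followed by
+	//	// a blank, and '#' only starts a comment when preceded by one.
+	//	yaml.Unmarshal([]byte("v: [a, b]"), &m)
+	//	// In flow context ',' '[' ']' '{' '}' also terminate plain scalars,
+	//	// so this decodes as the two-element sequence ["a", "b"].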
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go new file mode 100644 index 00000000000..4c45e660a8f --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go new file mode 100644 index 00000000000..a2dde608cb7 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
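+// [Editor's note, not upstream code] The keyList comparator in sorter.go
+// above gives map keys a "natural" order: embedded integers compare
+// numerically rather than lexically. A minimal sketch, assuming it is the
+// ordering applied by this package's Marshal when encoding maps:
+//
+//	out, _ := yaml.Marshal(map[string]int{"item10": 1, "item2": 2, "item1": 3})
+//	// string(out) == "item1: 3\nitem2: 2\nitem10: 1\n"
+//	// ("item2" sorts before "item10" because 2 < 10)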
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
new file mode 100644
index 00000000000..30813884c06
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//	https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
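+// [Editor's note, not upstream code] A hedged sketch tying IsZeroer to the
+// ",omitempty" flag documented at Marshal; time.Time is assumed to satisfy
+// the interface via its IsZero method:
+//
+//	type Event struct {
+//		Name string    `yaml:"name"`
+//		At   time.Time `yaml:"at,omitempty"`
+//	}
+//	out, _ := yaml.Marshal(Event{Name: "boot"})
+//	// string(out) == "name: boot\n": the zero time.Time reports
+//	// IsZero() == true, so the "at" field is omitted.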
+type IsZeroer interface {
+	IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
new file mode 100644
index 00000000000..f6a9c8e34b1
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
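+
+	// [Editor's note, not upstream code] For orientation: scanning the
+	// document "a: b" is expected to yield the token sequence
+	//
+	//	STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR("a"),
+	//	VALUE, SCALAR("b"), BLOCK-END, STREAM-END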
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
+type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. 
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. 
+		multiline bool // Does the scalar contain line breaks?
+
+		flow_plain_allowed    bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool // Can the scalar be expressed in the literal or folded styles?
+
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
new file mode 100644
index 00000000000..8110ce3c37a
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
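+// [Editor's note, not upstream code] Several of these helpers look at more
+// than one byte because YAML's special characters include multi-byte UTF-8
+// sequences:
+//
+//	NEL (#x85)   = 0xC2 0x85
+//	LS  (#x2028) = 0xE2 0x80 0xA8
+//	PS  (#x2029) = 0xE2 0x80 0xA9
+//	BOM (#xFEFF) = 0xEF 0xBB 0xBF
+//
+// This also appears to be why input_buffer_size is input_raw_buffer_size*3:
+// a single UTF-16 unit from the raw buffer may decode to up to three UTF-8
+// bytes in the working buffer.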
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go index efbc535d416..fc10246bdb2 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package yaml import ( @@ -8,56 +24,59 @@ import ( "reflect" "strconv" - "gopkg.in/yaml.v2" + "sigs.k8s.io/yaml/goyaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) +// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference) +func Marshal(obj interface{}) ([]byte, error) { + jsonBytes, err := json.Marshal(obj) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: %w", err) } - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil + return JSONToYAML(jsonBytes) } // JSONOpt is a decoding option for decoding from JSON format. type JSONOpt func(*json.Decoder) *json.Decoder -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) +// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the +// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer. +// +// Important notes about the Unmarshal logic: +// +// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users. +// - This decodes any number (although it is an integer) into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers above +/- 2^53 will lose precision when round-tripping. Make a JSONOpt that calls d.UseNumber() to avoid this. +// - Duplicate fields, including in-case-sensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See UnmarshalStrict for an alternative. +// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process. +// - There are no compatibility guarantees for returned error values. +func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...) } -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions: +// +// - Duplicate fields in an object yield an error. This is according to the YAML specification. 
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error. +func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error { + return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...) } -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// unmarshal unmarshals the given YAML byte stream into the given interface, // optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { - vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) +func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error { + jsonTarget := reflect.ValueOf(obj) + + jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn) if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) + return fmt.Errorf("error converting YAML to JSON: %w", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...) if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) + return fmt.Errorf("error unmarshaling JSON: %w", err) } return nil @@ -67,21 +86,26 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error // object, optionally applying decoder options prior to decoding. We are not // using json.Unmarshal directly as we want the chance to pass in non-default // options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) +func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(reader) for _, opt := range opts { d = opt(d) } - if err := d.Decode(&o); err != nil { + if err := d.Decode(&obj); err != nil { return fmt.Errorf("while decoding JSON: %v", err) } return nil } -// JSONToYAML Converts JSON to YAML. +// JSONToYAML converts JSON to YAML. Notable implementation details: +// +// - Duplicate fields, are case-sensitively ignored in an undefined order. +// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, // etc.) when unmarshalling to interface{}, it just picks float64 @@ -93,35 +117,46 @@ func JSONToYAML(j []byte) ([]byte, error) { } // Marshal this object into YAML. - return yaml.Marshal(jsonObj) + yamlBytes, err := yaml.Marshal(jsonObj) + if err != nil { + return nil, err + } + + return yamlBytes, nil } // YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, // passing JSON through this method should be a no-op. // -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. 
If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. +// Some things YAML can do that are not supported by JSON: +// - In YAML you can have binary and null keys in your maps. These are invalid +// in JSON, and therefore int, bool and float keys are converted to strings implicitly. +// - Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// - And more... read the YAML specification for more details. +// +// Notable about the implementation: // -// For strict decoding of YAML, use YAMLToJSONStrict. +// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to. See YAMLToJSONStrict for an alternative. +// - As per the YAML 1.1 specification, which yaml.v2 used underneath implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly. +// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip. +// - There are no compatibility guarantees for returned error values. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSONTarget(y, nil, yaml.Unmarshal) } // YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, // returning an error on any duplicate field names. func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := unmarshalFn(yamlBytes, &yamlObj) if err != nil { return nil, err } @@ -136,7 +171,11 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, } // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) + jsonBytes, err := json.Marshal(jsonObj) + if err != nil { + return nil, err + } + return jsonBytes, nil } func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { @@ -147,13 +186,13 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in // decoding into the value, we're just checking if the ultimate target is a // string. if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) + jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false) // We have a JSON or Text Umarshaler at this level, so we can't be trying // to decode into a string. 
- if ju != nil || tu != nil { + if jsonUnmarshaler != nil || textUnmarshaler != nil { jsonTarget = nil } else { - jsonTarget = &pv + jsonTarget = &pointerValue } } @@ -205,7 +244,7 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in keyString = "false" } default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v", reflect.TypeOf(k), k, v) } diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go index ab3e06a222a..94abc1719dc 100644 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -1,7 +1,24 @@ // This file contains changes that are only compatible with go 1.10 and onwards. +//go:build go1.10 // +build go1.10 +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package yaml import "encoding/json" diff --git a/vendor/upper.io/db.v3/.gitignore b/vendor/upper.io/db.v3/.gitignore deleted file mode 100644 index 29460701365..00000000000 --- a/vendor/upper.io/db.v3/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.sw? -*.db -*.tmp -generated_*.go diff --git a/vendor/upper.io/db.v3/CHANGELOG.md b/vendor/upper.io/db.v3/CHANGELOG.md deleted file mode 100644 index 7b4d6fec330..00000000000 --- a/vendor/upper.io/db.v3/CHANGELOG.md +++ /dev/null @@ -1,31 +0,0 @@ -## Changelog - -Dec 15th, 2016: On `db.v2`, upper-db produced queries that mutated themselves: - -``` -q := sess.SelectFrom("users") - -q.Where(...) // This method modified q's internal state. -``` - -Starting on `db.v3` this is no longer valid, if you want to use values to -represent queries you'll have to reassign them, like this: - -``` -q := sess.SelectFrom("users") - -q = q.Where(...) - -q.And(...) // Nothing happens, the Where() method does not affect q. -``` - -This applies to all query builder methods, `db.Result`, `db.And` and `db.Or`. - -If you want to check your code for statatements that might rely on the old -behaviour and could cause you trouble use `dbcheck`: - -``` -go get -u github.com/upper/cmd/dbcheck - -dbcheck github.com/my/package/... -``` diff --git a/vendor/upper.io/db.v3/LICENSE b/vendor/upper.io/db.v3/LICENSE deleted file mode 100644 index 4004d2ba350..00000000000 --- a/vendor/upper.io/db.v3/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012-present The upper.io/db authors. All rights reserved. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/upper.io/db.v3/Makefile b/vendor/upper.io/db.v3/Makefile deleted file mode 100644 index c88f64ab56f..00000000000 --- a/vendor/upper.io/db.v3/Makefile +++ /dev/null @@ -1,50 +0,0 @@ -SHELL := /bin/bash - -PARALLEL_FLAGS ?= --halt-on-error 2 --jobs=4 -v -u - -TEST_FLAGS ?= - -export TEST_FLAGS -export PARALLEL_FLAGS - -test: test-libs test-adapters - -benchmark-lib: - go test -v -benchtime=500ms -bench=. ./lib/... - -benchmark-internal: - go test -v -benchtime=500ms -bench=. ./internal/... - -benchmark: benchmark-lib benchmark-internal - -test-lib: - go test -v ./lib/... - -test-internal: - go test -v ./internal/... - -test-libs: - parallel $(PARALLEL_FLAGS) \ - "$(MAKE) test-{}" ::: \ - lib \ - internal - -test-adapters: \ - test-adapter-postgresql \ - test-adapter-mysql \ - test-adapter-mssql \ - test-adapter-sqlite \ - test-adapter-ql \ - test-adapter-mongo - -test-adapter-%: - ($(MAKE) -C $* test-extended || exit 1) - -test-generic: - export TEST_FLAGS="-run TestGeneric"; \ - $(MAKE) test-adapters - -goimports: - for FILE in $$(find -name "*.go" | grep -v vendor); do \ - goimports -w $$FILE; \ - done diff --git a/vendor/upper.io/db.v3/README.md b/vendor/upper.io/db.v3/README.md deleted file mode 100644 index c96c76ab4a0..00000000000 --- a/vendor/upper.io/db.v3/README.md +++ /dev/null @@ -1,127 +0,0 @@ -

- -# upper.io/db.v3 [![Build Status](https://travis-ci.org/upper/db.svg?branch=master)](https://travis-ci.org/upper/db) [![GoDoc](https://godoc.org/upper.io/db.v3?status.svg)](https://godoc.org/upper.io/db.v3) - -The `upper.io/db.v3` package for [Go][2] is a productive data access layer for -Go that provides a common interface to work with different data sources such as -[PostgreSQL](https://upper.io/db.v3/postgresql), -[MySQL](https://upper.io/db.v3/mysql), [SQLite](https://upper.io/db.v3/sqlite), -[MSSQL](https://upper.io/db.v3/mssql), -[QL](https://upper.io/db.v3/ql) and [MongoDB](https://upper.io/db.v3/mongo). - -``` -go get upper.io/db.v3 -``` - -## The tour - -![screen shot 2017-05-01 at 19 23 22](https://cloud.githubusercontent.com/assets/385670/25599675/b6fe9fea-2ea3-11e7-9f76-002931dfbbc1.png) - -Take the [tour](https://tour.upper.io) to see real live examples in your -browser. - -## Live demos - -You can run the following example on our [playground](https://demo.upper.io): - -```go -package main - -import ( - "log" - - "upper.io/db.v3/postgresql" -) - -var settings = postgresql.ConnectionURL{ - Host: "demo.upper.io", - Database: "booktown", - User: "demouser", - Password: "demop4ss", -} - -type Book struct { - ID int `db:"id"` - Title string `db:"title"` - AuthorID int `db:"author_id"` - SubjectID int `db:"subject_id"` -} - -func main() { - sess, err := postgresql.Open(settings) - if err != nil { - log.Fatalf("db.Open(): %q\n", err) - } - defer sess.Close() - - var books []Book - err = sess.Collection("books").Find().All(&books) - if err != nil { - log.Fatalf("Find(): %q\n", err) - } - - for i, book := range books { - log.Printf("Book %d: %#v\n", i, book) - } -} -``` - -Or you can also run it locally from the `_examples` directory: - -``` -go run _examples/booktown-books/main.go -2016/08/10 08:42:48 "The Shining" (ID: 7808) -2016/08/10 08:42:48 "Dune" (ID: 4513) -2016/08/10 08:42:48 "2001: A Space Odyssey" (ID: 4267) -2016/08/10 08:42:48 "The Cat in the Hat" (ID: 1608) -2016/08/10 08:42:48 "Bartholomew and the Oobleck" (ID: 1590) -2016/08/10 08:42:48 "Franklin in the Dark" (ID: 25908) -2016/08/10 08:42:48 "Goodnight Moon" (ID: 1501) -2016/08/10 08:42:48 "Little Women" (ID: 190) -2016/08/10 08:42:48 "The Velveteen Rabbit" (ID: 1234) -2016/08/10 08:42:48 "Dynamic Anatomy" (ID: 2038) -2016/08/10 08:42:48 "The Tell-Tale Heart" (ID: 156) -2016/08/10 08:42:48 "Programming Python" (ID: 41473) -2016/08/10 08:42:48 "Learning Python" (ID: 41477) -2016/08/10 08:42:48 "Perl Cookbook" (ID: 41478) -2016/08/10 08:42:48 "Practical PostgreSQL" (ID: 41472) -``` - -## Documentation for users - -This is the source code repository, check out our [release -notes](https://github.com/upper/db/releases/tag/v3.0.0) and see examples and -documentation at [upper.io/db.v3][1]. - - -## Changelog - -See [CHANGELOG.md](https://github.com/upper/db/blob/master/CHANGELOG.md). - -## License - -Licensed under [MIT License](./LICENSE) - -## Authors and contributors - -* José Carlos Nieto <> -* Peter Kieltyka <> -* Maciej Lisiewski <> -* Max Hawkins <> -* Paul Xue <> -* Kevin Darlington <> -* Lars Buitinck <> -* icattlecoder <> -* Aaron <> -* Hiram J. 
Pérez <> -* Julien Schmidt <> -* Max Hawkins <> -* Piotr "Orange" Zduniak <> -* achun <> -* rjmcguire <> -* wei2912 <> - -[1]: https://upper.io/db.v3 -[2]: http://golang.org diff --git a/vendor/upper.io/db.v3/collection.go b/vendor/upper.io/db.v3/collection.go deleted file mode 100644 index 8cdd634404c..00000000000 --- a/vendor/upper.io/db.v3/collection.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Collection is an interface that defines methods useful for handling tables. -type Collection interface { - // Insert inserts a new item into the collection, it accepts one argument - // that can be either a map or a struct. If the call succeeds, it returns the - // ID of the newly added element as an `interface{}` (the actual type of this - // ID depends on both the database adapter and the column that stores this - // ID). The ID returned by Insert() could be passed directly to Find() to - // retrieve the newly added element. - Insert(interface{}) (interface{}, error) - - // InsertReturning is like Insert() but it updates the passed map or struct - // with the newly inserted element (and with automatic fields, like IDs, - // timestamps, etc). This is all done atomically within a transaction. If - // the database does not support transactions this method returns - // db.ErrUnsupported. - InsertReturning(interface{}) error - - // UpdateReturning takes a pointer to map or struct and tries to update the - // given item on the collection based on the item's primary keys. Once the - // element is updated, UpdateReturning will query the element that was just - // updated. If the database does not support transactions this method returns - // db.ErrUnsupported - UpdateReturning(interface{}) error - - // Exists returns true if the collection exists, false otherwise. - Exists() bool - - // Find defines a new result set with elements from the collection. - Find(...interface{}) Result - - // Truncate removes all elements on the collection and resets the - // collection's IDs. - Truncate() error - - // Name returns the name of the collection. - Name() string -} diff --git a/vendor/upper.io/db.v3/comparison.go b/vendor/upper.io/db.v3/comparison.go deleted file mode 100644 index bf6fa9d35f0..00000000000 --- a/vendor/upper.io/db.v3/comparison.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. 
All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "reflect" - "time" -) - -// Comparison defines methods for representing comparison operators in a -// portable way across databases. -type Comparison interface { - Operator() ComparisonOperator - - Value() interface{} -} - -// ComparisonOperator is a type we use to label comparison operators. -type ComparisonOperator uint8 - -// Comparison operators -const ( - ComparisonOperatorNone ComparisonOperator = iota - - ComparisonOperatorEqual - ComparisonOperatorNotEqual - - ComparisonOperatorLessThan - ComparisonOperatorGreaterThan - - ComparisonOperatorLessThanOrEqualTo - ComparisonOperatorGreaterThanOrEqualTo - - ComparisonOperatorBetween - ComparisonOperatorNotBetween - - ComparisonOperatorIn - ComparisonOperatorNotIn - - ComparisonOperatorIs - ComparisonOperatorIsNot - - ComparisonOperatorLike - ComparisonOperatorNotLike - - ComparisonOperatorRegExp - ComparisonOperatorNotRegExp - - ComparisonOperatorAfter - ComparisonOperatorBefore - - ComparisonOperatorOnOrAfter - ComparisonOperatorOnOrBefore -) - -type dbComparisonOperator struct { - t ComparisonOperator - op string - v interface{} -} - -func (c *dbComparisonOperator) CustomOperator() string { - return c.op -} - -func (c *dbComparisonOperator) Operator() ComparisonOperator { - return c.t -} - -func (c *dbComparisonOperator) Value() interface{} { - return c.v -} - -// Gte indicates whether the reference is greater than or equal to the given -// argument. -func Gte(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorGreaterThanOrEqualTo, - v: v, - } -} - -// Lte indicates whether the reference is less than or equal to the given -// argument. -func Lte(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorLessThanOrEqualTo, - v: v, - } -} - -// Eq indicates whether the constraint is equal to the given argument. -func Eq(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorEqual, - v: v, - } -} - -// NotEq indicates whether the constraint is not equal to the given argument. -func NotEq(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotEqual, - v: v, - } -} - -// Gt indicates whether the constraint is greater than the given argument. 
-func Gt(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorGreaterThan, - v: v, - } -} - -// Lt indicates whether the constraint is less than the given argument. -func Lt(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorLessThan, - v: v, - } -} - -// In indicates whether the argument is part of the reference. -func In(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorIn, - v: toInterfaceArray(v), - } -} - -// NotIn indicates whether the argument is not part of the reference. -func NotIn(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotIn, - v: toInterfaceArray(v), - } -} - -// After indicates whether the reference is after the given time. -func After(t time.Time) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorGreaterThan, - v: t, - } -} - -// Before indicates whether the reference is before the given time. -func Before(t time.Time) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorLessThan, - v: t, - } -} - -// OnOrAfter indicater whether the reference is after or equal to the given -// time value. -func OnOrAfter(t time.Time) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorGreaterThanOrEqualTo, - v: t, - } -} - -// OnOrBefore indicates whether the reference is before or equal to the given -// time value. -func OnOrBefore(t time.Time) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorLessThanOrEqualTo, - v: t, - } -} - -// Between indicates whether the reference is contained between the two given -// values. -func Between(a interface{}, b interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorBetween, - v: []interface{}{a, b}, - } -} - -// NotBetween indicates whether the reference is not contained between the two -// given values. -func NotBetween(a interface{}, b interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotBetween, - v: []interface{}{a, b}, - } -} - -// Is indicates whether the reference is nil, true or false. -func Is(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorIs, - v: v, - } -} - -// IsNot indicates whether the reference is not nil, true nor false. -func IsNot(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorIsNot, - v: v, - } -} - -// IsNull indicates whether the reference is a NULL value. -func IsNull() Comparison { - return Is(nil) -} - -// IsNotNull indicates whether the reference is a NULL value. -func IsNotNull() Comparison { - return IsNot(nil) -} - -/* -// IsDistinctFrom indicates whether the reference is different from -// the given value, including NULL values. -func IsDistinctFrom(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorIsDistinctFrom, - v: v, - } -} - -// IsNotDistinctFrom indicates whether the reference is not different from the -// given value, including NULL values. -func IsNotDistinctFrom(v interface{}) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorIsNotDistinctFrom, - v: v, - } -} -*/ - -// Like indicates whether the reference matches the wildcard value. -func Like(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorLike, - v: v, - } -} - -// NotLike indicates whether the reference does not match the wildcard value. 
-func NotLike(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotLike, - v: v, - } -} - -/* -// ILike indicates whether the reference matches the wildcard value (case -// insensitive). -func ILike(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorILike, - v: v, - } -} - -// NotILike indicates whether the reference does not match the wildcard value -// (case insensitive). -func NotILike(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotILike, - v: v, - } -} -*/ - -// RegExp indicates whether the reference matches the regexp pattern. -func RegExp(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorRegExp, - v: v, - } -} - -// NotRegExp indicates whether the reference does not match the regexp pattern. -func NotRegExp(v string) Comparison { - return &dbComparisonOperator{ - t: ComparisonOperatorNotRegExp, - v: v, - } -} - -// Op represents a custom comparison operator against the reference. -func Op(customOperator string, v interface{}) Comparison { - return &dbComparisonOperator{ - op: customOperator, - t: ComparisonOperatorNone, - v: v, - } -} - -func toInterfaceArray(v interface{}) []interface{} { - rv := reflect.ValueOf(v) - switch rv.Type().Kind() { - case reflect.Ptr: - return toInterfaceArray(rv.Elem().Interface()) - case reflect.Slice: - elems := rv.Len() - args := make([]interface{}, elems) - for i := 0; i < elems; i++ { - args[i] = rv.Index(i).Interface() - } - return args - } - return []interface{}{v} -} - -var _ = Comparison(&dbComparisonOperator{}) diff --git a/vendor/upper.io/db.v3/compound.go b/vendor/upper.io/db.v3/compound.go deleted file mode 100644 index 55f4f08866c..00000000000 --- a/vendor/upper.io/db.v3/compound.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "upper.io/db.v3/internal/immutable" -) - -// Compound represents an statement that has one or many sentences joined by by -// an operator like "AND" or "OR". This is an exported interface but it was -// designed for internal usage, you may want to use the `db.And()` or `db.Or()` -// functions instead. -type Compound interface { - // Sentences returns child sentences. - Sentences() []Compound - - // Operator returns the operator that joins the compound's child sentences. 
- Operator() CompoundOperator - - // Empty returns true if the compound has zero children, false otherwise. - Empty() bool -} - -// CompoundOperator represents the operation on a compound statement. -type CompoundOperator uint - -// Compound operators. -const ( - OperatorNone CompoundOperator = iota - OperatorAnd - OperatorOr -) - -type compound struct { - prev *compound - fn func(*[]Compound) error -} - -func newCompound(conds ...Compound) *compound { - c := &compound{} - if len(conds) == 0 { - return c - } - return c.frame(func(in *[]Compound) error { - *in = append(*in, conds...) - return nil - }) -} - -// Sentences returns each one of the conditions as a compound. -func (c *compound) Sentences() []Compound { - conds, err := immutable.FastForward(c) - if err == nil { - return *(conds.(*[]Compound)) - } - return nil -} - -// Operator returns no operator. -func (c *compound) Operator() CompoundOperator { - return OperatorNone -} - -// Empty returns true if this condition has no elements. False otherwise. -func (c *compound) Empty() bool { - if c.fn != nil { - return false - } - if c.prev != nil { - return c.prev.Empty() - } - return true -} - -func (c *compound) frame(fn func(*[]Compound) error) *compound { - return &compound{prev: c, fn: fn} -} - -// Prev is for internal usage. -func (c *compound) Prev() immutable.Immutable { - if c == nil { - return nil - } - return c.prev -} - -// Fn is for internal usage. -func (c *compound) Fn(in interface{}) error { - if c.fn == nil { - return nil - } - return c.fn(in.(*[]Compound)) -} - -// Base is for internal usage. -func (c *compound) Base() interface{} { - return &[]Compound{} -} - -func defaultJoin(in ...Compound) []Compound { - for i := range in { - if cond, ok := in[i].(Cond); ok && len(cond) > 1 { - in[i] = And(cond) - } - } - return in -} - -var ( - _ = immutable.Immutable(&compound{}) - _ = Compound(Cond{}) -) diff --git a/vendor/upper.io/db.v3/cond.go b/vendor/upper.io/db.v3/cond.go deleted file mode 100644 index 513a424268e..00000000000 --- a/vendor/upper.io/db.v3/cond.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "fmt" - "sort" -) - -// Cond is a map that defines conditions for a query and satisfies the -// Constraints and Compound interfaces. -// -// Each entry of the map represents a condition (a column-value relation bound -// by a comparison operator). 
The comparison operator is optional and can be -// specified after the column name, if no comparison operator is provided the -// equality operator is used as default. -// -// Examples: -// -// // Where age equals 18. -// db.Cond{"age": 18} -// // // Where age is greater than or equal to 18. -// db.Cond{"age >=": 18} -// -// // Where id is in a list of ids. -// db.Cond{"id IN": []{1, 2, 3}} -// -// // Where age is lower than 18 (you could use this syntax when using -// // mongodb). -// db.Cond{"age $lt": 18} -// -// // Where age > 32 and age < 35 -// db.Cond{"age >": 32, "age <": 35} -type Cond map[interface{}]interface{} - -// Constraints returns each one of the Cond map records as a constraint. -func (c Cond) Constraints() []Constraint { - z := make([]Constraint, 0, len(c)) - for _, k := range c.Keys() { - z = append(z, NewConstraint(k, c[k])) - } - return z -} - -// Keys returns the keys of this map sorted by name. -func (c Cond) Keys() []interface{} { - keys := make(condKeys, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - if len(c) > 1 { - sort.Sort(keys) - } - return keys -} - -// Sentences return each one of the map records as a compound. -func (c Cond) Sentences() []Compound { - z := make([]Compound, 0, len(c)) - for _, k := range c.Keys() { - z = append(z, Cond{k: c[k]}) - } - return z -} - -// Operator returns the default compound operator. -func (c Cond) Operator() CompoundOperator { - return OperatorNone -} - -// Empty returns false if there are no conditions. -func (c Cond) Empty() bool { - for range c { - return false - } - return true -} - -type condKeys []interface{} - -func (ck condKeys) Len() int { - return len(ck) -} - -func (ck condKeys) Less(i, j int) bool { - return fmt.Sprintf("%v", ck[i]) < fmt.Sprintf("%v", ck[j]) -} - -func (ck condKeys) Swap(i, j int) { - ck[i], ck[j] = ck[j], ck[i] -} diff --git a/vendor/upper.io/db.v3/connection_url.go b/vendor/upper.io/db.v3/connection_url.go deleted file mode 100644 index d52a6610866..00000000000 --- a/vendor/upper.io/db.v3/connection_url.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// ConnectionURL represents a connection string. -type ConnectionURL interface { - // String returns the connection string that is going to be passed to the - // adapter. 
- String() string -} diff --git a/vendor/upper.io/db.v3/constraint.go b/vendor/upper.io/db.v3/constraint.go deleted file mode 100644 index f45ed4f8c66..00000000000 --- a/vendor/upper.io/db.v3/constraint.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Constraint interface represents a single condition, like "a = 1". where `a` -// is the key and `1` is the value. This is an exported interface but it's -// rarely used directly, you may want to use the `db.Cond{}` map instead. -type Constraint interface { - // Key is the leftmost part of the constraint and usually contains a column - // name. - Key() interface{} - - // Value if the rightmost part of the constraint and usually contains a - // column value. - Value() interface{} -} - -// Constraints interface represents an array or constraints, like "a = 1, b = -// 2, c = 3". -type Constraints interface { - // Constraints returns an array of constraints. - Constraints() []Constraint - // Keys returns the map keys always in the same order. - Keys() []interface{} -} - -type constraint struct { - k interface{} - v interface{} -} - -func (c constraint) Key() interface{} { - return c.k -} - -func (c constraint) Value() interface{} { - return c.v -} - -// NewConstraint creates a constraint. -func NewConstraint(key interface{}, value interface{}) Constraint { - return constraint{k: key, v: value} -} - -var ( - _ = Constraints(Cond{}) - _ = Constraint(&constraint{}) -) diff --git a/vendor/upper.io/db.v3/database.go b/vendor/upper.io/db.v3/database.go deleted file mode 100644 index 4098deed895..00000000000 --- a/vendor/upper.io/db.v3/database.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Database is an interface that defines methods that must be satisfied by -// all database adapters. -type Database interface { - // Driver returns the underlying driver the wrapper uses as an interface{}. - // - // In order to actually use the driver, the `interface{}` value needs to be - // casted into the appropriate type. - // - // Example: - // internalSQLDriver := sess.Driver().(*sql.DB) - Driver() interface{} - - // Open attempts to establish a connection with a DBMS. - Open(ConnectionURL) error - - // Clone duplicates the current database session. Returns an error if the - // clone did not succeed. - // Clone() (Database, error) - - // Ping returns an error if the database manager could not be reached. - Ping() error - - // Close closes the currently active connection to the database and clears - // caches. - Close() error - - // Collection returns a collection reference given a table name. - Collection(string) Collection - - // Collections returns the names of all non-system tables on the database. - Collections() ([]string, error) - - // Name returns the name of the active database. - Name() string - - // ConnectionURL returns the data used to set up the adapter. - ConnectionURL() ConnectionURL - - // ClearCache clears all the cache mechanisms the adapter is using. - ClearCache() - - Settings -} diff --git a/vendor/upper.io/db.v3/db.go b/vendor/upper.io/db.v3/db.go deleted file mode 100644 index e26a852d443..00000000000 --- a/vendor/upper.io/db.v3/db.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package db (or upper-db) provides a common interface to work with a variety -// of data sources using adapters that wrap mature database drivers. -// -// Install upper-db: -// -// go get upper.io/db.v3 -// -// Usage -// -// package main -// -// import ( -// "log" -// -// "upper.io/db.v3/postgresql" // Imports the postgresql adapter. 
-// ) -// -// var settings = postgresql.ConnectionURL{ -// Database: `booktown`, -// Host: `demo.upper.io`, -// User: `demouser`, -// Password: `demop4ss`, -// } -// -// // Book represents a book. -// type Book struct { -// ID uint `db:"id"` -// Title string `db:"title"` -// AuthorID uint `db:"author_id"` -// SubjectID uint `db:"subject_id"` -// } -// -// func main() { -// sess, err := postgresql.Open(settings) -// if err != nil { -// log.Fatal(err) -// } -// defer sess.Close() -// -// var books []Book -// if err := sess.Collection("books").Find().OrderBy("title").All(&books); err != nil { -// log.Fatal(err) -// } -// -// log.Println("Books:") -// for _, book := range books { -// log.Printf("%q (ID: %d)\n", book.Title, book.ID) -// } -// } -// -// See more usage examples and documentation for users at -// https://upper.io/db.v3. -package db // import "upper.io/db.v3" diff --git a/vendor/upper.io/db.v3/env.go b/vendor/upper.io/db.v3/env.go deleted file mode 100644 index 70cbd63c07d..00000000000 --- a/vendor/upper.io/db.v3/env.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "os" -) - -func envEnabled(name string) bool { - switch os.Getenv(name) { - case "1", "true", "TRUE", "t", "T": - return true - } - return false -} diff --git a/vendor/upper.io/db.v3/errors.go b/vendor/upper.io/db.v3/errors.go deleted file mode 100644 index 0c6f1f44e51..00000000000 --- a/vendor/upper.io/db.v3/errors.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "errors" -) - -// Error messages. -var ( - ErrNoMoreRows = errors.New(`upper: no more rows in this result set`) - ErrNotConnected = errors.New(`upper: not connected to a database`) - ErrMissingDatabaseName = errors.New(`upper: missing database name`) - ErrMissingCollectionName = errors.New(`upper: missing collection name`) - ErrCollectionDoesNotExist = errors.New(`upper: collection does not exist`) - ErrSockerOrHost = errors.New(`upper: you may connect either to a unix socket or a tcp address, but not both`) - ErrQueryLimitParam = errors.New(`upper: a query can accept only one limit parameter`) - ErrQuerySortParam = errors.New(`upper: a query can accept only one order by parameter`) - ErrQueryOffsetParam = errors.New(`upper: a query can accept only one offset parameter`) - ErrMissingConditions = errors.New(`upper: missing selector conditions`) - ErrUnsupported = errors.New(`upper: this action is currently unsupported on this database`) - ErrUndefined = errors.New(`upper: this value is undefined`) - ErrQueryIsPending = errors.New(`upper: can't execute this instruction while the result set is still open`) - ErrUnsupportedDestination = errors.New(`upper: unsupported destination type`) - ErrUnsupportedType = errors.New(`upper: this type does not support marshaling`) - ErrUnsupportedValue = errors.New(`upper: this value does not support unmarshaling`) - ErrUnknownConditionType = errors.New(`upper: arguments of type %T can't be used as constraints`) - ErrTooManyClients = errors.New(`upper: can't connect to database server: too many clients`) - ErrGivingUpTryingToConnect = errors.New(`upper: giving up trying to connect: too many clients`) - ErrMissingConnURL = errors.New(`upper: missing DSN`) - ErrNotImplemented = errors.New(`upper: call not implemented`) - ErrAlreadyWithinTransaction = errors.New(`upper: already within a transaction`) -) diff --git a/vendor/upper.io/db.v3/function.go b/vendor/upper.io/db.v3/function.go deleted file mode 100644 index 9fef4a313e5..00000000000 --- a/vendor/upper.io/db.v3/function.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package db - -// Function interface defines methods for representing database functions. -// This is an exported interface but it's rarely used directly, you may want to -// use the `db.Func()` function instead. -type Function interface { - // Name returns the function name. - Name() string - - // Argument returns the function arguments. - Arguments() []interface{} -} - -// Func represents a database function and satisfies the db.Function interface. -// -// Examples: -// -// // MOD(29, 9) -// db.Func("MOD", 29, 9) -// -// // CONCAT("foo", "bar") -// db.Func("CONCAT", "foo", "bar") -// -// // NOW() -// db.Func("NOW") -// -// // RTRIM("Hello ") -// db.Func("RTRIM", "Hello ") -func Func(name string, args ...interface{}) Function { - return &dbFunc{name: name, args: args} -} - -type dbFunc struct { - name string - args []interface{} -} - -func (f *dbFunc) Arguments() []interface{} { - return f.args -} - -func (f *dbFunc) Name() string { - return f.name -} - -var _ = Function(&dbFunc{}) diff --git a/vendor/upper.io/db.v3/internal/cache/cache.go b/vendor/upper.io/db.v3/internal/cache/cache.go deleted file mode 100644 index 36c2bd93c1c..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/cache.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (c) 2014-present José Carlos Nieto, https://menteslibres.net/xiam -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package cache - -import ( - "container/list" - "errors" - "fmt" - "strconv" - "sync" - - "upper.io/db.v3/internal/cache/hashstructure" -) - -const defaultCapacity = 128 - -// Cache holds a map of volatile key -> values. -type Cache struct { - cache map[string]*list.Element - li *list.List - capacity int - mu sync.RWMutex -} - -type item struct { - key string - value interface{} -} - -// NewCacheWithCapacity initializes a new caching space with the given -// capacity. -func NewCacheWithCapacity(capacity int) (*Cache, error) { - if capacity < 1 { - return nil, errors.New("Capacity must be greater than zero.") - } - return &Cache{ - cache: make(map[string]*list.Element), - li: list.New(), - capacity: capacity, - }, nil -} - -// NewCache initializes a new caching space with default settings. -func NewCache() *Cache { - c, err := NewCacheWithCapacity(defaultCapacity) - if err != nil { - panic(err.Error()) // Should never happen as we're not providing a negative defaultCapacity. 
- } - return c -} - -// Read attempts to retrieve a cached value as a string, if the value does not -// exists returns an empty string and false. -func (c *Cache) Read(h Hashable) (string, bool) { - if v, ok := c.ReadRaw(h); ok { - if s, ok := v.(string); ok { - return s, true - } - } - return "", false -} - -// ReadRaw attempts to retrieve a cached value as an interface{}, if the value -// does not exists returns nil and false. -func (c *Cache) ReadRaw(h Hashable) (interface{}, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - data, ok := c.cache[h.Hash()] - if ok { - return data.Value.(*item).value, true - } - return nil, false -} - -// Write stores a value in memory. If the value already exists its overwritten. -func (c *Cache) Write(h Hashable, value interface{}) { - key := h.Hash() - - c.mu.Lock() - defer c.mu.Unlock() - - if el, ok := c.cache[key]; ok { - el.Value.(*item).value = value - c.li.MoveToFront(el) - return - } - - c.cache[key] = c.li.PushFront(&item{key, value}) - - for c.li.Len() > c.capacity { - el := c.li.Remove(c.li.Back()) - delete(c.cache, el.(*item).key) - if p, ok := el.(*item).value.(HasOnPurge); ok { - p.OnPurge() - } - } -} - -// Clear generates a new memory space, leaving the old memory unreferenced, so -// it can be claimed by the garbage collector. -func (c *Cache) Clear() { - c.mu.Lock() - defer c.mu.Unlock() - for _, el := range c.cache { - if p, ok := el.Value.(*item).value.(HasOnPurge); ok { - p.OnPurge() - } - } - c.cache = make(map[string]*list.Element) - c.li.Init() -} - -// Hash returns a hash of the given struct. -func Hash(v interface{}) string { - q, err := hashstructure.Hash(v, nil) - if err != nil { - panic(fmt.Sprintf("Could not hash struct: %v", err.Error())) - } - return strconv.FormatUint(q, 10) -} - -type hash struct { - name string -} - -func (h *hash) Hash() string { - return h.name -} - -// String returns a Hashable that produces a hash equal to the given string. -func String(s string) Hashable { - return &hash{s} -} diff --git a/vendor/upper.io/db.v3/internal/cache/hashstructure/LICENSE b/vendor/upper.io/db.v3/internal/cache/hashstructure/LICENSE deleted file mode 100644 index a3866a291fd..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/hashstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
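// Usage sketch for the LRU cache in cache.go above (the "greeting" key and
// fmt output are illustrative; NewCache, Write, Read and String are the
// package's own API):
//
//	c := cache.NewCache() // default capacity: 128 entries
//	c.Write(cache.String("greeting"), "hello")
//	if v, ok := c.Read(cache.String("greeting")); ok {
//		fmt.Println(v) // prints "hello"
//	}
//
// Once more than 128 distinct keys have been written, the least recently
// used entries are evicted, and values implementing HasOnPurge are notified
// via OnPurge.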
diff --git a/vendor/upper.io/db.v3/internal/cache/hashstructure/README.md b/vendor/upper.io/db.v3/internal/cache/hashstructure/README.md deleted file mode 100644 index 7d0de5bf5a6..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/hashstructure/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# hashstructure - -hashstructure is a Go library for creating a unique hash value -for arbitrary values in Go. - -This can be used to key values in a hash (for use in a map, set, etc.) -that are complex. The most common use case is comparing two values without -sending data across the network, caching values locally (de-dup), and so on. - -## Features - - * Hash any arbitrary Go value, including complex types. - - * Tag a struct field to ignore it and not affect the hash value. - - * Tag a slice type struct field to treat it as a set where ordering - doesn't affect the hash code but the field itself is still taken into - account to create the hash value. - - * Optionally specify a custom hash function to optimize for speed, collision - avoidance for your data set, etc. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/hashstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). - -A quick code example is shown below: - - - type ComplexStruct struct { - Name string - Age uint - Metadata map[string]interface{} - } - - v := ComplexStruct{ - Name: "mitchellh", - Age: 64, - Metadata: map[string]interface{}{ - "car": true, - "location": "California", - "siblings": []string{"Bob", "John"}, - }, - } - - hash, err := hashstructure.Hash(v, nil) - if err != nil { - panic(err) - } - - fmt.Printf("%d", hash) - // Output: - // 2307517237273902113 diff --git a/vendor/upper.io/db.v3/internal/cache/hashstructure/hashstructure.go b/vendor/upper.io/db.v3/internal/cache/hashstructure/hashstructure.go deleted file mode 100644 index 9a2e9535d2f..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/hashstructure/hashstructure.go +++ /dev/null @@ -1,325 +0,0 @@ -package hashstructure - -import ( - "encoding/binary" - "fmt" - "hash" - "hash/fnv" - "reflect" -) - -// HashOptions are options that are available for hashing. -type HashOptions struct { - // Hasher is the hash function to use. If this isn't set, it will - // default to FNV. - Hasher hash.Hash64 - - // TagName is the struct tag to look at when hashing the structure. - // By default this is "hash". - TagName string -} - -// Hash returns the hash value of an arbitrary value. -// -// If opts is nil, then default options will be used. See HashOptions -// for the default values. -// -// Notes on the value: -// -// * Unexported fields on structs are ignored and do not affect the -// hash value. -// -// * Adding an exported field to a struct with the zero value will change -// the hash value. -// -// For structs, the hashing can be controlled using tags. For example: -// -// struct { -// Name string -// UUID string `hash:"ignore"` -// } -// -// The available tag values are: -// -// * "ignore" - The field will be ignored and not affect the hash code. -// -// * "set" - The field will be treated as a set, where ordering doesn't -// affect the hash code. This only works for slices. 
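// For instance (a hedged example; the User type is hypothetical):
//
//	type User struct {
//		Name  string
//		Token string   `hash:"ignore"`
//		Tags  []string `hash:"set"`
//	}
//
//	a, _ := hashstructure.Hash(User{"ann", "t1", []string{"x", "y"}}, nil)
//	b, _ := hashstructure.Hash(User{"ann", "t2", []string{"y", "x"}}, nil)
//	// a == b: Token is ignored, and Tags hashes as an unordered set.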
-// -func Hash(v interface{}, opts *HashOptions) (uint64, error) { - // Create default options - if opts == nil { - opts = &HashOptions{} - } - if opts.Hasher == nil { - opts.Hasher = fnv.New64() - } - if opts.TagName == "" { - opts.TagName = "hash" - } - - // Reset the hash - opts.Hasher.Reset() - - // Create our walker and walk the structure - w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - } - return w.visit(reflect.ValueOf(v), nil) -} - -type walker struct { - h hash.Hash64 - tag string -} - -type visitOpts struct { - // Flags are a bitmask of flags to affect behavior of this visit - Flags visitFlag - - // Information about the struct containing this field - Struct interface{} - StructField string -} - -func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { - // Loop since these can be wrapped in multiple layers of pointers - // and interfaces. - for { - // If we have an interface, dereference it. We have to do this up - // here because it might be a nil in there and the check below must - // catch that. - if v.Kind() == reflect.Interface { - v = v.Elem() - continue - } - - if v.Kind() == reflect.Ptr { - v = reflect.Indirect(v) - continue - } - - break - } - - // If it is nil, treat it like a zero. - if !v.IsValid() { - var tmp int8 - v = reflect.ValueOf(tmp) - } - - // Binary writing can use raw ints, we have to convert to - // a sized-int, we'll choose the largest... - switch v.Kind() { - case reflect.Int: - v = reflect.ValueOf(int64(v.Int())) - case reflect.Uint: - v = reflect.ValueOf(uint64(v.Uint())) - case reflect.Bool: - var tmp int8 - if v.Bool() { - tmp = 1 - } - v = reflect.ValueOf(tmp) - } - - k := v.Kind() - - // We can shortcut numeric values by directly binary writing them - if k >= reflect.Int && k <= reflect.Complex64 { - // A direct hash calculation - w.h.Reset() - err := binary.Write(w.h, binary.LittleEndian, v.Interface()) - return w.h.Sum64(), err - } - - switch k { - case reflect.Array: - var h uint64 - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - h = hashUpdateOrdered(w.h, h, current) - } - - return h, nil - - case reflect.Map: - var includeMap IncludableMap - if opts != nil && opts.Struct != nil { - if v, ok := opts.Struct.(IncludableMap); ok { - includeMap = v - } - } - - // Build the hash for the map. We do this by XOR-ing all the key - // and value hashes. This makes it deterministic despite ordering. 
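// A one-line illustration of why XOR yields order independence: for any
// uint64 values a, b and c, (a ^ b) ^ c == (c ^ a) ^ b, so Go's randomized
// map iteration order cannot change the resulting map hash.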
- var h uint64 - for _, k := range v.MapKeys() { - v := v.MapIndex(k) - if includeMap != nil { - incl, err := includeMap.HashIncludeMap( - opts.StructField, k.Interface(), v.Interface()) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - kh, err := w.visit(k, nil) - if err != nil { - return 0, err - } - vh, err := w.visit(v, nil) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - - return h, nil - - case reflect.Struct: - var include Includable - parent := v.Interface() - if impl, ok := parent.(Includable); ok { - include = impl - } - - t := v.Type() - h, err := w.visit(reflect.ValueOf(t.Name()), nil) - if err != nil { - return 0, err - } - - l := v.NumField() - for i := 0; i < l; i++ { - if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { - var f visitFlag - fieldType := t.Field(i) - if fieldType.PkgPath != "" { - // Unexported - continue - } - - tag := fieldType.Tag.Get(w.tag) - if tag == "ignore" { - // Ignore this field - continue - } - - // Check if we implement includable and check it - if include != nil { - incl, err := include.HashInclude(fieldType.Name, v) - if err != nil { - return 0, err - } - if !incl { - continue - } - } - - switch tag { - case "set": - f |= visitFlagSet - } - - kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) - if err != nil { - return 0, err - } - - vh, err := w.visit(v, &visitOpts{ - Flags: f, - Struct: parent, - StructField: fieldType.Name, - }) - if err != nil { - return 0, err - } - - fieldHash := hashUpdateOrdered(w.h, kh, vh) - h = hashUpdateUnordered(h, fieldHash) - } - } - - return h, nil - - case reflect.Slice: - // We have two behaviors here. If it isn't a set, then we just - // visit all the elements. If it is a set, then we do a deterministic - // hash code. - var h uint64 - var set bool - if opts != nil { - set = (opts.Flags & visitFlagSet) != 0 - } - l := v.Len() - for i := 0; i < l; i++ { - current, err := w.visit(v.Index(i), nil) - if err != nil { - return 0, err - } - - if set { - h = hashUpdateUnordered(h, current) - } else { - h = hashUpdateOrdered(w.h, h, current) - } - } - - return h, nil - - case reflect.String: - // Directly hash - w.h.Reset() - _, err := w.h.Write([]byte(v.String())) - return w.h.Sum64(), err - - default: - return 0, fmt.Errorf("unknown kind to hash: %s", k) - } -} - -func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { - // For ordered updates, use a real hash function - h.Reset() - - // We just panic if the binary writes fail because we are writing - // an int64 which should never be fail-able. - e1 := binary.Write(h, binary.LittleEndian, a) - e2 := binary.Write(h, binary.LittleEndian, b) - if e1 != nil { - panic(e1) - } - if e2 != nil { - panic(e2) - } - - return h.Sum64() -} - -func hashUpdateUnordered(a, b uint64) uint64 { - return a ^ b -} - -// visitFlag is used as a bitmask for affecting visit behavior -type visitFlag uint - -const ( - visitFlagInvalid visitFlag = iota - visitFlagSet = iota << 1 -) - -var ( - _ = visitFlagInvalid -) diff --git a/vendor/upper.io/db.v3/internal/cache/hashstructure/include.go b/vendor/upper.io/db.v3/internal/cache/hashstructure/include.go deleted file mode 100644 index b6289c0bee7..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/hashstructure/include.go +++ /dev/null @@ -1,15 +0,0 @@ -package hashstructure - -// Includable is an interface that can optionally be implemented by -// a struct. 
It will be called for each field in the struct to check whether -// it should be included in the hash. -type Includable interface { - HashInclude(field string, v interface{}) (bool, error) -} - -// IncludableMap is an interface that can optionally be implemented by -// a struct. It will be called when a map-type field is found to ask the -// struct if the map item should be included in the hash. -type IncludableMap interface { - HashIncludeMap(field string, k, v interface{}) (bool, error) -} diff --git a/vendor/upper.io/db.v3/internal/cache/interface.go b/vendor/upper.io/db.v3/internal/cache/interface.go deleted file mode 100644 index 489d64520fa..00000000000 --- a/vendor/upper.io/db.v3/internal/cache/interface.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2014-present José Carlos Nieto, https://menteslibres.net/xiam -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package cache - -// Hashable types must implement a method that returns a key. This key will be -// associated with a cached value. -type Hashable interface { - Hash() string -} - -// HasOnPurge type is (optionally) implemented by cache objects to clean after -// themselves. -type HasOnPurge interface { - OnPurge() -} diff --git a/vendor/upper.io/db.v3/internal/immutable/immutable.go b/vendor/upper.io/db.v3/internal/immutable/immutable.go deleted file mode 100644 index 57d29ce2708..00000000000 --- a/vendor/upper.io/db.v3/internal/immutable/immutable.go +++ /dev/null @@ -1,28 +0,0 @@ -package immutable - -// Immutable represents an immutable chain that, if passed to FastForward, -// applies Fn() to every element of a chain, the first element of this chain is -// represented by Base(). -type Immutable interface { - // Prev is the previous element on a chain. - Prev() Immutable - // Fn a function that is able to modify the passed element. - Fn(interface{}) error - // Base is the first element on a chain, there's no previous element before - // the Base element. - Base() interface{} -} - -// FastForward applies all Fn methods in order on the given new Base. 
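// A compact sketch of a conforming chain (the step type is hypothetical;
// Immutable and FastForward are the package's own API):
//
//	type step struct {
//		prev Immutable
//		fn   func(interface{}) error
//	}
//
//	func (s *step) Prev() Immutable   { return s.prev }
//	func (s *step) Base() interface{} { return &[]string{} }
//	func (s *step) Fn(in interface{}) error {
//		if s.fn != nil {
//			return s.fn(in)
//		}
//		return nil
//	}
//
// FastForward recurses back to the element whose Prev() is nil, takes its
// Base(), and then applies every Fn on the way forward, returning the
// accumulated value.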
-func FastForward(curr Immutable) (interface{}, error) { - prev := curr.Prev() - if prev == nil { - return curr.Base(), nil - } - in, err := FastForward(prev) - if err != nil { - return nil, err - } - err = curr.Fn(in) - return in, err -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/collection.go b/vendor/upper.io/db.v3/internal/sqladapter/collection.go deleted file mode 100644 index 444e440b23c..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/collection.go +++ /dev/null @@ -1,328 +0,0 @@ -package sqladapter - -import ( - "errors" - "fmt" - "reflect" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter/exql" - "upper.io/db.v3/lib/reflectx" -) - -var mapper = reflectx.NewMapper("db") - -var errMissingPrimaryKeys = errors.New("Table %q has no primary keys") - -// Collection represents a SQL table. -type Collection interface { - PartialCollection - BaseCollection -} - -// PartialCollection defines methods that must be implemented by the adapter. -type PartialCollection interface { - // Database returns the parent database. - Database() Database - - // Name returns the name of the table. - Name() string - - // Insert inserts a new item into the collection. - Insert(interface{}) (interface{}, error) -} - -// BaseCollection provides logic for methods that can be shared across all SQL -// adapters. -type BaseCollection interface { - // Exists returns true if the collection exists. - Exists() bool - - // Find creates and returns a new result set. - Find(conds ...interface{}) db.Result - - // Truncate removes all items on the collection. - Truncate() error - - // InsertReturning inserts a new item and updates it with the - // actual values from the database. - InsertReturning(interface{}) error - - // UpdateReturning updates an item and returns the actual values from the - // database. - UpdateReturning(interface{}) error - - // PrimaryKeys returns the table's primary keys. - PrimaryKeys() []string -} - -type condsFilter interface { - FilterConds(...interface{}) []interface{} -} - -// collection is the implementation of Collection. -type collection struct { - BaseCollection - PartialCollection - - pk []string - err error -} - -var ( - _ = Collection(&collection{}) -) - -// NewBaseCollection returns a collection with basic methods. -func NewBaseCollection(p PartialCollection) BaseCollection { - c := &collection{PartialCollection: p} - c.pk, c.err = c.Database().PrimaryKeys(c.Name()) - return c -} - -// PrimaryKeys returns the collection's primary keys, if any. -func (c *collection) PrimaryKeys() []string { - return c.pk -} - -func (c *collection) filterConds(conds ...interface{}) []interface{} { - if tr, ok := c.PartialCollection.(condsFilter); ok { - return tr.FilterConds(conds...) - } - if len(conds) == 1 && len(c.pk) == 1 { - if id := conds[0]; IsKeyValue(id) { - conds[0] = db.Cond{c.pk[0]: db.Eq(id)} - } - } - return conds -} - -// Find creates a result set with the given conditions. -func (c *collection) Find(conds ...interface{}) db.Result { - if c.err != nil { - res := &Result{} - res.setErr(c.err) - return res - } - return NewResult( - c.Database(), - c.Name(), - c.filterConds(conds...), - ) -} - -// Exists returns true if the collection exists. -func (c *collection) Exists() bool { - if err := c.Database().TableExists(c.Name()); err != nil { - return false - } - return true -} - -// InsertReturning inserts an item and updates the given variable reference. 
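// Usage sketch (sess and Person are hypothetical; as enforced below, the
// argument must be a pointer):
//
//	person := Person{Name: "ann"}
//	if err := sess.Collection("people").InsertReturning(&person); err != nil {
//		return err
//	}
//	// person now carries database-generated values such as its primary key.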
-func (c *collection) InsertReturning(item interface{}) error { - if item == nil || reflect.TypeOf(item).Kind() != reflect.Ptr { - return fmt.Errorf("Expecting a pointer but got %T", item) - } - - // Grab primary keys - pks := c.PrimaryKeys() - if len(pks) == 0 { - if !c.Exists() { - return db.ErrCollectionDoesNotExist - } - return fmt.Errorf(errMissingPrimaryKeys.Error(), c.Name()) - } - - var tx DatabaseTx - inTx := false - - if currTx := c.Database().Transaction(); currTx != nil { - tx = NewDatabaseTx(c.Database()) - inTx = true - } else { - // Not within a transaction, let's create one. - var err error - tx, err = c.Database().NewDatabaseTx(c.Database().Context()) - if err != nil { - return err - } - defer tx.(Database).Close() - } - - // Allocate a clone of item. - newItem := reflect.New(reflect.ValueOf(item).Elem().Type()).Interface() - var newItemFieldMap map[string]reflect.Value - - itemValue := reflect.ValueOf(item) - - col := tx.(Database).Collection(c.Name()) - - // Insert item as is and grab the returning ID. - var newItemRes db.Result - id, err := col.Insert(item) - if err != nil { - goto cancel - } - if id == nil { - err = fmt.Errorf("InsertReturning: Could not get a valid ID after inserting. Does the %q table have a primary key?", c.Name()) - goto cancel - } - - if len(pks) > 1 { - newItemRes = col.Find(id) - } else { - // We have one primary key, build a explicit db.Cond with it to prevent - // string keys to be considered as raw conditions. - newItemRes = col.Find(db.Cond{pks[0]: id}) // We already checked that pks is not empty, so pks[0] is defined. - } - - // Fetch the row that was just interted into newItem - err = newItemRes.One(newItem) - if err != nil { - goto cancel - } - - switch reflect.ValueOf(newItem).Elem().Kind() { - case reflect.Struct: - // Get valid fields from newItem to overwrite those that are on item. - newItemFieldMap = mapper.ValidFieldMap(reflect.ValueOf(newItem)) - for fieldName := range newItemFieldMap { - mapper.FieldByName(itemValue, fieldName).Set(newItemFieldMap[fieldName]) - } - case reflect.Map: - newItemV := reflect.ValueOf(newItem).Elem() - itemV := reflect.ValueOf(item) - if itemV.Kind() == reflect.Ptr { - itemV = itemV.Elem() - } - for _, keyV := range newItemV.MapKeys() { - itemV.SetMapIndex(keyV, newItemV.MapIndex(keyV)) - } - default: - err = fmt.Errorf("InsertReturning: expecting a pointer to map or struct, got %T", newItem) - goto cancel - } - - if !inTx { - // This is only executed if t.Database() was **not** a transaction and if - // sess was created with sess.NewTransaction(). - return tx.Commit() - } - - return err - -cancel: - // This goto label should only be used when we got an error within a - // transaction and we don't want to continue. - - if !inTx { - // This is only executed if t.Database() was **not** a transaction and if - // sess was created with sess.NewTransaction(). - _ = tx.Rollback() - } - return err -} - -func (c *collection) UpdateReturning(item interface{}) error { - if item == nil || reflect.TypeOf(item).Kind() != reflect.Ptr { - return fmt.Errorf("Expecting a pointer but got %T", item) - } - - // Grab primary keys - pks := c.PrimaryKeys() - if len(pks) == 0 { - if !c.Exists() { - return db.ErrCollectionDoesNotExist - } - return fmt.Errorf(errMissingPrimaryKeys.Error(), c.Name()) - } - - var tx DatabaseTx - inTx := false - - if currTx := c.Database().Transaction(); currTx != nil { - tx = NewDatabaseTx(c.Database()) - inTx = true - } else { - // Not within a transaction, let's create one. 
- var err error - tx, err = c.Database().NewDatabaseTx(c.Database().Context()) - if err != nil { - return err - } - defer tx.(Database).Close() - } - - // Allocate a clone of item. - defaultItem := reflect.New(reflect.ValueOf(item).Elem().Type()).Interface() - var defaultItemFieldMap map[string]reflect.Value - - itemValue := reflect.ValueOf(item) - - conds := db.Cond{} - for _, pk := range pks { - conds[pk] = db.Eq(mapper.FieldByName(itemValue, pk).Interface()) - } - - col := tx.(Database).Collection(c.Name()) - - err := col.Find(conds).Update(item) - if err != nil { - goto cancel - } - - if err = col.Find(conds).One(defaultItem); err != nil { - goto cancel - } - - switch reflect.ValueOf(defaultItem).Elem().Kind() { - case reflect.Struct: - // Get valid fields from defaultItem to overwrite those that are on item. - defaultItemFieldMap = mapper.ValidFieldMap(reflect.ValueOf(defaultItem)) - for fieldName := range defaultItemFieldMap { - mapper.FieldByName(itemValue, fieldName).Set(defaultItemFieldMap[fieldName]) - } - case reflect.Map: - defaultItemV := reflect.ValueOf(defaultItem).Elem() - itemV := reflect.ValueOf(item) - if itemV.Kind() == reflect.Ptr { - itemV = itemV.Elem() - } - for _, keyV := range defaultItemV.MapKeys() { - itemV.SetMapIndex(keyV, defaultItemV.MapIndex(keyV)) - } - default: - panic("default") - } - - if !inTx { - // This is only executed if t.Database() was **not** a transaction and if - // sess was created with sess.NewTransaction(). - return tx.Commit() - } - return err - -cancel: - // This goto label should only be used when we got an error within a - // transaction and we don't want to continue. - - if !inTx { - // This is only executed if t.Database() was **not** a transaction and if - // sess was created with sess.NewTransaction(). - _ = tx.Rollback() - } - return err -} - -// Truncate deletes all rows from the table. -func (c *collection) Truncate() error { - stmt := exql.Statement{ - Type: exql.Truncate, - Table: exql.TableWithName(c.Name()), - } - if _, err := c.Database().Exec(&stmt); err != nil { - return err - } - return nil -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/compat/query.go b/vendor/upper.io/db.v3/internal/sqladapter/compat/query.go deleted file mode 100644 index 93cb8fcf064..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/compat/query.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build !go1.8 - -package compat - -import ( - "context" - "database/sql" -) - -type PreparedExecer interface { - Exec(...interface{}) (sql.Result, error) -} - -func PreparedExecContext(p PreparedExecer, ctx context.Context, args []interface{}) (sql.Result, error) { - return p.Exec(args...) -} - -type Execer interface { - Exec(string, ...interface{}) (sql.Result, error) -} - -func ExecContext(p Execer, ctx context.Context, query string, args []interface{}) (sql.Result, error) { - return p.Exec(query, args...) -} - -type PreparedQueryer interface { - Query(...interface{}) (*sql.Rows, error) -} - -func PreparedQueryContext(p PreparedQueryer, ctx context.Context, args []interface{}) (*sql.Rows, error) { - return p.Query(args...) -} - -type Queryer interface { - Query(string, ...interface{}) (*sql.Rows, error) -} - -func QueryContext(p Queryer, ctx context.Context, query string, args []interface{}) (*sql.Rows, error) { - return p.Query(query, args...) 
-} - -type PreparedRowQueryer interface { - QueryRow(...interface{}) *sql.Row -} - -func PreparedQueryRowContext(p PreparedRowQueryer, ctx context.Context, args []interface{}) *sql.Row { - return p.QueryRow(args...) -} - -type RowQueryer interface { - QueryRow(string, ...interface{}) *sql.Row -} - -func QueryRowContext(p RowQueryer, ctx context.Context, query string, args []interface{}) *sql.Row { - return p.QueryRow(query, args...) -} - -type Preparer interface { - Prepare(string) (*sql.Stmt, error) -} - -func PrepareContext(p Preparer, ctx context.Context, query string) (*sql.Stmt, error) { - return p.Prepare(query) -} - -type TxStarter interface { - Begin() (*sql.Tx, error) -} - -func BeginTx(p TxStarter, ctx context.Context, opts interface{}) (*sql.Tx, error) { - return p.Begin() -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/compat/query_go18.go b/vendor/upper.io/db.v3/internal/sqladapter/compat/query_go18.go deleted file mode 100644 index a3abbaf86b5..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/compat/query_go18.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build go1.8 - -package compat - -import ( - "context" - "database/sql" -) - -type PreparedExecer interface { - ExecContext(context.Context, ...interface{}) (sql.Result, error) -} - -func PreparedExecContext(p PreparedExecer, ctx context.Context, args []interface{}) (sql.Result, error) { - return p.ExecContext(ctx, args...) -} - -type Execer interface { - ExecContext(context.Context, string, ...interface{}) (sql.Result, error) -} - -func ExecContext(p Execer, ctx context.Context, query string, args []interface{}) (sql.Result, error) { - return p.ExecContext(ctx, query, args...) -} - -type PreparedQueryer interface { - QueryContext(context.Context, ...interface{}) (*sql.Rows, error) -} - -func PreparedQueryContext(p PreparedQueryer, ctx context.Context, args []interface{}) (*sql.Rows, error) { - return p.QueryContext(ctx, args...) -} - -type Queryer interface { - QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) -} - -func QueryContext(p Queryer, ctx context.Context, query string, args []interface{}) (*sql.Rows, error) { - return p.QueryContext(ctx, query, args...) -} - -type PreparedRowQueryer interface { - QueryRowContext(context.Context, ...interface{}) *sql.Row -} - -func PreparedQueryRowContext(p PreparedRowQueryer, ctx context.Context, args []interface{}) *sql.Row { - return p.QueryRowContext(ctx, args...) -} - -type RowQueryer interface { - QueryRowContext(context.Context, string, ...interface{}) *sql.Row -} - -func QueryRowContext(p RowQueryer, ctx context.Context, query string, args []interface{}) *sql.Row { - return p.QueryRowContext(ctx, query, args...) 
-} - -type Preparer interface { - PrepareContext(context.Context, string) (*sql.Stmt, error) -} - -func PrepareContext(p Preparer, ctx context.Context, query string) (*sql.Stmt, error) { - return p.PrepareContext(ctx, query) -} - -type TxStarter interface { - BeginTx(context.Context, *sql.TxOptions) (*sql.Tx, error) -} - -func BeginTx(p TxStarter, ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) { - return p.BeginTx(ctx, opts) -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/database.go b/vendor/upper.io/db.v3/internal/sqladapter/database.go deleted file mode 100644 index 879c9abb99c..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/database.go +++ /dev/null @@ -1,734 +0,0 @@ -package sqladapter - -import ( - "context" - "database/sql" - "math" - "strconv" - "sync" - "sync/atomic" - "time" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/cache" - "upper.io/db.v3/internal/sqladapter/compat" - "upper.io/db.v3/internal/sqladapter/exql" - "upper.io/db.v3/lib/sqlbuilder" -) - -var ( - lastSessID uint64 - lastTxID uint64 -) - -// hasCleanUp is implemented by structs that have a clean up routine that needs -// to be called before Close(). -type hasCleanUp interface { - CleanUp() error -} - -// hasStatementExec allows the adapter to have its own exec statement. -type hasStatementExec interface { - StatementExec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) -} - -type hasConvertValues interface { - ConvertValues(values []interface{}) []interface{} -} - -// Database represents a SQL database. -type Database interface { - PartialDatabase - BaseDatabase -} - -// PartialDatabase defines methods to be implemented by SQL database adapters. -type PartialDatabase interface { - sqlbuilder.SQLBuilder - - // Collections returns a list of non-system tables from the database. - Collections() ([]string, error) - - // Open opens a new connection - Open(db.ConnectionURL) error - - // TableExists returns an error if the given table does not exist. - TableExists(name string) error - - // LookupName returns the name of the database. - LookupName() (string, error) - - // PrimaryKeys returns all primary keys on the table. - PrimaryKeys(name string) ([]string, error) - - // NewCollection allocates a new collection by name. - NewCollection(name string) db.Collection - - // CompileStatement transforms an internal statement into a format - // database/sql can understand. - CompileStatement(stmt *exql.Statement, args []interface{}) (string, []interface{}) - - // ConnectionURL returns the database's connection URL, if any. - ConnectionURL() db.ConnectionURL - - // Err wraps specific database errors (given in string form) and transforms them - // into error values. - Err(in error) (out error) - - // NewDatabaseTx begins a transaction block and returns a new - // session backed by it. - NewDatabaseTx(ctx context.Context) (DatabaseTx, error) -} - -// BaseDatabase provides logic for methods that can be shared across all SQL -// adapters. -type BaseDatabase interface { - db.Settings - - // Name returns the name of the database. - Name() string - - // Close closes the database session - Close() error - - // Ping checks if the database server is reachable. - Ping() error - - // ClearCache clears all caches the session is using - ClearCache() - - // Collection returns a new collection. 
- Collection(string) db.Collection - - // Driver returns the underlying driver the session is using - Driver() interface{} - - // WaitForConnection attempts to run the given connection function a fixed - // number of times before failing. - WaitForConnection(func() error) error - - // BindSession sets the *sql.DB the session will use. - BindSession(*sql.DB) error - - // Session returns the *sql.DB the session is using. - Session() *sql.DB - - // BindTx binds a transaction to the current session. - BindTx(context.Context, *sql.Tx) error - - // Returns the current transaction the session is using. - Transaction() BaseTx - - // NewClone clones the database using the given PartialDatabase as base. - NewClone(PartialDatabase, bool) (BaseDatabase, error) - - // Context returns the default context the session is using. - Context() context.Context - - // SetContext sets a default context for the session. - SetContext(context.Context) - - // TxOptions returns the default TxOptions for new transactions in the - // session. - TxOptions() *sql.TxOptions - - // SetTxOptions sets default TxOptions for the session. - SetTxOptions(txOptions sql.TxOptions) -} - -// NewBaseDatabase provides a BaseDatabase given a PartialDatabase -func NewBaseDatabase(p PartialDatabase) BaseDatabase { - d := &database{ - Settings: db.NewSettings(), - PartialDatabase: p, - cachedCollections: cache.NewCache(), - cachedStatements: cache.NewCache(), - } - return d -} - -// database is the actual implementation of Database and joins methods from -// BaseDatabase and PartialDatabase -type database struct { - PartialDatabase - db.Settings - - lookupNameOnce sync.Once - name string - - mu sync.Mutex // guards ctx, txOptions - ctx context.Context - txOptions *sql.TxOptions - - sessMu sync.Mutex // guards sess, baseTx - sess *sql.DB - baseTx BaseTx - - sessID uint64 - txID uint64 - - cacheMu sync.Mutex // guards cachedStatements and cachedCollections - cachedStatements *cache.Cache - cachedCollections *cache.Cache - - template *exql.Template -} - -var ( - _ = db.Database(&database{}) -) - -// Session returns the underlying *sql.DB -func (d *database) Session() *sql.DB { - return d.sess -} - -// SetContext sets the session's default context. -func (d *database) SetContext(ctx context.Context) { - d.mu.Lock() - d.ctx = ctx - d.mu.Unlock() -} - -// Context returns the session's default context. -func (d *database) Context() context.Context { - d.mu.Lock() - defer d.mu.Unlock() - if d.ctx == nil { - return context.Background() - } - return d.ctx -} - -// SetTxOptions sets the session's default TxOptions. -func (d *database) SetTxOptions(txOptions sql.TxOptions) { - d.mu.Lock() - d.txOptions = &txOptions - d.mu.Unlock() -} - -// TxOptions returns the session's default TxOptions. 
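// Usage sketch (the isolation level chosen here is illustrative):
//
//	sess.SetTxOptions(sql.TxOptions{Isolation: sql.LevelSerializable})
//
// The stored options are the defaults consulted when the session opens new
// transactions; SetTxOptions receives its argument by value, so later
// mutation of the caller's struct has no effect on the session.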
-func (d *database) TxOptions() *sql.TxOptions { - d.mu.Lock() - defer d.mu.Unlock() - - if d.txOptions == nil { - return nil - } - return d.txOptions -} - -// BindTx binds a *sql.Tx into *database -func (d *database) BindTx(ctx context.Context, t *sql.Tx) error { - d.sessMu.Lock() - defer d.sessMu.Unlock() - - d.baseTx = newBaseTx(t) - if err := d.Ping(); err != nil { - return err - } - - d.SetContext(ctx) - d.txID = newBaseTxID() - return nil -} - -// Tx returns a BaseTx, which, if not nil, means that this session is within a -// transaction -func (d *database) Transaction() BaseTx { - return d.baseTx -} - -// Name returns the database named -func (d *database) Name() string { - d.lookupNameOnce.Do(func() { - if d.name == "" { - d.name, _ = d.PartialDatabase.LookupName() - } - }) - - return d.name -} - -// BindSession binds a *sql.DB into *database -func (d *database) BindSession(sess *sql.DB) error { - d.sessMu.Lock() - d.sess = sess - d.sessMu.Unlock() - - if err := d.Ping(); err != nil { - return err - } - - d.sessID = newSessionID() - name, err := d.PartialDatabase.LookupName() - if err != nil { - return err - } - - d.name = name - - return nil -} - -// Ping checks whether a connection to the database is still alive by pinging -// it -func (d *database) Ping() error { - if d.sess != nil { - return d.sess.Ping() - } - return nil -} - -// SetConnMaxLifetime sets the maximum amount of time a connection may be -// reused. -func (d *database) SetConnMaxLifetime(t time.Duration) { - d.Settings.SetConnMaxLifetime(t) - if sess := d.Session(); sess != nil { - sess.SetConnMaxLifetime(d.Settings.ConnMaxLifetime()) - } -} - -// SetMaxIdleConns sets the maximum number of connections in the idle -// connection pool. -func (d *database) SetMaxIdleConns(n int) { - d.Settings.SetMaxIdleConns(n) - if sess := d.Session(); sess != nil { - sess.SetMaxIdleConns(d.MaxIdleConns()) - } -} - -// SetMaxOpenConns sets the maximum number of open connections to the -// database. -func (d *database) SetMaxOpenConns(n int) { - d.Settings.SetMaxOpenConns(n) - if sess := d.Session(); sess != nil { - sess.SetMaxOpenConns(d.MaxOpenConns()) - } -} - -// ClearCache removes all caches. -func (d *database) ClearCache() { - d.cacheMu.Lock() - defer d.cacheMu.Unlock() - d.cachedCollections.Clear() - d.cachedStatements.Clear() - if d.template != nil { - d.template.Cache.Clear() - } -} - -// NewClone binds a clone that is linked to the current -// session. This is commonly done before creating a transaction -// session. -func (d *database) NewClone(p PartialDatabase, checkConn bool) (BaseDatabase, error) { - nd := NewBaseDatabase(p).(*database) - - nd.name = d.name - nd.sess = d.sess - - if checkConn { - if err := nd.Ping(); err != nil { - // Retry once if ping fails. - return d.NewClone(p, false) - } - } - - nd.sessID = newSessionID() - - // New transaction should inherit parent settings - copySettings(d, nd) - - return nd, nil -} - -// Close terminates the current database session -func (d *database) Close() error { - defer func() { - d.sessMu.Lock() - d.sess = nil - d.baseTx = nil - d.sessMu.Unlock() - }() - if d.sess == nil { - return nil - } - - d.cachedCollections.Clear() - d.cachedStatements.Clear() // Closes prepared statements as well. - - tx := d.Transaction() - if tx == nil { - if cleaner, ok := d.PartialDatabase.(hasCleanUp); ok { - if err := cleaner.CleanUp(); err != nil { - return err - } - } - // Not within a transaction. 
- return d.sess.Close() - } - - if !tx.Committed() { - _ = tx.Rollback() - } - return nil -} - -// Collection returns a db.Collection given a name. Results are cached. -func (d *database) Collection(name string) db.Collection { - d.cacheMu.Lock() - defer d.cacheMu.Unlock() - - h := cache.String(name) - - ccol, ok := d.cachedCollections.ReadRaw(h) - if ok { - return ccol.(db.Collection) - } - - col := d.PartialDatabase.NewCollection(name) - d.cachedCollections.Write(h, col) - - return col -} - -// StatementPrepare creates a prepared statement. -func (d *database) StatementPrepare(ctx context.Context, stmt *exql.Statement) (sqlStmt *sql.Stmt, err error) { - var query string - - if d.Settings.LoggingEnabled() { - defer func(start time.Time) { - d.Logger().Log(&db.QueryStatus{ - TxID: d.txID, - SessID: d.sessID, - Query: query, - Err: err, - Start: start, - End: time.Now(), - Context: ctx, - }) - }(time.Now()) - } - - tx := d.Transaction() - - query, _ = d.compileStatement(stmt, nil) - if tx != nil { - sqlStmt, err = compat.PrepareContext(tx.(*baseTx), ctx, query) - return - } - - sqlStmt, err = compat.PrepareContext(d.sess, ctx, query) - return -} - -// ConvertValues converts native values into driver specific values. -func (d *database) ConvertValues(values []interface{}) []interface{} { - if converter, ok := d.PartialDatabase.(hasConvertValues); ok { - return converter.ConvertValues(values) - } - return values -} - -// StatementExec compiles and executes a statement that does not return any -// rows. -func (d *database) StatementExec(ctx context.Context, stmt *exql.Statement, args ...interface{}) (res sql.Result, err error) { - var query string - - if d.Settings.LoggingEnabled() { - defer func(start time.Time) { - - status := db.QueryStatus{ - TxID: d.txID, - SessID: d.sessID, - Query: query, - Args: args, - Err: err, - Start: start, - End: time.Now(), - Context: ctx, - } - - if res != nil { - if rowsAffected, err := res.RowsAffected(); err == nil { - status.RowsAffected = &rowsAffected - } - - if lastInsertID, err := res.LastInsertId(); err == nil { - status.LastInsertID = &lastInsertID - } - } - - d.Logger().Log(&status) - }(time.Now()) - } - - if execer, ok := d.PartialDatabase.(hasStatementExec); ok { - query, args = d.compileStatement(stmt, args) - res, err = execer.StatementExec(ctx, query, args...) - return - } - - tx := d.Transaction() - - if d.Settings.PreparedStatementCacheEnabled() && tx == nil { - var p *Stmt - if p, query, args, err = d.prepareStatement(ctx, stmt, args); err != nil { - return nil, err - } - defer p.Close() - - res, err = compat.PreparedExecContext(p, ctx, args) - return - } - - query, args = d.compileStatement(stmt, args) - if tx != nil { - res, err = compat.ExecContext(tx.(*baseTx), ctx, query, args) - return - } - - res, err = compat.ExecContext(d.sess, ctx, query, args) - return -} - -// StatementQuery compiles and executes a statement that returns rows. 
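// Like StatementExec above, the query methods in this file time themselves
// with a deferred closure that receives time.Now() at entry; a reduced form
// of the pattern is:
//
//	defer func(start time.Time) {
//		d.Logger().Log(&db.QueryStatus{Start: start, End: time.Now()})
//	}(time.Now())
//
// Evaluating time.Now() in the argument list pins the start time, while the
// End field is only computed when the deferred call runs.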
-func (d *database) StatementQuery(ctx context.Context, stmt *exql.Statement, args ...interface{}) (rows *sql.Rows, err error) { - var query string - - if d.Settings.LoggingEnabled() { - defer func(start time.Time) { - d.Logger().Log(&db.QueryStatus{ - TxID: d.txID, - SessID: d.sessID, - Query: query, - Args: args, - Err: err, - Start: start, - End: time.Now(), - Context: ctx, - }) - }(time.Now()) - } - - tx := d.Transaction() - - if d.Settings.PreparedStatementCacheEnabled() && tx == nil { - var p *Stmt - if p, query, args, err = d.prepareStatement(ctx, stmt, args); err != nil { - return nil, err - } - defer p.Close() - - rows, err = compat.PreparedQueryContext(p, ctx, args) - return - } - - query, args = d.compileStatement(stmt, args) - if tx != nil { - rows, err = compat.QueryContext(tx.(*baseTx), ctx, query, args) - return - } - - rows, err = compat.QueryContext(d.sess, ctx, query, args) - return - -} - -// StatementQueryRow compiles and executes a statement that returns at most one -// row. -func (d *database) StatementQueryRow(ctx context.Context, stmt *exql.Statement, args ...interface{}) (row *sql.Row, err error) { - var query string - - if d.Settings.LoggingEnabled() { - defer func(start time.Time) { - d.Logger().Log(&db.QueryStatus{ - TxID: d.txID, - SessID: d.sessID, - Query: query, - Args: args, - Err: err, - Start: start, - End: time.Now(), - Context: ctx, - }) - }(time.Now()) - } - - tx := d.Transaction() - - if d.Settings.PreparedStatementCacheEnabled() && tx == nil { - var p *Stmt - if p, query, args, err = d.prepareStatement(ctx, stmt, args); err != nil { - return nil, err - } - defer p.Close() - - row = compat.PreparedQueryRowContext(p, ctx, args) - return - } - - query, args = d.compileStatement(stmt, args) - if tx != nil { - row = compat.QueryRowContext(tx.(*baseTx), ctx, query, args) - return - } - - row = compat.QueryRowContext(d.sess, ctx, query, args) - return -} - -// Driver returns the underlying *sql.DB or *sql.Tx instance. -func (d *database) Driver() interface{} { - if tx := d.Transaction(); tx != nil { - // A transaction - return tx.(*baseTx).Tx - } - return d.sess -} - -// compileStatement compiles the given statement into a string. -func (d *database) compileStatement(stmt *exql.Statement, args []interface{}) (string, []interface{}) { - if converter, ok := d.PartialDatabase.(hasConvertValues); ok { - args = converter.ConvertValues(args) - } - return d.PartialDatabase.CompileStatement(stmt, args) -} - -// prepareStatement compiles a query and tries to use previously generated -// statement. -func (d *database) prepareStatement(ctx context.Context, stmt *exql.Statement, args []interface{}) (*Stmt, string, []interface{}, error) { - d.sessMu.Lock() - defer d.sessMu.Unlock() - - sess, tx := d.sess, d.Transaction() - if sess == nil && tx == nil { - return nil, "", nil, db.ErrNotConnected - } - - pc, ok := d.cachedStatements.ReadRaw(stmt) - if ok { - // The statement was cached. 
- ps, err := pc.(*Stmt).Open() - if err == nil { - _, args = d.compileStatement(stmt, args) - return ps, ps.query, args, nil - } - } - - query, args := d.compileStatement(stmt, args) - sqlStmt, err := func(query *string) (*sql.Stmt, error) { - if tx != nil { - return compat.PrepareContext(tx.(*baseTx), ctx, *query) - } - return compat.PrepareContext(sess, ctx, *query) - }(&query) - if err != nil { - return nil, "", nil, err - } - - p, err := NewStatement(sqlStmt, query).Open() - if err != nil { - return nil, query, args, err - } - d.cachedStatements.Write(stmt, p) - return p, p.query, args, nil -} - -var waitForConnMu sync.Mutex - -// WaitForConnection tries to execute the given connectFn function, if -// connectFn returns an error, then WaitForConnection will keep trying until -// connectFn returns nil. Maximum waiting time is 5s after having acquired the -// lock. -func (d *database) WaitForConnection(connectFn func() error) error { - // This lock ensures first-come, first-served and prevents opening too many - // file descriptors. - waitForConnMu.Lock() - defer waitForConnMu.Unlock() - - // Minimum waiting time. - waitTime := time.Millisecond * 10 - - // Waitig 5 seconds for a successful connection. - for timeStart := time.Now(); time.Since(timeStart) < time.Second*5; { - err := connectFn() - if err == nil { - return nil // Connected! - } - - // Only attempt to reconnect if the error is too many clients. - if d.PartialDatabase.Err(err) == db.ErrTooManyClients { - // Sleep and try again if, and only if, the server replied with a "too - // many clients" error. - time.Sleep(waitTime) - if waitTime < time.Millisecond*500 { - // Wait a bit more next time. - waitTime = waitTime * 2 - } - continue - } - - // Return any other error immediately. - return err - } - - return db.ErrGivingUpTryingToConnect -} - -// ReplaceWithDollarSign turns a SQL statament with '?' placeholders into -// dollar placeholders, like $1, $2, ..., $n -func ReplaceWithDollarSign(in string) string { - buf := []byte(in) - out := make([]byte, 0, len(buf)) - - i, j, k, t := 0, 1, 0, len(buf) - - for i < t { - if buf[i] == '?' { - out = append(out, buf[k:i]...) - k = i + 1 - - if k < t && buf[k] == '?' { - i = k - } else { - out = append(out, []byte("$"+strconv.Itoa(j))...) - j++ - } - } - i++ - } - out = append(out, buf[k:i]...) 
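	// Behavior sketch for this function (the inputs are illustrative):
	//
	//	ReplaceWithDollarSign("a = ? AND b = ?") // "a = $1 AND b = $2"
	//	ReplaceWithDollarSign("a = ??")          // "a = ?"  ("??" escapes a literal question mark)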
- - return string(out) -} - -func copySettings(from BaseDatabase, into BaseDatabase) { - into.SetLogging(from.LoggingEnabled()) - into.SetLogger(from.Logger()) - into.SetPreparedStatementCache(from.PreparedStatementCacheEnabled()) - into.SetConnMaxLifetime(from.ConnMaxLifetime()) - into.SetMaxIdleConns(from.MaxIdleConns()) - into.SetMaxOpenConns(from.MaxOpenConns()) - - txOptions := from.TxOptions() - if txOptions != nil { - into.SetTxOptions(*txOptions) - } -} - -func newSessionID() uint64 { - if atomic.LoadUint64(&lastSessID) == math.MaxUint64 { - atomic.StoreUint64(&lastSessID, 0) - return 0 - } - return atomic.AddUint64(&lastSessID, 1) -} - -func newBaseTxID() uint64 { - if atomic.LoadUint64(&lastTxID) == math.MaxUint64 { - atomic.StoreUint64(&lastTxID, 0) - return 0 - } - return atomic.AddUint64(&lastTxID, 1) -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/column.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/column.go deleted file mode 100644 index 4140a442fb4..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/column.go +++ /dev/null @@ -1,81 +0,0 @@ -package exql - -import ( - "fmt" - "strings" -) - -type columnT struct { - Name string - Alias string -} - -// Column represents a SQL column. -type Column struct { - Name interface{} - Alias string - hash hash -} - -var _ = Fragment(&Column{}) - -// ColumnWithName creates and returns a Column with the given name. -func ColumnWithName(name string) *Column { - return &Column{Name: name} -} - -// Hash returns a unique identifier for the struct. -func (c *Column) Hash() string { - return c.hash.Hash(c) -} - -// Compile transforms the ColumnValue into an equivalent SQL representation. -func (c *Column) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(c); ok { - return z, nil - } - - alias := c.Alias - - switch value := c.Name.(type) { - case string: - input := trimString(value) - - chunks := separateByAS(input) - - if len(chunks) == 1 { - chunks = separateBySpace(input) - } - - name := chunks[0] - - nameChunks := strings.SplitN(name, layout.ColumnSeparator, 2) - - for i := range nameChunks { - nameChunks[i] = trimString(nameChunks[i]) - if nameChunks[i] == "*" { - continue - } - nameChunks[i] = layout.MustCompile(layout.IdentifierQuote, Raw{Value: nameChunks[i]}) - } - - compiled = strings.Join(nameChunks, layout.ColumnSeparator) - - if len(chunks) > 1 { - alias = trimString(chunks[1]) - alias = layout.MustCompile(layout.IdentifierQuote, Raw{Value: alias}) - } - case Raw: - compiled = value.String() - default: - compiled = fmt.Sprintf("%v", c.Name) - } - - if alias != "" { - compiled = layout.MustCompile(layout.ColumnAliasLayout, columnT{compiled, alias}) - } - - layout.Write(c, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/column_value.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/column_value.go deleted file mode 100644 index 018faa45e49..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/column_value.go +++ /dev/null @@ -1,106 +0,0 @@ -package exql - -import ( - "strings" -) - -// ColumnValue represents a bundle between a column and a corresponding value. -type ColumnValue struct { - Column Fragment - Operator string - Value Fragment - hash hash -} - -var _ = Fragment(&ColumnValue{}) - -type columnValueT struct { - Column string - Operator string - Value string -} - -// Hash returns a unique identifier for the struct. 
-func (c *ColumnValue) Hash() string { - return c.hash.Hash(c) -} - -// Compile transforms the ColumnValue into an equivalent SQL representation. -func (c *ColumnValue) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(c); ok { - return z, nil - } - - column, err := c.Column.Compile(layout) - if err != nil { - return "", err - } - - data := columnValueT{ - Column: column, - Operator: c.Operator, - } - - if c.Value != nil { - data.Value, err = c.Value.Compile(layout) - if err != nil { - return "", err - } - } - - compiled = strings.TrimSpace(layout.MustCompile(layout.ColumnValue, data)) - - layout.Write(c, compiled) - - return -} - -// ColumnValues represents an array of ColumnValue -type ColumnValues struct { - ColumnValues []Fragment - hash hash -} - -var _ = Fragment(&ColumnValues{}) - -// JoinColumnValues returns an array of ColumnValue -func JoinColumnValues(values ...Fragment) *ColumnValues { - return &ColumnValues{ColumnValues: values} -} - -// Insert adds a column to the columns array. -func (c *ColumnValues) Insert(values ...Fragment) *ColumnValues { - c.ColumnValues = append(c.ColumnValues, values...) - c.hash.Reset() - return c -} - -// Hash returns a unique identifier for the struct. -func (c *ColumnValues) Hash() string { - return c.hash.Hash(c) -} - -// Compile transforms the ColumnValues into its SQL representation. -func (c *ColumnValues) Compile(layout *Template) (compiled string, err error) { - - if z, ok := layout.Read(c); ok { - return z, nil - } - - l := len(c.ColumnValues) - - out := make([]string, l) - - for i := range c.ColumnValues { - out[i], err = c.ColumnValues[i].Compile(layout) - if err != nil { - return "", err - } - } - - compiled = strings.TrimSpace(strings.Join(out, layout.IdentifierSeparator)) - - layout.Write(c, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/columns.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/columns.go deleted file mode 100644 index d85e8e4a670..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/columns.go +++ /dev/null @@ -1,76 +0,0 @@ -package exql - -import ( - "strings" -) - -// Columns represents an array of Column. -type Columns struct { - Columns []Fragment - hash hash -} - -var _ = Fragment(&Columns{}) - -// Hash returns a unique identifier. -func (c *Columns) Hash() string { - return c.hash.Hash(c) -} - -// JoinColumns creates and returns an array of Column. -func JoinColumns(columns ...Fragment) *Columns { - return &Columns{Columns: columns} -} - -// OnConditions creates and retuens a new On. -func OnConditions(conditions ...Fragment) *On { - return &On{Conditions: conditions} -} - -// UsingColumns builds a Using from the given columns. -func UsingColumns(columns ...Fragment) *Using { - return &Using{Columns: columns} -} - -// Append -func (c *Columns) Append(a *Columns) *Columns { - c.Columns = append(c.Columns, a.Columns...) - return c -} - -// IsEmpty -func (c *Columns) IsEmpty() bool { - if c == nil || len(c.Columns) < 1 { - return true - } - return false -} - -// Compile transforms the Columns into an equivalent SQL representation. 
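// As with every exql Fragment, the result is memoized: Compile first checks
// layout.Read(c) for a previously compiled string and stores its output via
// layout.Write(c, compiled), keyed by the fragment's Hash(). Fragments that
// mutate, such as ColumnValues.Insert above, reset their hash so a stale
// cache entry is not reused.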
-func (c *Columns) Compile(layout *Template) (compiled string, err error) { - - if z, ok := layout.Read(c); ok { - return z, nil - } - - l := len(c.Columns) - - if l > 0 { - out := make([]string, l) - - for i := 0; i < l; i++ { - out[i], err = c.Columns[i].Compile(layout) - if err != nil { - return "", err - } - } - - compiled = strings.Join(out, layout.IdentifierSeparator) - } else { - compiled = "*" - } - - layout.Write(c, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/database.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/database.go deleted file mode 100644 index 1603607ea9f..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/database.go +++ /dev/null @@ -1,31 +0,0 @@ -package exql - -// Database represents a SQL database. -type Database struct { - Name string - hash hash -} - -var _ = Fragment(&Database{}) - -// DatabaseWithName returns a Database with the given name. -func DatabaseWithName(name string) *Database { - return &Database{Name: name} -} - -// Hash returns a unique identifier for the struct. -func (d *Database) Hash() string { - return d.hash.Hash(d) -} - -// Compile transforms the Database into an equivalent SQL representation. -func (d *Database) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(d); ok { - return c, nil - } - - compiled = layout.MustCompile(layout.IdentifierQuote, Raw{Value: d.Name}) - - layout.Write(d, compiled) - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/default.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/default.go deleted file mode 100644 index 658e57cdacd..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/default.go +++ /dev/null @@ -1,192 +0,0 @@ -package exql - -import ( - "upper.io/db.v3/internal/cache" -) - -const ( - defaultColumnSeparator = `.` - defaultIdentifierSeparator = `, ` - defaultIdentifierQuote = `"{{.Value}}"` - defaultValueSeparator = `, ` - defaultValueQuote = `'{{.}}'` - defaultAndKeyword = `AND` - defaultOrKeyword = `OR` - defaultDescKeyword = `DESC` - defaultAscKeyword = `ASC` - defaultAssignmentOperator = `=` - defaultClauseGroup = `({{.}})` - defaultClauseOperator = ` {{.}} ` - defaultColumnValue = `{{.Column}} {{.Operator}} {{.Value}}` - defaultTableAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - defaultColumnAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - defaultSortByColumnLayout = `{{.Column}} {{.Order}}` - - defaultOrderByLayout = ` - {{if .SortColumns}} - ORDER BY {{.SortColumns}} - {{end}} - ` - - defaultWhereLayout = ` - {{if .Conds}} - WHERE {{.Conds}} - {{end}} - ` - - defaultUsingLayout = ` - {{if .Columns}} - USING ({{.Columns}}) - {{end}} - ` - - defaultJoinLayout = ` - {{if .Table}} - {{ if .On }} - {{.Type}} JOIN {{.Table}} - {{.On}} - {{ else if .Using }} - {{.Type}} JOIN {{.Table}} - {{.Using}} - {{ else if .Type | eq "CROSS" }} - {{.Type}} JOIN {{.Table}} - {{else}} - NATURAL {{.Type}} JOIN {{.Table}} - {{end}} - {{end}} - ` - - defaultOnLayout = ` - {{if .Conds}} - ON {{.Conds}} - {{end}} - ` - - defaultSelectLayout = ` - SELECT - {{if .Distinct}} - DISTINCT - {{end}} - - {{if .Columns}} - {{.Columns | compile}} - {{else}} - * - {{end}} - - {{if defined .Table}} - FROM {{.Table | compile}} - {{end}} - - {{.Joins | compile}} - - {{.Where | compile}} - - {{.GroupBy | compile}} - - {{.OrderBy | compile}} - - {{if .Limit}} - LIMIT {{.Limit}} - {{end}} - - {{if .Offset}} - OFFSET {{.Offset}} - {{end}} - ` - defaultDeleteLayout = ` - DELETE - FROM 
{{.Table | compile}} - {{.Where | compile}} - {{if .Limit}} - LIMIT {{.Limit}} - {{end}} - {{if .Offset}} - OFFSET {{.Offset}} - {{end}} - ` - defaultUpdateLayout = ` - UPDATE - {{.Table | compile}} - SET {{.ColumnValues | compile}} - {{.Where | compile}} - ` - - defaultCountLayout = ` - SELECT - COUNT(1) AS _t - FROM {{.Table | compile}} - {{.Where | compile}} - - {{if .Limit}} - LIMIT {{.Limit | compile}} - {{end}} - - {{if .Offset}} - OFFSET {{.Offset}} - {{end}} - ` - - defaultInsertLayout = ` - INSERT INTO {{.Table | compile}} - {{if .Columns }}({{.Columns | compile}}){{end}} - VALUES - {{.Values | compile}} - {{if .Returning}} - RETURNING {{.Returning | compile}} - {{end}} - ` - - defaultTruncateLayout = ` - TRUNCATE TABLE {{.Table | compile}} - ` - - defaultDropDatabaseLayout = ` - DROP DATABASE {{.Database | compile}} - ` - - defaultDropTableLayout = ` - DROP TABLE {{.Table | compile}} - ` - - defaultGroupByLayout = ` - {{if .GroupColumns}} - GROUP BY {{.GroupColumns}} - {{end}} - ` -) - -var defaultTemplate = &Template{ - AndKeyword: defaultAndKeyword, - AscKeyword: defaultAscKeyword, - AssignmentOperator: defaultAssignmentOperator, - ClauseGroup: defaultClauseGroup, - ClauseOperator: defaultClauseOperator, - ColumnAliasLayout: defaultColumnAliasLayout, - ColumnSeparator: defaultColumnSeparator, - ColumnValue: defaultColumnValue, - CountLayout: defaultCountLayout, - DeleteLayout: defaultDeleteLayout, - DescKeyword: defaultDescKeyword, - DropDatabaseLayout: defaultDropDatabaseLayout, - DropTableLayout: defaultDropTableLayout, - GroupByLayout: defaultGroupByLayout, - IdentifierQuote: defaultIdentifierQuote, - IdentifierSeparator: defaultIdentifierSeparator, - InsertLayout: defaultInsertLayout, - JoinLayout: defaultJoinLayout, - OnLayout: defaultOnLayout, - OrKeyword: defaultOrKeyword, - OrderByLayout: defaultOrderByLayout, - SelectLayout: defaultSelectLayout, - SortByColumnLayout: defaultSortByColumnLayout, - TableAliasLayout: defaultTableAliasLayout, - TruncateLayout: defaultTruncateLayout, - UpdateLayout: defaultUpdateLayout, - UsingLayout: defaultUsingLayout, - ValueQuote: defaultValueQuote, - ValueSeparator: defaultValueSeparator, - WhereLayout: defaultWhereLayout, - - Cache: cache.NewCache(), -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/group_by.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/group_by.go deleted file mode 100644 index 4f0132c7f26..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/group_by.go +++ /dev/null @@ -1,54 +0,0 @@ -package exql - -// GroupBy represents a SQL's "group by" statement. -type GroupBy struct { - Columns Fragment - hash hash -} - -var _ = Fragment(&GroupBy{}) - -type groupByT struct { - GroupColumns string -} - -// Hash returns a unique identifier. -func (g *GroupBy) Hash() string { - return g.hash.Hash(g) -} - -// GroupByColumns creates and returns a GroupBy with the given column. -func GroupByColumns(columns ...Fragment) *GroupBy { - return &GroupBy{Columns: JoinColumns(columns...)} -} - -func (g *GroupBy) IsEmpty() bool { - if g == nil || g.Columns == nil { - return true - } - return g.Columns.(hasIsEmpty).IsEmpty() -} - -// Compile transforms the GroupBy into an equivalent SQL representation. 
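// Rendering sketch: with defaultGroupByLayout above, a GroupBy whose
// compiled column list is `"year", "month"` produces (whitespace aside):
//
//	GROUP BY "year", "month"
//
// MustCompile executes the layout as a text/template against a groupByT
// value whose GroupColumns field holds the pre-compiled column list.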
-func (g *GroupBy) Compile(layout *Template) (compiled string, err error) { - - if c, ok := layout.Read(g); ok { - return c, nil - } - - if g.Columns != nil { - columns, err := g.Columns.Compile(layout) - if err != nil { - return "", err - } - - data := groupByT{ - GroupColumns: columns, - } - compiled = layout.MustCompile(layout.GroupByLayout, data) - } - - layout.Write(g, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/hash.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/hash.go deleted file mode 100644 index 48f3c2f06ea..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/hash.go +++ /dev/null @@ -1,26 +0,0 @@ -package exql - -import ( - "reflect" - "sync/atomic" - - "upper.io/db.v3/internal/cache" -) - -type hash struct { - v atomic.Value -} - -func (h *hash) Hash(i interface{}) string { - v := h.v.Load() - if r, ok := v.(string); ok && r != "" { - return r - } - s := reflect.TypeOf(i).String() + ":" + cache.Hash(i) - h.v.Store(s) - return s -} - -func (h *hash) Reset() { - h.v.Store("") -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/interfaces.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/interfaces.go deleted file mode 100644 index b871ef8c110..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/interfaces.go +++ /dev/null @@ -1,20 +0,0 @@ -package exql - -import ( - "upper.io/db.v3/internal/cache" -) - -// Fragment is any interface that can be both cached and compiled. -type Fragment interface { - cache.Hashable - - compilable -} - -type compilable interface { - Compile(*Template) (string, error) -} - -type hasIsEmpty interface { - IsEmpty() bool -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/join.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/join.go deleted file mode 100644 index ba982f7cbc8..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/join.go +++ /dev/null @@ -1,181 +0,0 @@ -package exql - -import ( - "strings" -) - -type innerJoinT struct { - Type string - Table string - On string - Using string -} - -// Joins represents the union of different join conditions. -type Joins struct { - Conditions []Fragment - hash hash -} - -var _ = Fragment(&Joins{}) - -// Hash returns a unique identifier for the struct. -func (j *Joins) Hash() string { - return j.hash.Hash(j) -} - -// Compile transforms the Where into an equivalent SQL representation. -func (j *Joins) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(j); ok { - return c, nil - } - - l := len(j.Conditions) - - chunks := make([]string, 0, l) - - if l > 0 { - for i := 0; i < l; i++ { - chunk, err := j.Conditions[i].Compile(layout) - if err != nil { - return "", err - } - chunks = append(chunks, chunk) - } - } - - compiled = strings.Join(chunks, " ") - - layout.Write(j, compiled) - - return -} - -// JoinConditions creates a Joins object. -func JoinConditions(joins ...*Join) *Joins { - fragments := make([]Fragment, len(joins)) - for i := range fragments { - fragments[i] = joins[i] - } - return &Joins{Conditions: fragments} -} - -// Join represents a generic JOIN statement. -type Join struct { - Type string - Table Fragment - On Fragment - Using Fragment - hash hash -} - -var _ = Fragment(&Join{}) - -// Hash returns a unique identifier for the struct. -func (j *Join) Hash() string { - return j.hash.Hash(j) -} - -// Compile transforms the Join into its equivalent SQL representation. 
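The hash type above memoizes an expensive string through sync/atomic's atomic.Value: the first call computes and stores, later calls reuse. A sketch of the same lazy, race-safe caching idiom (memo is an illustrative name, not part of the patch):

package main

import (
	"fmt"
	"sync/atomic"
)

// memo mirrors exql's hash type: a lazily computed, race-safe cached string.
type memo struct{ v atomic.Value }

func (m *memo) get(compute func() string) string {
	if s, ok := m.v.Load().(string); ok && s != "" {
		return s
	}
	// compute may run more than once under contention; the duplicate work
	// is harmless and the last write wins.
	s := compute()
	m.v.Store(s)
	return s
}

func main() {
	var m memo
	calls := 0
	f := func() string { calls++; return "expensive" }
	fmt.Println(m.get(f), m.get(f), calls) // expensive expensive 1
}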
-func (j *Join) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(j); ok { - return c, nil - } - - if j.Table == nil { - return "", nil - } - - table, err := j.Table.Compile(layout) - if err != nil { - return "", err - } - - on, err := layout.doCompile(j.On) - if err != nil { - return "", err - } - - using, err := layout.doCompile(j.Using) - if err != nil { - return "", err - } - - data := innerJoinT{ - Type: j.Type, - Table: table, - On: on, - Using: using, - } - - compiled = layout.MustCompile(layout.JoinLayout, data) - layout.Write(j, compiled) - return -} - -// On represents JOIN conditions. -type On Where - -var _ = Fragment(&On{}) - -// Hash returns a unique identifier. -func (o *On) Hash() string { - return o.hash.Hash(o) -} - -// Compile transforms the On into an equivalent SQL representation. -func (o *On) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(o); ok { - return c, nil - } - - grouped, err := groupCondition(layout, o.Conditions, layout.MustCompile(layout.ClauseOperator, layout.AndKeyword)) - if err != nil { - return "", err - } - - if grouped != "" { - compiled = layout.MustCompile(layout.OnLayout, conds{grouped}) - } - - layout.Write(o, compiled) - return -} - -// Using represents a USING function. -type Using Columns - -var _ = Fragment(&Using{}) - -type usingT struct { - Columns string -} - -// Hash returns a unique identifier. -func (u *Using) Hash() string { - return u.hash.Hash(u) -} - -// Compile transforms the Using into an equivalent SQL representation. -func (u *Using) Compile(layout *Template) (compiled string, err error) { - if u == nil { - return "", nil - } - - if c, ok := layout.Read(u); ok { - return c, nil - } - - if len(u.Columns) > 0 { - c := Columns(*u) - columns, err := c.Compile(layout) - if err != nil { - return "", err - } - data := usingT{Columns: columns} - compiled = layout.MustCompile(layout.UsingLayout, data) - } - - layout.Write(u, compiled) - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/order_by.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/order_by.go deleted file mode 100644 index 8ee9f4641e9..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/order_by.go +++ /dev/null @@ -1,164 +0,0 @@ -package exql - -import ( - "fmt" - "strings" -) - -// Order represents the order in which SQL results are sorted. -type Order uint8 - -// Possible values for Order -const ( - DefaultOrder = Order(iota) - Ascendent - Descendent -) - -// SortColumn represents the column-order relation in an ORDER BY clause. -type SortColumn struct { - Column Fragment - Order - hash hash -} - -var _ = Fragment(&SortColumn{}) - -type sortColumnT struct { - Column string - Order string -} - -var _ = Fragment(&SortColumn{}) - -// SortColumns represents the columns in an ORDER BY clause. -type SortColumns struct { - Columns []Fragment - hash hash -} - -var _ = Fragment(&SortColumns{}) - -// OrderBy represents an ORDER BY clause. -type OrderBy struct { - SortColumns Fragment - hash hash -} - -var _ = Fragment(&OrderBy{}) - -type orderByT struct { - SortColumns string -} - -// JoinSortColumns creates and returns an array of column-order relations. -func JoinSortColumns(values ...Fragment) *SortColumns { - return &SortColumns{Columns: values} -} - -// JoinWithOrderBy creates an returns an OrderBy using the given SortColumns. -func JoinWithOrderBy(sc *SortColumns) *OrderBy { - return &OrderBy{SortColumns: sc} -} - -// Hash returns a unique identifier for the struct. 
-func (s *SortColumn) Hash() string { - return s.hash.Hash(s) -} - -// Compile transforms the SortColumn into an equivalent SQL representation. -func (s *SortColumn) Compile(layout *Template) (compiled string, err error) { - - if c, ok := layout.Read(s); ok { - return c, nil - } - - column, err := s.Column.Compile(layout) - if err != nil { - return "", err - } - - orderBy, err := s.Order.Compile(layout) - if err != nil { - return "", err - } - - data := sortColumnT{Column: column, Order: orderBy} - - compiled = layout.MustCompile(layout.SortByColumnLayout, data) - - layout.Write(s, compiled) - - return -} - -// Hash returns a unique identifier for the struct. -func (s *SortColumns) Hash() string { - return s.hash.Hash(s) -} - -// Compile transforms the SortColumns into an equivalent SQL representation. -func (s *SortColumns) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(s); ok { - return z, nil - } - - z := make([]string, len(s.Columns)) - - for i := range s.Columns { - z[i], err = s.Columns[i].Compile(layout) - if err != nil { - return "", err - } - } - - compiled = strings.Join(z, layout.IdentifierSeparator) - - layout.Write(s, compiled) - - return -} - -// Hash returns a unique identifier for the struct. -func (s *OrderBy) Hash() string { - return s.hash.Hash(s) -} - -// Compile transforms the SortColumn into an equivalent SQL representation. -func (s *OrderBy) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(s); ok { - return z, nil - } - - if s.SortColumns != nil { - sortColumns, err := s.SortColumns.Compile(layout) - if err != nil { - return "", err - } - - data := orderByT{ - SortColumns: sortColumns, - } - compiled = layout.MustCompile(layout.OrderByLayout, data) - } - - layout.Write(s, compiled) - - return -} - -// Hash returns a unique identifier. -func (s *Order) Hash() string { - return fmt.Sprintf("%T.%d", s, uint8(*s)) -} - -// Compile transforms the SortColumn into an equivalent SQL representation. -func (s Order) Compile(layout *Template) (string, error) { - switch s { - case Ascendent: - return layout.AscKeyword, nil - case Descendent: - return layout.DescKeyword, nil - } - return "", nil -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/raw.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/raw.go deleted file mode 100644 index 2936c879ca5..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/raw.go +++ /dev/null @@ -1,38 +0,0 @@ -package exql - -import ( - "fmt" - "strings" -) - -var ( - _ = fmt.Stringer(&Raw{}) -) - -// Raw represents a value that is meant to be used in a query without escaping. -type Raw struct { - Value string // Value should not be modified after assigned. - hash hash -} - -// RawValue creates and returns a new raw value. -func RawValue(v string) *Raw { - return &Raw{Value: strings.TrimSpace(v)} -} - -// Hash returns a unique identifier for the struct. -func (r *Raw) Hash() string { - return r.hash.Hash(r) -} - -// Compile returns the raw value. -func (r *Raw) Compile(*Template) (string, error) { - return r.Value, nil -} - -// String returns the raw value. 
-func (r *Raw) String() string { - return r.Value -} - -var _ = Fragment(&Raw{}) diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/returning.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/returning.go deleted file mode 100644 index ef392bf550d..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/returning.go +++ /dev/null @@ -1,35 +0,0 @@ -package exql - -// Returning represents a RETURNING clause. -type Returning struct { - *Columns - hash hash -} - -// Hash returns a unique identifier for the struct. -func (r *Returning) Hash() string { - return r.hash.Hash(r) -} - -var _ = Fragment(&Returning{}) - -// ReturningColumns creates and returns an array of Column. -func ReturningColumns(columns ...Fragment) *Returning { - return &Returning{Columns: &Columns{Columns: columns}} -} - -// Compile transforms the clause into its equivalent SQL representation. -func (r *Returning) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(r); ok { - return z, nil - } - - compiled, err = r.Columns.Compile(layout) - if err != nil { - return "", err - } - - layout.Write(r, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/statement.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/statement.go deleted file mode 100644 index 032466e2542..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/statement.go +++ /dev/null @@ -1,111 +0,0 @@ -package exql - -import ( - "errors" - "reflect" - "strings" -) - -var errUnknownTemplateType = errors.New("Unknown template type") - -// represents different kinds of SQL statements. -type Statement struct { - Type - Table Fragment - Database Fragment - Columns Fragment - Values Fragment - Distinct bool - ColumnValues Fragment - OrderBy Fragment - GroupBy Fragment - Joins Fragment - Where Fragment - Returning Fragment - - Limit - Offset - - SQL string - - hash hash - amendFn func(string) string -} - -func (layout *Template) doCompile(c Fragment) (string, error) { - if c != nil && !reflect.ValueOf(c).IsNil() { - return c.Compile(layout) - } - return "", nil -} - -// Hash returns a unique identifier for the struct. -func (s *Statement) Hash() string { - return s.hash.Hash(s) -} - -func (s *Statement) SetAmendment(amendFn func(string) string) { - s.amendFn = amendFn -} - -func (s *Statement) Amend(in string) string { - if s.amendFn == nil { - return in - } - return s.amendFn(in) -} - -func (s *Statement) template(layout *Template) (string, error) { - switch s.Type { - case Truncate: - return layout.TruncateLayout, nil - case DropTable: - return layout.DropTableLayout, nil - case DropDatabase: - return layout.DropDatabaseLayout, nil - case Count: - return layout.CountLayout, nil - case Select: - return layout.SelectLayout, nil - case Delete: - return layout.DeleteLayout, nil - case Update: - return layout.UpdateLayout, nil - case Insert: - return layout.InsertLayout, nil - default: - return "", errUnknownTemplateType - } -} - -// Compile transforms the Statement into an equivalent SQL query. -func (s *Statement) Compile(layout *Template) (compiled string, err error) { - if s.Type == SQL { - // No need to hit the cache. - return s.SQL, nil - } - - if z, ok := layout.Read(s); ok { - return s.Amend(z), nil - } - - tpl, err := s.template(layout) - if err != nil { - return "", err - } - - compiled = layout.MustCompile(tpl, s) - - compiled = strings.TrimSpace(compiled) - layout.Write(s, compiled) - - return s.Amend(compiled), nil -} - -// RawSQL represents a raw SQL statement. 
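Statement.Compile above is a read-through cache: return the cached SQL when the layout has it, otherwise compile once, trim, and store. A generic sketch of that pattern under a plain map and RWMutex (exql's actual cache is hash-keyed; sqlCache and its names are illustrative):

package main

import (
	"fmt"
	"sync"
)

type sqlCache struct {
	mu sync.RWMutex
	m  map[string]string
}

// compileCached returns the cached SQL for key, compiling and storing it
// on a miss.
func (c *sqlCache) compileCached(key string, compile func() string) string {
	c.mu.RLock()
	s, ok := c.m[key]
	c.mu.RUnlock()
	if ok {
		return s
	}
	s = compile()
	c.mu.Lock()
	c.m[key] = s
	c.mu.Unlock()
	return s
}

func main() {
	c := &sqlCache{m: map[string]string{}}
	fmt.Println(c.compileCached("stmt:1", func() string { return "SELECT * FROM t" }))
}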
-func RawSQL(s string) *Statement { - return &Statement{ - Type: SQL, - SQL: s, - } -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/table.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/table.go deleted file mode 100644 index 5c0c8f83acd..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/table.go +++ /dev/null @@ -1,94 +0,0 @@ -package exql - -import ( - "strings" -) - -type tableT struct { - Name string - Alias string -} - -// Table struct represents a SQL table. -type Table struct { - Name interface{} - hash hash -} - -var _ = Fragment(&Table{}) - -func quotedTableName(layout *Template, input string) string { - input = trimString(input) - - // chunks := reAliasSeparator.Split(input, 2) - chunks := separateByAS(input) - - if len(chunks) == 1 { - // chunks = reSpaceSeparator.Split(input, 2) - chunks = separateBySpace(input) - } - - name := chunks[0] - - nameChunks := strings.SplitN(name, layout.ColumnSeparator, 2) - - for i := range nameChunks { - // nameChunks[i] = strings.TrimSpace(nameChunks[i]) - nameChunks[i] = trimString(nameChunks[i]) - nameChunks[i] = layout.MustCompile(layout.IdentifierQuote, Raw{Value: nameChunks[i]}) - } - - name = strings.Join(nameChunks, layout.ColumnSeparator) - - var alias string - - if len(chunks) > 1 { - // alias = strings.TrimSpace(chunks[1]) - alias = trimString(chunks[1]) - alias = layout.MustCompile(layout.IdentifierQuote, Raw{Value: alias}) - } - - return layout.MustCompile(layout.TableAliasLayout, tableT{name, alias}) -} - -// TableWithName creates an returns a Table with the given name. -func TableWithName(name string) *Table { - return &Table{Name: name} -} - -// Hash returns a string hash of the table value. -func (t *Table) Hash() string { - return t.hash.Hash(t) -} - -// Compile transforms a table struct into a SQL chunk. -func (t *Table) Compile(layout *Template) (compiled string, err error) { - - if z, ok := layout.Read(t); ok { - return z, nil - } - - switch value := t.Name.(type) { - case string: - if t.Name == "" { - return - } - - // Splitting tables by a comma - parts := separateByComma(value) - - l := len(parts) - - for i := 0; i < l; i++ { - parts[i] = quotedTableName(layout, parts[i]) - } - - compiled = strings.Join(parts, layout.IdentifierSeparator) - case Raw: - compiled = value.String() - } - - layout.Write(t, compiled) - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/template.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/template.go deleted file mode 100644 index bec32bf4ad8..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/template.go +++ /dev/null @@ -1,136 +0,0 @@ -package exql - -import ( - "bytes" - "reflect" - "sync" - "text/template" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/cache" -) - -// Type is the type of SQL query the statement represents. -type Type uint - -// Values for Type. -const ( - NoOp = Type(iota) - - Truncate - DropTable - DropDatabase - Count - Insert - Select - Update - Delete - - SQL -) - -type ( - // Limit represents the SQL limit in a query. - Limit int - // Offset represents the SQL offset in a query. - Offset int -) - -// Template is an SQL template. 
-type Template struct { - AndKeyword string - AscKeyword string - AssignmentOperator string - ClauseGroup string - ClauseOperator string - ColumnAliasLayout string - ColumnSeparator string - ColumnValue string - CountLayout string - DeleteLayout string - DescKeyword string - DropDatabaseLayout string - DropTableLayout string - GroupByLayout string - IdentifierQuote string - IdentifierSeparator string - InsertLayout string - JoinLayout string - OnLayout string - OrKeyword string - OrderByLayout string - SelectLayout string - SortByColumnLayout string - TableAliasLayout string - TruncateLayout string - UpdateLayout string - UsingLayout string - ValueQuote string - ValueSeparator string - WhereLayout string - - ComparisonOperator map[db.ComparisonOperator]string - - templateMutex sync.RWMutex - templateMap map[string]*template.Template - - *cache.Cache -} - -func (layout *Template) MustCompile(templateText string, data interface{}) string { - var b bytes.Buffer - - v, ok := layout.getTemplate(templateText) - if !ok || true { - v = template. - Must(template.New(""). - Funcs(map[string]interface{}{ - "defined": func(in Fragment) bool { - if in == nil || reflect.ValueOf(in).IsNil() { - return false - } - if check, ok := in.(hasIsEmpty); ok { - if check.IsEmpty() { - return false - } - } - return true - }, - "compile": func(in Fragment) (string, error) { - s, err := layout.doCompile(in) - if err != nil { - return "", err - } - return s, nil - }, - }). - Parse(templateText)) - - layout.setTemplate(templateText, v) - } - - if err := v.Execute(&b, data); err != nil { - panic("There was an error compiling the following template:\n" + templateText + "\nError was: " + err.Error()) - } - - return b.String() -} - -func (t *Template) getTemplate(k string) (*template.Template, bool) { - t.templateMutex.RLock() - defer t.templateMutex.RUnlock() - - if t.templateMap == nil { - t.templateMap = make(map[string]*template.Template) - } - - v, ok := t.templateMap[k] - return v, ok -} - -func (t *Template) setTemplate(k string, v *template.Template) { - t.templateMutex.Lock() - defer t.templateMutex.Unlock() - - t.templateMap[k] = v -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/utilities.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/utilities.go deleted file mode 100644 index 972ebb47a31..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/utilities.go +++ /dev/null @@ -1,151 +0,0 @@ -package exql - -import ( - "strings" -) - -// isBlankSymbol returns true if the given byte is either space, tab, carriage -// return or newline. -func isBlankSymbol(in byte) bool { - return in == ' ' || in == '\t' || in == '\r' || in == '\n' -} - -// trimString returns a slice of s with a leading and trailing blank symbols -// (as defined by isBlankSymbol) removed. -func trimString(s string) string { - - // This conversion is rather slow. - // return string(trimBytes([]byte(s))) - - start, end := 0, len(s)-1 - - if end < start { - return "" - } - - for isBlankSymbol(s[start]) { - start++ - if start >= end { - return "" - } - } - - for isBlankSymbol(s[end]) { - end-- - } - - return s[start : end+1] -} - -// trimBytes returns a slice of s with a leading and trailing blank symbols (as -// defined by isBlankSymbol) removed. 
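MustCompile above wires two custom template functions, defined and compile, into the parsed template via Funcs, which is what makes pipelines like {{.Columns | compile}} work; note that its `!ok || true` condition disables the getTemplate lookup, so the template text is re-parsed on every call. A minimal sketch of the FuncMap technique (the "compile" func body here is illustrative, not exql's):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Register a "compile" func and use it in a pipeline, as MustCompile does.
	tpl := template.Must(template.New("sel").
		Funcs(template.FuncMap{
			"compile": func(in string) (string, error) { return `"` + in + `"`, nil },
		}).
		Parse(`SELECT {{.Cols | compile}} FROM t`))
	_ = tpl.Execute(os.Stdout, struct{ Cols string }{"id"}) // prints: SELECT "id" FROM t
}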
-func trimBytes(s []byte) []byte { - - start, end := 0, len(s)-1 - - if end < start { - return []byte{} - } - - for isBlankSymbol(s[start]) { - start++ - if start >= end { - return []byte{} - } - } - - for isBlankSymbol(s[end]) { - end-- - } - - return s[start : end+1] -} - -/* -// Separates by a comma, ignoring spaces too. -// This was slower than strings.Split. -func separateByComma(in string) (out []string) { - - out = []string{} - - start, lim := 0, len(in)-1 - - for start < lim { - var end int - - for end = start; end <= lim; end++ { - // Is a comma? - if in[end] == ',' { - break - } - } - - out = append(out, trimString(in[start:end])) - - start = end + 1 - } - - return -} -*/ - -// Separates by a comma, ignoring spaces too. -func separateByComma(in string) (out []string) { - out = strings.Split(in, ",") - for i := range out { - out[i] = trimString(out[i]) - } - return -} - -// Separates by spaces, ignoring spaces too. -func separateBySpace(in string) (out []string) { - if len(in) == 0 { - return []string{""} - } - - pre := strings.Split(in, " ") - out = make([]string, 0, len(pre)) - - for i := range pre { - pre[i] = trimString(pre[i]) - if pre[i] != "" { - out = append(out, pre[i]) - } - } - - return -} - -func separateByAS(in string) (out []string) { - out = []string{} - - if len(in) < 6 { - // The minimum expression with the AS keyword is "x AS y", 6 chars. - return []string{in} - } - - start, lim := 0, len(in)-1 - - for start <= lim { - var end int - - for end = start; end <= lim; end++ { - if end > 3 && isBlankSymbol(in[end]) && isBlankSymbol(in[end-3]) { - if (in[end-1] == 's' || in[end-1] == 'S') && (in[end-2] == 'a' || in[end-2] == 'A') { - break - } - } - } - - if end < lim { - out = append(out, trimString(in[start:end-3])) - } else { - out = append(out, trimString(in[start:end])) - } - - start = end + 1 - } - - return -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/value.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/value.go deleted file mode 100644 index 49b22aa9d7c..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/value.go +++ /dev/null @@ -1,155 +0,0 @@ -package exql - -import ( - "fmt" - "strings" -) - -// ValueGroups represents an array of value groups. -type ValueGroups struct { - Values []*Values - hash hash -} - -func (vg *ValueGroups) IsEmpty() bool { - if vg == nil || len(vg.Values) < 1 { - return true - } - for i := range vg.Values { - if !vg.Values[i].IsEmpty() { - return false - } - } - return true -} - -var _ = Fragment(&ValueGroups{}) - -// Values represents an array of Value. -type Values struct { - Values []Fragment - hash hash -} - -func (vs *Values) IsEmpty() bool { - if vs == nil || len(vs.Values) < 1 { - return true - } - return false -} - -var _ = Fragment(&Values{}) - -// Value represents an escaped SQL value. -type Value struct { - V interface{} - hash hash -} - -var _ = Fragment(&Value{}) - -// NewValue creates and returns a Value. -func NewValue(v interface{}) *Value { - return &Value{V: v} -} - -// NewValueGroup creates and returns an array of values. -func NewValueGroup(v ...Fragment) *Values { - return &Values{Values: v} -} - -// Hash returns a unique identifier for the struct. -func (v *Value) Hash() string { - return v.hash.Hash(v) -} - -func (v *Value) IsEmpty() bool { - return false -} - -// Compile transforms the Value into an equivalent SQL representation. 
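The separator helpers above do byte-level scanning instead of regexps for speed. A hypothetical in-package test (not part of the patch) illustrating their contracts: separateByAS splits a table expression on a case-insensitive " AS " keyword, and separateByComma trims blanks around each element:

package exql

import (
	"reflect"
	"testing"
)

func TestSeparators(t *testing.T) {
	if got := separateByAS("users AS u"); !reflect.DeepEqual(got, []string{"users", "u"}) {
		t.Fatalf("separateByAS: got %v", got)
	}
	if got := separateByComma("a, b ,c"); !reflect.DeepEqual(got, []string{"a", "b", "c"}) {
		t.Fatalf("separateByComma: got %v", got)
	}
}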
-func (v *Value) Compile(layout *Template) (compiled string, err error) { - - if z, ok := layout.Read(v); ok { - return z, nil - } - - switch t := v.V.(type) { - case Raw: - compiled, err = t.Compile(layout) - if err != nil { - return "", err - } - case Fragment: - compiled, err = t.Compile(layout) - if err != nil { - return "", err - } - default: - compiled = layout.MustCompile(layout.ValueQuote, RawValue(fmt.Sprintf(`%v`, v.V))) - } - - layout.Write(v, compiled) - - return -} - -// Hash returns a unique identifier for the struct. -func (vs *Values) Hash() string { - return vs.hash.Hash(vs) -} - -// Compile transforms the Values into an equivalent SQL representation. -func (vs *Values) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(vs); ok { - return c, nil - } - - l := len(vs.Values) - if l > 0 { - chunks := make([]string, 0, l) - for i := 0; i < l; i++ { - chunk, err := vs.Values[i].Compile(layout) - if err != nil { - return "", err - } - chunks = append(chunks, chunk) - } - compiled = layout.MustCompile(layout.ClauseGroup, strings.Join(chunks, layout.ValueSeparator)) - } - layout.Write(vs, compiled) - return -} - -// Hash returns a unique identifier for the struct. -func (vg *ValueGroups) Hash() string { - return vg.hash.Hash(vg) -} - -// Compile transforms the ValueGroups into an equivalent SQL representation. -func (vg *ValueGroups) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(vg); ok { - return c, nil - } - - l := len(vg.Values) - if l > 0 { - chunks := make([]string, 0, l) - for i := 0; i < l; i++ { - chunk, err := vg.Values[i].Compile(layout) - if err != nil { - return "", err - } - chunks = append(chunks, chunk) - } - compiled = strings.Join(chunks, layout.ValueSeparator) - } - - layout.Write(vg, compiled) - return -} - -// JoinValueGroups creates a new *ValueGroups object. -func JoinValueGroups(values ...*Values) *ValueGroups { - return &ValueGroups{Values: values} -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/exql/where.go b/vendor/upper.io/db.v3/internal/sqladapter/exql/where.go deleted file mode 100644 index 3e77e005db7..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/exql/where.go +++ /dev/null @@ -1,137 +0,0 @@ -package exql - -import ( - "strings" -) - -// Or represents an SQL OR operator. -type Or Where - -// And represents an SQL AND operator. -type And Where - -// Where represents an SQL WHERE clause. -type Where struct { - Conditions []Fragment - hash hash -} - -var _ = Fragment(&Where{}) - -type conds struct { - Conds string -} - -// WhereConditions creates and retuens a new Where. -func WhereConditions(conditions ...Fragment) *Where { - return &Where{Conditions: conditions} -} - -// JoinWithOr creates and returns a new Or. -func JoinWithOr(conditions ...Fragment) *Or { - return &Or{Conditions: conditions} -} - -// JoinWithAnd creates and returns a new And. -func JoinWithAnd(conditions ...Fragment) *And { - return &And{Conditions: conditions} -} - -// Hash returns a unique identifier for the struct. -func (w *Where) Hash() string { - return w.hash.Hash(w) -} - -// Appends adds the conditions to the ones that already exist. -func (w *Where) Append(a *Where) *Where { - if a != nil { - w.Conditions = append(w.Conditions, a.Conditions...) - } - return w -} - -// Hash returns a unique identifier. -func (o *Or) Hash() string { - w := Where(*o) - return `Or(` + w.Hash() + `)` -} - -// Hash returns a unique identifier. 
-func (a *And) Hash() string { - w := Where(*a) - return `And(` + w.Hash() + `)` -} - -// Compile transforms the Or into an equivalent SQL representation. -func (o *Or) Compile(layout *Template) (compiled string, err error) { - if z, ok := layout.Read(o); ok { - return z, nil - } - - compiled, err = groupCondition(layout, o.Conditions, layout.MustCompile(layout.ClauseOperator, layout.OrKeyword)) - if err != nil { - return "", err - } - - layout.Write(o, compiled) - - return -} - -// Compile transforms the And into an equivalent SQL representation. -func (a *And) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(a); ok { - return c, nil - } - - compiled, err = groupCondition(layout, a.Conditions, layout.MustCompile(layout.ClauseOperator, layout.AndKeyword)) - if err != nil { - return "", err - } - - layout.Write(a, compiled) - - return -} - -// Compile transforms the Where into an equivalent SQL representation. -func (w *Where) Compile(layout *Template) (compiled string, err error) { - if c, ok := layout.Read(w); ok { - return c, nil - } - - grouped, err := groupCondition(layout, w.Conditions, layout.MustCompile(layout.ClauseOperator, layout.AndKeyword)) - if err != nil { - return "", err - } - - if grouped != "" { - compiled = layout.MustCompile(layout.WhereLayout, conds{grouped}) - } - - layout.Write(w, compiled) - - return -} - -func groupCondition(layout *Template, terms []Fragment, joinKeyword string) (string, error) { - l := len(terms) - - chunks := make([]string, 0, l) - - if l > 0 { - for i := 0; i < l; i++ { - chunk, err := terms[i].Compile(layout) - if err != nil { - return "", err - } - chunks = append(chunks, chunk) - } - } - - if len(chunks) > 0 { - return layout.MustCompile(layout.ClauseGroup, strings.Join(chunks, joinKeyword)), nil - } - - return "", nil -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/result.go b/vendor/upper.io/db.v3/internal/sqladapter/result.go deleted file mode 100644 index 082b6b82c34..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/result.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
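groupCondition above compiles each term, joins the chunks with the clause operator (AND/OR), and wraps the result with the ClauseGroup layout. Restated as a standalone sketch over plain strings (groupConditions is an illustrative name):

package main

import (
	"fmt"
	"strings"
)

// groupConditions joins already-compiled terms with a clause keyword and
// parenthesizes the group, as groupCondition does via the template layouts.
func groupConditions(terms []string, joinKeyword string) string {
	if len(terms) == 0 {
		return ""
	}
	return "(" + strings.Join(terms, joinKeyword) + ")"
}

func main() {
	fmt.Println(groupConditions([]string{`"a" = 1`, `"b" = 2`}, " AND ")) // ("a" = 1 AND "b" = 2)
}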
- -package sqladapter - -import ( - "sync" - "sync/atomic" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/immutable" - "upper.io/db.v3/lib/sqlbuilder" -) - -type Result struct { - builder sqlbuilder.SQLBuilder - - err atomic.Value - - iter sqlbuilder.Iterator - iterMu sync.Mutex - - prev *Result - fn func(*result) error -} - -// result represents a delimited set of items bound by a condition. -type result struct { - table string - limit int - offset int - - pageSize uint - pageNumber uint - - cursorColumn string - nextPageCursorValue interface{} - prevPageCursorValue interface{} - - fields []interface{} - orderBy []interface{} - groupBy []interface{} - conds [][]interface{} -} - -func filter(conds []interface{}) []interface{} { - return conds -} - -// NewResult creates and Results a new Result set on the given table, this set -// is limited by the given exql.Where conditions. -func NewResult(builder sqlbuilder.SQLBuilder, table string, conds []interface{}) *Result { - r := &Result{ - builder: builder, - } - return r.from(table).where(conds) -} - -func (r *Result) frame(fn func(*result) error) *Result { - return &Result{err: r.err, prev: r, fn: fn} -} - -func (r *Result) SQLBuilder() sqlbuilder.SQLBuilder { - if r.prev == nil { - return r.builder - } - return r.prev.SQLBuilder() -} - -func (r *Result) from(table string) *Result { - return r.frame(func(res *result) error { - res.table = table - return nil - }) -} - -func (r *Result) where(conds []interface{}) *Result { - return r.frame(func(res *result) error { - res.conds = [][]interface{}{conds} - return nil - }) -} - -func (r *Result) setErr(err error) { - if err == nil { - return - } - r.err.Store(err) -} - -// Err returns the last error that has happened with the result set, -// nil otherwise -func (r *Result) Err() error { - if errV := r.err.Load(); errV != nil { - return errV.(error) - } - return nil -} - -// Where sets conditions for the result set. -func (r *Result) Where(conds ...interface{}) db.Result { - return r.where(conds) -} - -// And adds more conditions on top of the existing ones. -func (r *Result) And(conds ...interface{}) db.Result { - return r.frame(func(res *result) error { - res.conds = append(res.conds, conds) - return nil - }) -} - -// Limit determines the maximum limit of Results to be returned. -func (r *Result) Limit(n int) db.Result { - return r.frame(func(res *result) error { - res.limit = n - return nil - }) -} - -func (r *Result) Paginate(pageSize uint) db.Result { - return r.frame(func(res *result) error { - res.pageSize = pageSize - return nil - }) -} - -func (r *Result) Page(pageNumber uint) db.Result { - return r.frame(func(res *result) error { - res.pageNumber = pageNumber - res.nextPageCursorValue = nil - res.prevPageCursorValue = nil - return nil - }) -} - -func (r *Result) Cursor(cursorColumn string) db.Result { - return r.frame(func(res *result) error { - res.cursorColumn = cursorColumn - return nil - }) -} - -func (r *Result) NextPage(cursorValue interface{}) db.Result { - return r.frame(func(res *result) error { - res.nextPageCursorValue = cursorValue - res.prevPageCursorValue = nil - return nil - }) -} - -func (r *Result) PrevPage(cursorValue interface{}) db.Result { - return r.frame(func(res *result) error { - res.nextPageCursorValue = nil - res.prevPageCursorValue = cursorValue - return nil - }) -} - -// Offset determines how many documents will be skipped before starting to grab -// Results. 
-func (r *Result) Offset(n int) db.Result { - return r.frame(func(res *result) error { - res.offset = n - return nil - }) -} - -// Group is used to group Results that have the same value in the same column -// or columns. -func (r *Result) Group(fields ...interface{}) db.Result { - return r.frame(func(res *result) error { - res.groupBy = fields - return nil - }) -} - -// OrderBy determines sorting of Results according to the provided names. Fields -// may be prefixed by - (minus) which means descending order, ascending order -// would be used otherwise. -func (r *Result) OrderBy(fields ...interface{}) db.Result { - return r.frame(func(res *result) error { - res.orderBy = fields - return nil - }) -} - -// Select determines which fields to return. -func (r *Result) Select(fields ...interface{}) db.Result { - return r.frame(func(res *result) error { - res.fields = fields - return nil - }) -} - -// String satisfies fmt.Stringer -func (r *Result) String() string { - query, err := r.buildPaginator() - if err != nil { - panic(err.Error()) - } - return query.String() -} - -// All dumps all Results into a pointer to an slice of structs or maps. -func (r *Result) All(dst interface{}) error { - query, err := r.buildPaginator() - if err != nil { - r.setErr(err) - return err - } - err = query.Iterator().All(dst) - r.setErr(err) - return err -} - -// One fetches only one Result from the set. -func (r *Result) One(dst interface{}) error { - one := r.Limit(1).(*Result) - query, err := one.buildPaginator() - if err != nil { - one.setErr(err) - return err - } - err = query.Iterator().One(dst) - one.setErr(err) - return err -} - -// Next fetches the next Result from the set. -func (r *Result) Next(dst interface{}) bool { - r.iterMu.Lock() - defer r.iterMu.Unlock() - - if r.iter == nil { - query, err := r.buildPaginator() - if err != nil { - r.setErr(err) - return false - } - r.iter = query.Iterator() - } - - if r.iter.Next(dst) { - return true - } - - if err := r.iter.Err(); err != db.ErrNoMoreRows { - r.setErr(err) - return false - } - - return false -} - -// Delete deletes all matching items from the collection. -func (r *Result) Delete() error { - query, err := r.buildDelete() - if err != nil { - r.setErr(err) - return err - } - - _, err = query.Exec() - r.setErr(err) - return err -} - -// Close closes the Result set. -func (r *Result) Close() error { - if r.iter != nil { - err := r.iter.Close() - r.setErr(err) - return err - } - return nil -} - -// Update updates matching items from the collection with values of the given -// map or struct. -func (r *Result) Update(values interface{}) error { - query, err := r.buildUpdate(values) - if err != nil { - r.setErr(err) - return err - } - - _, err = query.Exec() - r.setErr(err) - return err -} - -func (r *Result) TotalPages() (uint, error) { - query, err := r.buildPaginator() - if err != nil { - r.setErr(err) - return 0, err - } - - total, err := query.TotalPages() - if err != nil { - r.setErr(err) - return 0, err - } - - return total, nil -} - -func (r *Result) TotalEntries() (uint64, error) { - query, err := r.buildPaginator() - if err != nil { - r.setErr(err) - return 0, err - } - - total, err := query.TotalEntries() - if err != nil { - r.setErr(err) - return 0, err - } - - return total, nil -} - -// Exists returns true if at least one item on the collection exists. 
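A usage sketch of the chainable result set built above; "accounts", the Account struct, and the session are hypothetical. Each setter frames a new immutable node, and no SQL runs until All/One/Next materializes the query:

package example

import (
	db "upper.io/db.v3"
	"upper.io/db.v3/lib/sqlbuilder"
)

type Account struct {
	ID        int64  `db:"id"`
	CreatedAt string `db:"created_at"`
}

func listActiveAccounts(sess sqlbuilder.Database) ([]Account, error) {
	var accounts []Account
	err := sess.Collection("accounts").
		Find(db.Cond{"active": true}).
		OrderBy("-created_at"). // leading "-" selects descending order, per OrderBy's contract
		Limit(10).
		All(&accounts)
	return accounts, err
}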
-func (r *Result) Exists() (bool, error) { - query, err := r.buildCount() - if err != nil { - r.setErr(err) - return false, err - } - - query = query.Limit(1) - - value := struct { - Exists uint64 `db:"_t"` - }{} - - if err := query.One(&value); err != nil { - if err == db.ErrNoMoreRows { - return false, nil - } - r.setErr(err) - return false, err - } - - if value.Exists > 0 { - return true, nil - } - - return false, nil -} - -// Count counts the elements on the set. -func (r *Result) Count() (uint64, error) { - query, err := r.buildCount() - if err != nil { - r.setErr(err) - return 0, err - } - - counter := struct { - Count uint64 `db:"_t"` - }{} - if err := query.One(&counter); err != nil { - if err == db.ErrNoMoreRows { - return 0, nil - } - r.setErr(err) - return 0, err - } - - return counter.Count, nil -} - -func (r *Result) buildPaginator() (sqlbuilder.Paginator, error) { - if err := r.Err(); err != nil { - return nil, err - } - - res, err := r.fastForward() - if err != nil { - return nil, err - } - - b := r.SQLBuilder() - - sel := b.Select(res.fields...).From(res.table). - Limit(res.limit). - Offset(res.offset). - GroupBy(res.groupBy...). - OrderBy(res.orderBy...) - - for i := range res.conds { - sel = sel.And(filter(res.conds[i])...) - } - - pag := sel.Paginate(res.pageSize). - Page(res.pageNumber). - Cursor(res.cursorColumn) - - if res.nextPageCursorValue != nil { - pag = pag.NextPage(res.nextPageCursorValue) - } - - if res.prevPageCursorValue != nil { - pag = pag.PrevPage(res.prevPageCursorValue) - } - - return pag, nil -} - -func (r *Result) buildDelete() (sqlbuilder.Deleter, error) { - if err := r.Err(); err != nil { - return nil, err - } - - res, err := r.fastForward() - if err != nil { - return nil, err - } - - del := r.SQLBuilder().DeleteFrom(res.table). - Limit(res.limit) - - for i := range res.conds { - del = del.And(filter(res.conds[i])...) - } - - return del, nil -} - -func (r *Result) buildUpdate(values interface{}) (sqlbuilder.Updater, error) { - if err := r.Err(); err != nil { - return nil, err - } - - res, err := r.fastForward() - if err != nil { - return nil, err - } - - upd := r.SQLBuilder().Update(res.table). - Set(values). - Limit(res.limit) - - for i := range res.conds { - upd = upd.And(filter(res.conds[i])...) - } - - return upd, nil -} - -func (r *Result) buildCount() (sqlbuilder.Selector, error) { - if err := r.Err(); err != nil { - return nil, err - } - - res, err := r.fastForward() - if err != nil { - return nil, err - } - - sel := r.SQLBuilder().Select(db.Raw("count(1) AS _t")). - From(res.table). - GroupBy(res.groupBy...) - - for i := range res.conds { - sel = sel.And(filter(res.conds[i])...) - } - - return sel, nil -} - -func (r *Result) Prev() immutable.Immutable { - if r == nil { - return nil - } - return r.prev -} - -func (r *Result) Fn(in interface{}) error { - if r.fn == nil { - return nil - } - return r.fn(in.(*result)) -} - -func (r *Result) Base() interface{} { - return &result{} -} - -func (r *Result) fastForward() (*result, error) { - ff, err := immutable.FastForward(r) - if err != nil { - return nil, err - } - return ff.(*result), nil -} - -var _ = immutable.Immutable(&Result{}) diff --git a/vendor/upper.io/db.v3/internal/sqladapter/sqladapter.go b/vendor/upper.io/db.v3/internal/sqladapter/sqladapter.go deleted file mode 100644 index 7fb21246f88..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/sqladapter.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. 
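fastForward above replays the whole chain of frames onto a fresh *result, which is what keeps each Result value immutable and safe to share. A self-contained sketch of that pattern (node/state are illustrative names):

package main

import "fmt"

// node is an immutable chain link: every setter returns a new node holding a
// mutation func, and fastForward replays the chain from the root.
type node struct {
	prev *node
	fn   func(*state)
}

type state struct{ limit, offset int }

func (n *node) frame(fn func(*state)) *node { return &node{prev: n, fn: fn} }

func (n *node) fastForward() *state {
	if n == nil {
		return &state{}
	}
	s := n.prev.fastForward()
	if n.fn != nil {
		n.fn(s)
	}
	return s
}

func main() {
	var root *node
	q := root.frame(func(s *state) { s.limit = 10 }).frame(func(s *state) { s.offset = 20 })
	fmt.Printf("%+v\n", *q.fastForward()) // {limit:10 offset:20}
}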
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package sqladapter provides common logic for SQL adapters. -package sqladapter - -import ( - "database/sql/driver" -) - -// IsKeyValue reports whether v is a valid value for a primary key that can be -// used with Find(pKey). -func IsKeyValue(v interface{}) bool { - if v == nil { - return true - } - switch v.(type) { - case int64, int, uint, uint64, - []int64, []int, []uint, []uint64, - []byte, []string, - []interface{}, - driver.Valuer: - return true - } - return false -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/statement.go b/vendor/upper.io/db.v3/internal/sqladapter/statement.go deleted file mode 100644 index 17e7c6d7041..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/statement.go +++ /dev/null @@ -1,85 +0,0 @@ -package sqladapter - -import ( - "database/sql" - "errors" - "sync" - "sync/atomic" -) - -var ( - activeStatements int64 -) - -// Stmt represents a *sql.Stmt that is cached and provides the -// OnPurge method to allow it to clean after itself. -type Stmt struct { - *sql.Stmt - - query string - mu sync.Mutex - - count int64 - dead bool -} - -// NewStatement creates an returns an opened statement -func NewStatement(stmt *sql.Stmt, query string) *Stmt { - s := &Stmt{ - Stmt: stmt, - query: query, - } - atomic.AddInt64(&activeStatements, 1) - return s -} - -// Open marks the statement as in-use -func (c *Stmt) Open() (*Stmt, error) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.dead { - return nil, errors.New("statement is dead") - } - - c.count++ - return c, nil -} - -// Close closes the underlying statement if no other go-routine is using it. -func (c *Stmt) Close() error { - c.mu.Lock() - defer c.mu.Unlock() - - c.count-- - - return c.checkClose() -} - -func (c *Stmt) checkClose() error { - if c.dead && c.count == 0 { - // Statement is dead and we can close it for real. - err := c.Stmt.Close() - if err != nil { - return err - } - // Reduce active statements counter. - atomic.AddInt64(&activeStatements, -1) - } - return nil -} - -// OnPurge marks the statement as ready to be cleaned up. -func (c *Stmt) OnPurge() { - c.mu.Lock() - defer c.mu.Unlock() - - c.dead = true - c.checkClose() -} - -// NumActiveStatements returns the global number of prepared statements in use -// at any point. 
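A lifecycle sketch for the cached statement above (execCached is a hypothetical in-package helper): callers bracket use with Open/Close, the cache calls OnPurge on eviction, and the underlying *sql.Stmt is closed for real only once it is both purged and no longer in use:

func execCached(s *Stmt, args ...interface{}) error {
	stmt, err := s.Open() // marks the statement in-use; fails if already dead
	if err != nil {
		return err
	}
	defer stmt.Close() // releases; really closes only when dead and unused
	_, err = stmt.Exec(args...)
	return err
}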
-func NumActiveStatements() int64 { - return atomic.LoadInt64(&activeStatements) -} diff --git a/vendor/upper.io/db.v3/internal/sqladapter/tx.go b/vendor/upper.io/db.v3/internal/sqladapter/tx.go deleted file mode 100644 index c1d2a403d7a..00000000000 --- a/vendor/upper.io/db.v3/internal/sqladapter/tx.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqladapter - -import ( - "context" - "database/sql" - "sync/atomic" - - db "upper.io/db.v3" - "upper.io/db.v3/lib/sqlbuilder" -) - -// DatabaseTx represents a database session within a transaction. -type DatabaseTx interface { - BaseDatabase - PartialDatabase - - BaseTx -} - -// BaseTx provides logic for methods that can be shared across all SQL -// adapters. -type BaseTx interface { - db.Tx - - // Committed returns true if the transaction was already commited. - Committed() bool -} - -type databaseTx struct { - Database - BaseTx -} - -// NewDatabaseTx creates a database session within a transaction. -func NewDatabaseTx(db Database) DatabaseTx { - return &databaseTx{ - Database: db, - BaseTx: db.Transaction(), - } -} - -type baseTx struct { - *sql.Tx - committed atomic.Value -} - -func newBaseTx(tx *sql.Tx) BaseTx { - return &baseTx{Tx: tx} -} - -func (b *baseTx) Committed() bool { - committed := b.committed.Load() - return committed != nil -} - -func (b *baseTx) Commit() (err error) { - err = b.Tx.Commit() - if err != nil { - return err - } - b.committed.Store(struct{}{}) - return nil -} - -func (w *databaseTx) Commit() error { - defer w.Database.Close() // Automatic close on commit. - return w.BaseTx.Commit() -} - -func (w *databaseTx) Rollback() error { - defer w.Database.Close() // Automatic close on rollback. - return w.BaseTx.Rollback() -} - -// RunTx creates a transaction context and runs fn within it. 
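RunTx, whose body follows, wraps this closure-transaction pattern for sqlbuilder sessions: roll back when the callback fails, commit otherwise. The same idea restated with plain database/sql (a sketch; withTx is an illustrative name):

package main

import (
	"context"
	"database/sql"
)

// withTx begins a transaction, runs fn inside it, and rolls back on error
// so the caller never has to remember the cleanup.
func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	if err := fn(tx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}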
-func RunTx(d sqlbuilder.Database, ctx context.Context, fn func(tx sqlbuilder.Tx) error) error { - tx, err := d.NewTx(ctx) - if err != nil { - return err - } - - defer tx.Close() - if err := fn(tx); err != nil { - _ = tx.Rollback() - return err - } - return tx.Commit() -} - -var ( - _ = BaseTx(&baseTx{}) - _ = DatabaseTx(&databaseTx{}) -) diff --git a/vendor/upper.io/db.v3/intersection.go b/vendor/upper.io/db.v3/intersection.go deleted file mode 100644 index 77274b3a9b4..00000000000 --- a/vendor/upper.io/db.v3/intersection.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Intersection represents a compound joined by AND. -type Intersection struct { - *compound -} - -// And adds more terms to the compound. -func (a *Intersection) And(andConds ...Compound) *Intersection { - var fn func(*[]Compound) error - if len(andConds) > 0 { - fn = func(in *[]Compound) error { - *in = append(*in, andConds...) - return nil - } - } - return &Intersection{a.compound.frame(fn)} -} - -// Empty returns false if this struct holds no conditions. -func (a *Intersection) Empty() bool { - return a.compound.Empty() -} - -// Operator returns the AND operator. -func (a *Intersection) Operator() CompoundOperator { - return OperatorAnd -} - -// And joins conditions under logical conjunction. Conditions can be -// represented by db.Cond{}, db.Or() or db.And(). 
-// -// Examples: -// -// // name = "Peter" AND last_name = "Parker" -// db.And( -// db.Cond{"name": "Peter"}, -// db.Cond{"last_name": "Parker "}, -// ) -// -// // (name = "Peter" OR name = "Mickey") AND last_name = "Mouse" -// db.And( -// db.Or( -// db.Cond{"name": "Peter"}, -// db.Cond{"name": "Mickey"}, -// ), -// db.Cond{"last_name": "Mouse"}, -// ) -func And(conds ...Compound) *Intersection { - return &Intersection{newCompound(conds...)} -} diff --git a/vendor/upper.io/db.v3/lib/reflectx/LICENSE b/vendor/upper.io/db.v3/lib/reflectx/LICENSE deleted file mode 100644 index 0d31edfa737..00000000000 --- a/vendor/upper.io/db.v3/lib/reflectx/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ - Copyright (c) 2013, Jason Moiron - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, - copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following - conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/upper.io/db.v3/lib/reflectx/README.md b/vendor/upper.io/db.v3/lib/reflectx/README.md deleted file mode 100644 index 76f1b5dfe9e..00000000000 --- a/vendor/upper.io/db.v3/lib/reflectx/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# reflectx - -The sqlx package has special reflect needs. In particular, it needs to: - -* be able to map a name to a field -* understand embedded structs -* understand mapping names to fields by a particular tag -* user specified name -> field mapping functions - -These behaviors mimic the behaviors by the standard library marshallers and also the -behavior of standard Go accessors. - -The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is -addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct -tags in the ways that are vital to most marshalers, and they are slow. - -This reflectx package extends reflect to achieve these goals. diff --git a/vendor/upper.io/db.v3/lib/reflectx/reflect.go b/vendor/upper.io/db.v3/lib/reflectx/reflect.go deleted file mode 100644 index 028163fe327..00000000000 --- a/vendor/upper.io/db.v3/lib/reflectx/reflect.go +++ /dev/null @@ -1,407 +0,0 @@ -// Package reflectx implements extensions to the standard reflect lib suitable -// for implementing marshaling and unmarshaling packages. The main Mapper type -// allows for Go-compatible named attribute access, including accessing embedded -// struct attributes and the ability to use functions and struct tags to -// customize field names. -// -package reflectx - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "sync" -) - -// A FieldInfo is a collection of metadata about a struct field. 
-type FieldInfo struct { - Index []int - Path string - Field reflect.StructField - Zero reflect.Value - Name string - Options map[string]string - Embedded bool - Children []*FieldInfo - Parent *FieldInfo -} - -// A StructMap is an index of field metadata for a struct. -type StructMap struct { - Tree *FieldInfo - Index []*FieldInfo - Paths map[string]*FieldInfo - Names map[string]*FieldInfo -} - -// GetByPath returns a *FieldInfo for a given string path. -func (f StructMap) GetByPath(path string) *FieldInfo { - return f.Paths[path] -} - -// GetByTraversal returns a *FieldInfo for a given integer path. It is -// analogous to reflect.FieldByIndex. -func (f StructMap) GetByTraversal(index []int) *FieldInfo { - if len(index) == 0 { - return nil - } - - tree := f.Tree - for _, i := range index { - if i >= len(tree.Children) || tree.Children[i] == nil { - return nil - } - tree = tree.Children[i] - } - return tree -} - -// Mapper is a general purpose mapper of names to struct fields. A Mapper -// behaves like most marshallers, optionally obeying a field tag for name -// mapping and a function to provide a basic mapping of fields to names. -type Mapper struct { - cache map[reflect.Type]*StructMap - tagName string - tagMapFunc func(string) string - mapFunc func(string) string - mutex sync.Mutex -} - -// NewMapper returns a new mapper which optionally obeys the field tag given -// by tagName. If tagName is the empty string, it is ignored. -func NewMapper(tagName string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - } -} - -// NewMapperTagFunc returns a new mapper which contains a mapper for field names -// AND a mapper for tag values. This is useful for tags like json which can -// have values like "name,omitempty". -func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: mapFunc, - tagMapFunc: tagMapFunc, - } -} - -// NewMapperFunc returns a new mapper which optionally obeys a field tag and -// a struct field name mapper func given by f. Tags will take precedence, but -// for any other field, the mapped name will be f(field.Name) -func NewMapperFunc(tagName string, f func(string) string) *Mapper { - return &Mapper{ - cache: make(map[reflect.Type]*StructMap), - tagName: tagName, - mapFunc: f, - } -} - -// TypeMap returns a mapping of field strings to int slices representing -// the traversal down the struct to reach the field. -func (m *Mapper) TypeMap(t reflect.Type) *StructMap { - m.mutex.Lock() - mapping, ok := m.cache[t] - if !ok { - mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) - m.cache[t] = mapping - } - m.mutex.Unlock() - return mapping -} - -// FieldMap returns the mapper's mapping of field names to reflect values. Panics -// if v's Kind is not Struct, or v is not Indirectable to a struct kind. -func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - r := map[string]reflect.Value{} - tm := m.TypeMap(v.Type()) - for tagName, fi := range tm.Names { - r[tagName] = FieldByIndexes(v, fi.Index) - } - return r -} - -// ValidFieldMap returns the mapper's mapping of field names to reflect valid -// field values. Panics if v's Kind is not Struct, or v is not Indirectable to -// a struct kind. 
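A usage sketch for the Mapper above, assuming upper.io/db.v3 is on the module path: tag values win, and untagged fields fall back to the mapping func (the Account struct is hypothetical):

package main

import (
	"fmt"
	"reflect"
	"strings"

	"upper.io/db.v3/lib/reflectx"
)

type Account struct {
	ID   int64  `db:"id"`
	Name string // no tag: falls back to the map func, yielding "name"
}

func main() {
	// Build a mapper that obeys the "db" tag and lowercases untagged names.
	m := reflectx.NewMapperFunc("db", strings.ToLower)
	fm := m.FieldMap(reflect.ValueOf(Account{ID: 1, Name: "Ada"}))
	fmt.Println(fm["id"].Interface(), fm["name"].Interface()) // 1 Ada
}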
-func (m *Mapper) ValidFieldMap(v reflect.Value) map[string]reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - r := map[string]reflect.Value{} - tm := m.TypeMap(v.Type()) - for tagName, fi := range tm.Names { - v := ValidFieldByIndexes(v, fi.Index) - if v.IsValid() { - r[tagName] = v - } - } - return r -} - -// FieldByName returns a field by the its mapped name as a reflect.Value. -// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. -// Returns zero Value if the name is not found. -func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - fi, ok := tm.Names[name] - if !ok { - return v - } - return FieldByIndexes(v, fi.Index) -} - -// FieldsByName returns a slice of values corresponding to the slice of names -// for the value. Panics if v's Kind is not Struct or v is not Indirectable -// to a struct Kind. Returns zero Value for each name not found. -func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { - v = reflect.Indirect(v) - mustBe(v, reflect.Struct) - - tm := m.TypeMap(v.Type()) - vals := make([]reflect.Value, 0, len(names)) - for _, name := range names { - fi, ok := tm.Names[name] - if !ok { - vals = append(vals, *new(reflect.Value)) - } else { - vals = append(vals, FieldByIndexes(v, fi.Index)) - } - } - return vals -} - -// TraversalsByName returns a slice of int slices which represent the struct -// traversals for each mapped name. Panics if t is not a struct or Indirectable -// to a struct. Returns empty int slice for each name not found. -func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { - t = Deref(t) - mustBe(t, reflect.Struct) - tm := m.TypeMap(t) - - r := make([][]int, 0, len(names)) - for _, name := range names { - fi, ok := tm.Names[name] - if !ok { - r = append(r, []int{}) - } else { - r = append(r, fi.Index) - } - } - return r -} - -// FieldByIndexes returns a value for a particular struct traversal. -func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value { - - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - // if this is a pointer, it's possible it is nil - if v.Kind() == reflect.Ptr && v.IsNil() { - alloc := reflect.New(Deref(v.Type())) - v.Set(alloc) - } - if v.Kind() == reflect.Map && v.IsNil() { - v.Set(reflect.MakeMap(v.Type())) - } - } - - return v -} - -// ValidFieldByIndexes returns a value for a particular struct traversal. -func ValidFieldByIndexes(v reflect.Value, indexes []int) reflect.Value { - - for _, i := range indexes { - v = reflect.Indirect(v) - if !v.IsValid() { - return reflect.Value{} - } - v = v.Field(i) - // if this is a pointer, it's possible it is nil - if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Map) && v.IsNil() { - return reflect.Value{} - } - } - - return v -} - -// FieldByIndexesReadOnly returns a value for a particular struct traversal, -// but is not concerned with allocating nil pointers because the value is -// going to be used for reading and not setting. 
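FieldByIndexes above initializes nil intermediate pointers (and maps) on the way down, so traversals through embedded pointers never panic; a small demonstration (Inner/Outer are hypothetical types):

package main

import (
	"fmt"
	"reflect"

	"upper.io/db.v3/lib/reflectx"
)

type Inner struct{ N int64 }
type Outer struct{ Inner *Inner }

func main() {
	var o Outer // o.Inner starts out nil
	// The traversal {0, 0} descends Outer.Inner -> Inner.N, allocating the
	// nil *Inner along the way instead of panicking.
	f := reflectx.FieldByIndexes(reflect.ValueOf(&o).Elem(), []int{0, 0})
	f.SetInt(42)
	fmt.Println(o.Inner.N) // 42
}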
-func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value { - for _, i := range indexes { - v = reflect.Indirect(v).Field(i) - } - return v -} - -// Deref is Indirect for reflect.Types -func Deref(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -// -- helpers & utilities -- - -type kinder interface { - Kind() reflect.Kind -} - -// mustBe checks a value against a kind, panicing with a reflect.ValueError -// if the kind isn't that which is required. -func mustBe(v kinder, expected reflect.Kind) { - k := v.Kind() - if k != expected { - panic(&reflect.ValueError{Method: methodName(), Kind: k}) - } -} - -// methodName is returns the caller of the function calling methodName -func methodName() string { - pc, _, _, _ := runtime.Caller(2) - f := runtime.FuncForPC(pc) - if f == nil { - return "unknown method" - } - return f.Name() -} - -type typeQueue struct { - t reflect.Type - fi *FieldInfo - pp string // Parent path -} - -// A copying append that creates a new slice each time. -func apnd(is []int, i int) []int { - x := make([]int, len(is)+1) - copy(x, is) - x[len(x)-1] = i - return x -} - -// getMapping returns a mapping for the t type, using the tagName, mapFunc and -// tagMapFunc to determine the canonical names of fields. -func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc func(string) string) *StructMap { - m := []*FieldInfo{} - - root := &FieldInfo{} - queue := []typeQueue{} - queue = append(queue, typeQueue{Deref(t), root, ""}) - - for len(queue) != 0 { - // pop the first item off of the queue - tq := queue[0] - queue = queue[1:] - nChildren := 0 - if tq.t.Kind() == reflect.Struct { - nChildren = tq.t.NumField() - } - tq.fi.Children = make([]*FieldInfo, nChildren) - - // iterate through all of its fields - for fieldPos := 0; fieldPos < nChildren; fieldPos++ { - f := tq.t.Field(fieldPos) - - fi := FieldInfo{} - fi.Field = f - fi.Zero = reflect.New(f.Type).Elem() - fi.Options = map[string]string{} - - var tag, name string - if tagName != "" && strings.Contains(string(f.Tag), tagName+":") { - tag = f.Tag.Get(tagName) - name = tag - } else { - if mapFunc != nil { - name = mapFunc(f.Name) - } - } - - parts := strings.Split(name, ",") - if len(parts) > 1 { - name = parts[0] - for _, opt := range parts[1:] { - kv := strings.Split(opt, "=") - if len(kv) > 1 { - fi.Options[kv[0]] = kv[1] - } else { - fi.Options[kv[0]] = "" - } - } - } - - if tagMapFunc != nil { - tag = tagMapFunc(tag) - } - - fi.Name = name - - if tq.pp == "" || (tq.pp == "" && tag == "") { - fi.Path = fi.Name - } else { - fi.Path = fmt.Sprintf("%s.%s", tq.pp, fi.Name) - } - - // if the name is "-", disabled via a tag, skip it - if name == "-" { - continue - } - - // skip unexported fields - if len(f.PkgPath) != 0 && !f.Anonymous { - continue - } - - // bfs search of anonymous embedded structs - if f.Anonymous { - pp := tq.pp - if tag != "" { - pp = fi.Path - } - - fi.Embedded = true - fi.Index = apnd(tq.fi.Index, fieldPos) - nChildren := 0 - ft := Deref(f.Type) - if ft.Kind() == reflect.Struct { - nChildren = ft.NumField() - } - fi.Children = make([]*FieldInfo, nChildren) - queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) - } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { - fi.Index = apnd(tq.fi.Index, fieldPos) - fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) - queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) - } - - fi.Index = 
apnd(tq.fi.Index, fieldPos) - fi.Parent = tq.fi - tq.fi.Children[fieldPos] = &fi - m = append(m, &fi) - } - } - - flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} - for _, fi := range flds.Index { - flds.Paths[fi.Path] = fi - if fi.Name != "" && !fi.Embedded { - flds.Names[fi.Path] = fi - } - } - - return flds -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/batch.go b/vendor/upper.io/db.v3/lib/sqlbuilder/batch.go deleted file mode 100644 index a8a8b144df5..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/batch.go +++ /dev/null @@ -1,82 +0,0 @@ -package sqlbuilder - -// BatchInserter provides a helper that can be used to do massive insertions in -// batches. -type BatchInserter struct { - inserter *inserter - size int - values chan []interface{} - err error -} - -func newBatchInserter(inserter *inserter, size int) *BatchInserter { - if size < 1 { - size = 1 - } - b := &BatchInserter{ - inserter: inserter, - size: size, - values: make(chan []interface{}, size), - } - return b -} - -// Values pushes column values to be inserted as part of the batch. -func (b *BatchInserter) Values(values ...interface{}) *BatchInserter { - b.values <- values - return b -} - -func (b *BatchInserter) nextQuery() *inserter { - ins := &inserter{} - *ins = *b.inserter - i := 0 - for values := range b.values { - i++ - ins = ins.Values(values...).(*inserter) - if i == b.size { - break - } - } - if i == 0 { - return nil - } - return ins -} - -// NextResult is useful when using PostgreSQL and Returning(), it dumps the -// next slice of results to dst, which can mean having the IDs of all inserted -// elements in the batch. -func (b *BatchInserter) NextResult(dst interface{}) bool { - clone := b.nextQuery() - if clone == nil { - return false - } - b.err = clone.Iterator().All(dst) - return (b.err == nil) -} - -// Done means that no more elements are going to be added. -func (b *BatchInserter) Done() { - close(b.values) -} - -// Wait blocks until the whole batch is executed. -func (b *BatchInserter) Wait() error { - for { - q := b.nextQuery() - if q == nil { - break - } - if _, err := q.Exec(); err != nil { - b.err = err - break - } - } - return b.Err() -} - -// Err returns any error while executing the batch. -func (b *BatchInserter) Err() error { - return b.err -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/builder.go b/vendor/upper.io/db.v3/lib/sqlbuilder/builder.go deleted file mode 100644 index 91ab9a5ba69..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/builder.go +++ /dev/null @@ -1,595 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package sqlbuilder provides tools for building custom SQL queries. -package sqlbuilder - -import ( - "context" - "database/sql" - "errors" - "fmt" - "log" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter/compat" - "upper.io/db.v3/internal/sqladapter/exql" - "upper.io/db.v3/lib/reflectx" -) - -// MapOptions represents options for the mapper. -type MapOptions struct { - IncludeZeroed bool - IncludeNil bool -} - -var defaultMapOptions = MapOptions{ - IncludeZeroed: false, - IncludeNil: false, -} - -type compilable interface { - Compile() (string, error) - Arguments() []interface{} -} - -type hasIsZero interface { - IsZero() bool -} - -type iterator struct { - sess exprDB - cursor *sql.Rows // This is the main query cursor. It starts as a nil value. - err error -} - -type fieldValue struct { - fields []string - values []interface{} -} - -var ( - reInvisibleChars = regexp.MustCompile(`[\s\r\n\t]+`) -) - -var ( - sqlPlaceholder = exql.RawValue(`?`) -) - -var ( - errDeprecatedJSONBTag = errors.New(`Tag "jsonb" is deprecated. See "PostgreSQL: jsonb tag" at https://github.com/upper/db/releases/tag/v3.4.0`) -) - -type exprDB interface { - StatementExec(ctx context.Context, stmt *exql.Statement, args ...interface{}) (sql.Result, error) - StatementPrepare(ctx context.Context, stmt *exql.Statement) (*sql.Stmt, error) - StatementQuery(ctx context.Context, stmt *exql.Statement, args ...interface{}) (*sql.Rows, error) - StatementQueryRow(ctx context.Context, stmt *exql.Statement, args ...interface{}) (*sql.Row, error) - - Context() context.Context -} - -type sqlBuilder struct { - sess exprDB - t *templateWithUtils -} - -// WithSession returns a query builder that is bound to the given database session. -func WithSession(sess interface{}, t *exql.Template) SQLBuilder { - if sqlDB, ok := sess.(*sql.DB); ok { - sess = sqlDB - } - return &sqlBuilder{ - sess: sess.(exprDB), // Let it panic, it will show the developer an informative error. - t: newTemplateWithUtils(t), - } -} - -// WithTemplate returns a builder that is based on the given template. -func WithTemplate(t *exql.Template) SQLBuilder { - return &sqlBuilder{ - t: newTemplateWithUtils(t), - } -} - -// NewIterator creates an iterator using the given *sql.Rows. -func NewIterator(rows *sql.Rows) Iterator { - return &iterator{nil, rows, nil} -} - -func (b *sqlBuilder) Iterator(query interface{}, args ...interface{}) Iterator { - return b.IteratorContext(b.sess.Context(), query, args...) -} - -func (b *sqlBuilder) IteratorContext(ctx context.Context, query interface{}, args ...interface{}) Iterator { - rows, err := b.QueryContext(ctx, query, args...) 
- return &iterator{b.sess, rows, err} -} - -func (b *sqlBuilder) Prepare(query interface{}) (*sql.Stmt, error) { - return b.PrepareContext(b.sess.Context(), query) -} - -func (b *sqlBuilder) PrepareContext(ctx context.Context, query interface{}) (*sql.Stmt, error) { - switch q := query.(type) { - case *exql.Statement: - return b.sess.StatementPrepare(ctx, q) - case string: - return b.sess.StatementPrepare(ctx, exql.RawSQL(q)) - case db.RawValue: - return b.PrepareContext(ctx, q.Raw()) - default: - return nil, fmt.Errorf("unsupported query type %T", query) - } -} - -func (b *sqlBuilder) Exec(query interface{}, args ...interface{}) (sql.Result, error) { - return b.ExecContext(b.sess.Context(), query, args...) -} - -func (b *sqlBuilder) ExecContext(ctx context.Context, query interface{}, args ...interface{}) (sql.Result, error) { - switch q := query.(type) { - case *exql.Statement: - return b.sess.StatementExec(ctx, q, args...) - case string: - return b.sess.StatementExec(ctx, exql.RawSQL(q), args...) - case db.RawValue: - return b.ExecContext(ctx, q.Raw(), q.Arguments()...) - default: - return nil, fmt.Errorf("unsupported query type %T", query) - } -} - -func (b *sqlBuilder) Query(query interface{}, args ...interface{}) (*sql.Rows, error) { - return b.QueryContext(b.sess.Context(), query, args...) -} - -func (b *sqlBuilder) QueryContext(ctx context.Context, query interface{}, args ...interface{}) (*sql.Rows, error) { - switch q := query.(type) { - case *exql.Statement: - return b.sess.StatementQuery(ctx, q, args...) - case string: - return b.sess.StatementQuery(ctx, exql.RawSQL(q), args...) - case db.RawValue: - return b.QueryContext(ctx, q.Raw(), q.Arguments()...) - default: - return nil, fmt.Errorf("unsupported query type %T", query) - } -} - -func (b *sqlBuilder) QueryRow(query interface{}, args ...interface{}) (*sql.Row, error) { - return b.QueryRowContext(b.sess.Context(), query, args...) -} - -func (b *sqlBuilder) QueryRowContext(ctx context.Context, query interface{}, args ...interface{}) (*sql.Row, error) { - switch q := query.(type) { - case *exql.Statement: - return b.sess.StatementQueryRow(ctx, q, args...) - case string: - return b.sess.StatementQueryRow(ctx, exql.RawSQL(q), args...) - case db.RawValue: - return b.QueryRowContext(ctx, q.Raw(), q.Arguments()...) - default: - return nil, fmt.Errorf("unsupported query type %T", query) - } -} - -func (b *sqlBuilder) SelectFrom(table ...interface{}) Selector { - qs := &selector{ - builder: b, - } - return qs.From(table...) -} - -func (b *sqlBuilder) Select(columns ...interface{}) Selector { - qs := &selector{ - builder: b, - } - return qs.Columns(columns...) -} - -func (b *sqlBuilder) InsertInto(table string) Inserter { - qi := &inserter{ - builder: b, - } - return qi.Into(table) -} - -func (b *sqlBuilder) DeleteFrom(table string) Deleter { - qd := &deleter{ - builder: b, - } - return qd.setTable(table) -} - -func (b *sqlBuilder) Update(table string) Updater { - qu := &updater{ - builder: b, - } - return qu.setTable(table) -} - -// Map receives a pointer to map or struct and maps it to columns and values. -func Map(item interface{}, options *MapOptions) ([]string, []interface{}, error) { - var fv fieldValue - if options == nil { - options = &defaultMapOptions - } - - itemV := reflect.ValueOf(item) - if !itemV.IsValid() { - return nil, nil, nil - } - - itemT := itemV.Type() - - if itemT.Kind() == reflect.Ptr { - // Single dereference. Just in case the user passes a pointer to struct - // instead of a struct. 
- item = itemV.Elem().Interface() - itemV = reflect.ValueOf(item) - itemT = itemV.Type() - } - - switch itemT.Kind() { - case reflect.Struct: - fieldMap := mapper.TypeMap(itemT).Names - nfields := len(fieldMap) - - fv.values = make([]interface{}, 0, nfields) - fv.fields = make([]string, 0, nfields) - - for _, fi := range fieldMap { - - // Check for deprecated JSONB tag - if _, hasJSONBTag := fi.Options["jsonb"]; hasJSONBTag { - return nil, nil, errDeprecatedJSONBTag - } - - // Field options - _, tagOmitEmpty := fi.Options["omitempty"] - - fld := reflectx.FieldByIndexesReadOnly(itemV, fi.Index) - if fld.Kind() == reflect.Ptr && fld.IsNil() { - if tagOmitEmpty && !options.IncludeNil { - continue - } - fv.fields = append(fv.fields, fi.Name) - if tagOmitEmpty { - fv.values = append(fv.values, sqlDefault) - } else { - fv.values = append(fv.values, nil) - } - continue - } - - value := fld.Interface() - - isZero := false - if t, ok := fld.Interface().(hasIsZero); ok { - if t.IsZero() { - isZero = true - } - } else if fld.Kind() == reflect.Array || fld.Kind() == reflect.Slice { - if fld.Len() == 0 { - isZero = true - } - } else if reflect.DeepEqual(fi.Zero.Interface(), value) { - isZero = true - } - - if isZero && tagOmitEmpty && !options.IncludeZeroed { - continue - } - - fv.fields = append(fv.fields, fi.Name) - v, err := marshal(value) - if err != nil { - return nil, nil, err - } - if isZero && tagOmitEmpty { - v = sqlDefault - } - fv.values = append(fv.values, v) - } - - case reflect.Map: - nfields := itemV.Len() - fv.values = make([]interface{}, nfields) - fv.fields = make([]string, nfields) - mkeys := itemV.MapKeys() - - for i, keyV := range mkeys { - valv := itemV.MapIndex(keyV) - fv.fields[i] = fmt.Sprintf("%v", keyV.Interface()) - - v, err := marshal(valv.Interface()) - if err != nil { - return nil, nil, err - } - - fv.values[i] = v - } - default: - return nil, nil, ErrExpectingPointerToEitherMapOrStruct - } - - sort.Sort(&fv) - - return fv.fields, fv.values, nil -} - -func columnFragments(columns []interface{}) ([]exql.Fragment, []interface{}, error) { - l := len(columns) - f := make([]exql.Fragment, l) - args := []interface{}{} - - for i := 0; i < l; i++ { - switch v := columns[i].(type) { - case compilable: - c, err := v.Compile() - if err != nil { - return nil, nil, err - } - q, a := Preprocess(c, v.Arguments()) - if _, ok := v.(Selector); ok { - q = "(" + q + ")" - } - f[i] = exql.RawValue(q) - args = append(args, a...) - case db.Function: - fnName, fnArgs := v.Name(), v.Arguments() - if len(fnArgs) == 0 { - fnName = fnName + "()" - } else { - fnName = fnName + "(?" + strings.Repeat("?, ", len(fnArgs)-1) + ")" - } - fnName, fnArgs = Preprocess(fnName, fnArgs) - f[i] = exql.RawValue(fnName) - args = append(args, fnArgs...) - case db.RawValue: - q, a := Preprocess(v.Raw(), v.Arguments()) - f[i] = exql.RawValue(q) - args = append(args, a...) - case exql.Fragment: - f[i] = v - case string: - f[i] = exql.ColumnWithName(v) - case int: - f[i] = exql.RawValue(fmt.Sprintf("%v", v)) - case interface{}: - f[i] = exql.ColumnWithName(fmt.Sprintf("%v", v)) - default: - return nil, nil, fmt.Errorf("unexpected argument type %T for Select() argument", v) - } - } - return f, args, nil -} - -func prepareQueryForDisplay(in string) (out string) { - j := 1 - for i := range in { - if in[i] == '?' 
{ - out = out + "$" + strconv.Itoa(j) - j++ - } else { - out = out + string(in[i]) - } - } - - out = reInvisibleChars.ReplaceAllString(out, ` `) - return strings.TrimSpace(out) -} - -func (iter *iterator) NextScan(dst ...interface{}) error { - if ok := iter.Next(); ok { - return iter.Scan(dst...) - } - if err := iter.Err(); err != nil { - return err - } - return db.ErrNoMoreRows -} - -func (iter *iterator) ScanOne(dst ...interface{}) error { - defer iter.Close() - return iter.NextScan(dst...) -} - -func (iter *iterator) Scan(dst ...interface{}) error { - if err := iter.Err(); err != nil { - return err - } - return iter.cursor.Scan(dst...) -} - -func (iter *iterator) setErr(err error) error { - iter.err = err - return iter.err -} - -func (iter *iterator) One(dst interface{}) error { - if err := iter.Err(); err != nil { - return err - } - defer iter.Close() - return iter.setErr(iter.next(dst)) -} - -func (iter *iterator) All(dst interface{}) error { - if err := iter.Err(); err != nil { - return err - } - defer iter.Close() - - // Fetching all results within the cursor. - if err := fetchRows(iter, dst); err != nil { - return iter.setErr(err) - } - - return nil -} - -func (iter *iterator) Err() (err error) { - return iter.err -} - -func (iter *iterator) Next(dst ...interface{}) bool { - if err := iter.Err(); err != nil { - return false - } - - if err := iter.next(dst...); err != nil { - // ignore db.ErrNoMoreRows, just break. - if err != db.ErrNoMoreRows { - _ = iter.setErr(err) - } - return false - } - - return true -} - -func (iter *iterator) next(dst ...interface{}) error { - if iter.cursor == nil { - return iter.setErr(db.ErrNoMoreRows) - } - - switch len(dst) { - case 0: - if ok := iter.cursor.Next(); !ok { - defer iter.Close() - err := iter.cursor.Err() - if err == nil { - err = db.ErrNoMoreRows - } - return err - } - return nil - case 1: - if err := fetchRow(iter, dst[0]); err != nil { - defer iter.Close() - return err - } - return nil - } - - return errors.New("Next does not currently supports more than one parameters") -} - -func (iter *iterator) Close() (err error) { - if iter.cursor != nil { - err = iter.cursor.Close() - iter.cursor = nil - } - return err -} - -func marshal(v interface{}) (interface{}, error) { - if m, isMarshaler := v.(db.Marshaler); isMarshaler { - var err error - if v, err = m.MarshalDB(); err != nil { - return nil, err - } - } - return v, nil -} - -func (fv *fieldValue) Len() int { - return len(fv.fields) -} - -func (fv *fieldValue) Swap(i, j int) { - fv.fields[i], fv.fields[j] = fv.fields[j], fv.fields[i] - fv.values[i], fv.values[j] = fv.values[j], fv.values[i] -} - -func (fv *fieldValue) Less(i, j int) bool { - return fv.fields[i] < fv.fields[j] -} - -type exprProxy struct { - db *sql.DB - t *exql.Template -} - -func (p *exprProxy) Context() context.Context { - log.Printf("Missing context") - return context.Background() -} - -func (p *exprProxy) StatementExec(ctx context.Context, stmt *exql.Statement, args ...interface{}) (sql.Result, error) { - s, err := stmt.Compile(p.t) - if err != nil { - return nil, err - } - return compat.ExecContext(p.db, ctx, s, args) -} - -func (p *exprProxy) StatementPrepare(ctx context.Context, stmt *exql.Statement) (*sql.Stmt, error) { - s, err := stmt.Compile(p.t) - if err != nil { - return nil, err - } - return compat.PrepareContext(p.db, ctx, s) -} - -func (p *exprProxy) StatementQuery(ctx context.Context, stmt *exql.Statement, args ...interface{}) (*sql.Rows, error) { - s, err := stmt.Compile(p.t) - if err != nil { - return 
nil, err - } - return compat.QueryContext(p.db, ctx, s, args) -} - -func (p *exprProxy) StatementQueryRow(ctx context.Context, stmt *exql.Statement, args ...interface{}) (*sql.Row, error) { - s, err := stmt.Compile(p.t) - if err != nil { - return nil, err - } - return compat.QueryRowContext(p.db, ctx, s, args), nil -} - -var ( - _ = SQLBuilder(&sqlBuilder{}) - _ = exprDB(&exprProxy{}) -) - -func joinArguments(args ...[]interface{}) []interface{} { - total := 0 - for i := range args { - total += len(args[i]) - } - if total == 0 { - return nil - } - - flatten := make([]interface{}, 0, total) - for i := range args { - flatten = append(flatten, args[i]...) - } - return flatten -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/comparison.go b/vendor/upper.io/db.v3/lib/sqlbuilder/comparison.go deleted file mode 100644 index 992b248f847..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/comparison.go +++ /dev/null @@ -1,127 +0,0 @@ -package sqlbuilder - -import ( - "fmt" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter/exql" -) - -var comparisonOperators = map[db.ComparisonOperator]string{ - db.ComparisonOperatorEqual: "=", - db.ComparisonOperatorNotEqual: "!=", - - db.ComparisonOperatorLessThan: "<", - db.ComparisonOperatorGreaterThan: ">", - - db.ComparisonOperatorLessThanOrEqualTo: "<=", - db.ComparisonOperatorGreaterThanOrEqualTo: ">=", - - db.ComparisonOperatorBetween: "BETWEEN", - db.ComparisonOperatorNotBetween: "NOT BETWEEN", - - db.ComparisonOperatorIn: "IN", - db.ComparisonOperatorNotIn: "NOT IN", - - db.ComparisonOperatorIs: "IS", - db.ComparisonOperatorIsNot: "IS NOT", - - db.ComparisonOperatorLike: "LIKE", - db.ComparisonOperatorNotLike: "NOT LIKE", - - db.ComparisonOperatorRegExp: "REGEXP", - db.ComparisonOperatorNotRegExp: "NOT REGEXP", -} - -type hasCustomOperator interface { - CustomOperator() string -} - -type operatorWrapper struct { - tu *templateWithUtils - cv *exql.ColumnValue - - op db.Comparison - v interface{} -} - -func (ow *operatorWrapper) cmp() db.Comparison { - if ow.op != nil { - return ow.op - } - - if ow.cv.Operator != "" { - return db.Op(ow.cv.Operator, ow.v) - } - - if ow.v == nil { - return db.Is(nil) - } - - args, isSlice := toInterfaceArguments(ow.v) - if isSlice { - return db.In(args) - } - - return db.Eq(ow.v) -} - -func (ow *operatorWrapper) preprocess() (string, []interface{}) { - placeholder := "?" - - column, err := ow.cv.Column.Compile(ow.tu.Template) - if err != nil { - panic(fmt.Sprintf("could not compile column: %v", err.Error())) - } - - c := ow.cmp() - - op := ow.tu.comparisonOperatorMapper(c.Operator()) - - var args []interface{} - - switch c.Operator() { - case db.ComparisonOperatorNone: - if c, ok := c.(hasCustomOperator); ok { - op = c.CustomOperator() - } else { - panic("no operator given") - } - case db.ComparisonOperatorIn, db.ComparisonOperatorNotIn: - values := c.Value().([]interface{}) - if len(values) < 1 { - placeholder, args = "(NULL)", []interface{}{} - break - } - placeholder, args = "(?"+strings.Repeat(", ?", len(values)-1)+")", values - case db.ComparisonOperatorIs, db.ComparisonOperatorIsNot: - switch c.Value() { - case nil: - placeholder, args = "NULL", []interface{}{} - case false: - placeholder, args = "FALSE", []interface{}{} - case true: - placeholder, args = "TRUE", []interface{}{} - } - case db.ComparisonOperatorBetween, db.ComparisonOperatorNotBetween: - values := c.Value().([]interface{}) - placeholder, args = "? 
AND ?", []interface{}{values[0], values[1]} - case db.ComparisonOperatorEqual: - v := c.Value() - if b, ok := v.([]byte); ok { - v = string(b) - } - args = []interface{}{v} - } - - if args == nil { - args = []interface{}{c.Value()} - } - - if strings.Contains(op, ":column") { - return strings.Replace(op, ":column", column, -1), args - } - - return column + " " + op + " " + placeholder, args -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/convert.go b/vendor/upper.io/db.v3/lib/sqlbuilder/convert.go deleted file mode 100644 index d0688d738f3..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/convert.go +++ /dev/null @@ -1,144 +0,0 @@ -package sqlbuilder - -import ( - "database/sql/driver" - "reflect" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter/exql" -) - -var ( - sqlDefault = exql.RawValue(`DEFAULT`) -) - -func expandQuery(in string, args []interface{}, fn func(interface{}) (string, []interface{})) (string, []interface{}) { - argn := 0 - argx := make([]interface{}, 0, len(args)) - for i := 0; i < len(in); i++ { - if in[i] != '?' { - continue - } - if len(args) > argn { - k, values := fn(args[argn]) - k, values = expandQuery(k, values, fn) - - if k != "" { - in = in[:i] + k + in[i+1:] - i += len(k) - 1 - } - if len(values) > 0 { - argx = append(argx, values...) - } - argn++ - } - } - if len(argx) < len(args) { - argx = append(argx, args[argn:]...) - } - return in, argx -} - -// toInterfaceArguments converts the given value into an array of interfaces. -func toInterfaceArguments(value interface{}) (args []interface{}, isSlice bool) { - v := reflect.ValueOf(value) - - if value == nil { - return nil, false - } - - switch t := value.(type) { - case driver.Valuer: - return []interface{}{t}, false - } - - if v.Type().Kind() == reflect.Slice { - var i, total int - - // Byte slice gets transformed into a string. - if v.Type().Elem().Kind() == reflect.Uint8 { - return []interface{}{string(value.([]byte))}, false - } - - total = v.Len() - args = make([]interface{}, total) - for i = 0; i < total; i++ { - args[i] = v.Index(i).Interface() - } - return args, true - } - - return []interface{}{value}, false -} - -// toColumnsValuesAndArguments maps the given columnNames and columnValues into -// expr's Columns and Values, it also extracts and returns query arguments. -func toColumnsValuesAndArguments(columnNames []string, columnValues []interface{}) (*exql.Columns, *exql.Values, []interface{}, error) { - var arguments []interface{} - - columns := new(exql.Columns) - - columns.Columns = make([]exql.Fragment, 0, len(columnNames)) - for i := range columnNames { - columns.Columns = append(columns.Columns, exql.ColumnWithName(columnNames[i])) - } - - values := new(exql.Values) - - arguments = make([]interface{}, 0, len(columnValues)) - values.Values = make([]exql.Fragment, 0, len(columnValues)) - - for i := range columnValues { - switch v := columnValues[i].(type) { - case *exql.Raw, exql.Raw: - values.Values = append(values.Values, sqlDefault) - case *exql.Value: - // Adding value. - values.Values = append(values.Values, v) - case exql.Value: - // Adding value. - values.Values = append(values.Values, &v) - default: - // Adding both value and placeholder. 
- values.Values = append(values.Values, sqlPlaceholder) - arguments = append(arguments, v) - } - } - - return columns, values, arguments, nil -} - -func preprocessFn(arg interface{}) (string, []interface{}) { - values, isSlice := toInterfaceArguments(arg) - - if isSlice { - if len(values) == 0 { - return `(NULL)`, nil - } - return `(?` + strings.Repeat(`, ?`, len(values)-1) + `)`, values - } - - if len(values) == 1 { - switch t := arg.(type) { - case db.RawValue: - return Preprocess(t.Raw(), t.Arguments()) - case compilable: - c, err := t.Compile() - if err == nil { - return `(` + c + `)`, t.Arguments() - } - panic(err.Error()) - } - } else if len(values) == 0 { - return `NULL`, nil - } - - return "", []interface{}{arg} -} - -// Preprocess expands arguments that needs to be expanded and compiles a query -// into a single string. -func Preprocess(in string, args []interface{}) (string, []interface{}) { - return expandQuery(in, args, preprocessFn) -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/custom_types.go b/vendor/upper.io/db.v3/lib/sqlbuilder/custom_types.go deleted file mode 100644 index bf4b0b0a7e6..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/custom_types.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqlbuilder - -import ( - "database/sql" - "database/sql/driver" - - "reflect" -) - -var ( - // ValuerType is the reflection type for the driver.Valuer interface. - ValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() - - // ScannerType is the reflection type for the sql.Scanner interface. - ScannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() - - // ValueWrapperType is the reflection type for the sql.ValueWrapper interface. - ValueWrapperType = reflect.TypeOf((*ValueWrapper)(nil)).Elem() -) - -// ValueWrapper defines a method WrapValue that query arguments can use to wrap -// themselves around helper types right before being used in a query. -// -// Example: -// -// func (a MyCustomArray) WrapValue(value interface{}) interface{} { -// // postgresql.Array adds a driver.Valuer and sql.Scanner around -// // custom arrays. -// return postgresql.Array(values) -// } -type ValueWrapper interface { - WrapValue(value interface{}) interface{} -} - -// ScannerValuer represents a value that satisfies both driver.Valuer and -// sql.Scanner interfaces. 
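
Preprocess, deleted just above, is the placeholder-expansion entry point the builder relies on: a slice argument grows its matching ? into a (?, ?, ...) group with the elements as arguments, and an empty slice is rendered as (NULL). A small sketch (query text and names are hypothetical; the behavior follows the deleted preprocessFn shown above):

    package main

    import (
        "fmt"

        "upper.io/db.v3/lib/sqlbuilder"
    )

    func main() {
        // A slice argument expands its placeholder; its elements become the args.
        q, args := sqlbuilder.Preprocess(
            `SELECT id FROM people WHERE name IN ?`,
            []interface{}{[]string{"Ada", "Grace"}},
        )
        fmt.Println(q)    // SELECT id FROM people WHERE name IN (?, ?)
        fmt.Println(args) // [Ada Grace]

        // An empty slice can never match, so it is rendered as (NULL).
        q, args = sqlbuilder.Preprocess(
            `SELECT id FROM people WHERE name IN ?`,
            []interface{}{[]string{}},
        )
        fmt.Println(q, args) // SELECT id FROM people WHERE name IN (NULL) []
    }
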
-type ScannerValuer interface { - driver.Valuer - sql.Scanner -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/delete.go b/vendor/upper.io/db.v3/lib/sqlbuilder/delete.go deleted file mode 100644 index 39441852ca9..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/delete.go +++ /dev/null @@ -1,194 +0,0 @@ -package sqlbuilder - -import ( - "context" - "database/sql" - - "upper.io/db.v3/internal/immutable" - "upper.io/db.v3/internal/sqladapter/exql" -) - -type deleterQuery struct { - table string - limit int - - where *exql.Where - whereArgs []interface{} - - amendFn func(string) string -} - -func (dq *deleterQuery) and(b *sqlBuilder, terms ...interface{}) error { - where, whereArgs := b.t.toWhereWithArguments(terms) - - if dq.where == nil { - dq.where, dq.whereArgs = &exql.Where{}, []interface{}{} - } - dq.where.Append(&where) - dq.whereArgs = append(dq.whereArgs, whereArgs...) - - return nil -} - -func (dq *deleterQuery) statement() *exql.Statement { - stmt := &exql.Statement{ - Type: exql.Delete, - Table: exql.TableWithName(dq.table), - } - - if dq.where != nil { - stmt.Where = dq.where - } - - if dq.limit != 0 { - stmt.Limit = exql.Limit(dq.limit) - } - - stmt.SetAmendment(dq.amendFn) - - return stmt -} - -type deleter struct { - builder *sqlBuilder - - fn func(*deleterQuery) error - prev *deleter -} - -var _ = immutable.Immutable(&deleter{}) - -func (del *deleter) SQLBuilder() *sqlBuilder { - if del.prev == nil { - return del.builder - } - return del.prev.SQLBuilder() -} - -func (del *deleter) template() *exql.Template { - return del.SQLBuilder().t.Template -} - -func (del *deleter) String() string { - s, err := del.Compile() - if err != nil { - panic(err.Error()) - } - return prepareQueryForDisplay(s) -} - -func (del *deleter) setTable(table string) *deleter { - return del.frame(func(uq *deleterQuery) error { - uq.table = table - return nil - }) -} - -func (del *deleter) frame(fn func(*deleterQuery) error) *deleter { - return &deleter{prev: del, fn: fn} -} - -func (del *deleter) Where(terms ...interface{}) Deleter { - return del.frame(func(dq *deleterQuery) error { - dq.where, dq.whereArgs = &exql.Where{}, []interface{}{} - return dq.and(del.SQLBuilder(), terms...) - }) -} - -func (del *deleter) And(terms ...interface{}) Deleter { - return del.frame(func(dq *deleterQuery) error { - return dq.and(del.SQLBuilder(), terms...) 
- }) -} - -func (del *deleter) Limit(limit int) Deleter { - return del.frame(func(dq *deleterQuery) error { - dq.limit = limit - return nil - }) -} - -func (del *deleter) Amend(fn func(string) string) Deleter { - return del.frame(func(dq *deleterQuery) error { - dq.amendFn = fn - return nil - }) -} - -func (dq *deleterQuery) arguments() []interface{} { - return joinArguments(dq.whereArgs) -} - -func (del *deleter) Arguments() []interface{} { - dq, err := del.build() - if err != nil { - return nil - } - return dq.arguments() -} - -func (del *deleter) Prepare() (*sql.Stmt, error) { - return del.PrepareContext(del.SQLBuilder().sess.Context()) -} - -func (del *deleter) PrepareContext(ctx context.Context) (*sql.Stmt, error) { - dq, err := del.build() - if err != nil { - return nil, err - } - return del.SQLBuilder().sess.StatementPrepare(ctx, dq.statement()) -} - -func (del *deleter) Exec() (sql.Result, error) { - return del.ExecContext(del.SQLBuilder().sess.Context()) -} - -func (del *deleter) ExecContext(ctx context.Context) (sql.Result, error) { - dq, err := del.build() - if err != nil { - return nil, err - } - return del.SQLBuilder().sess.StatementExec(ctx, dq.statement(), dq.arguments()...) -} - -func (del *deleter) statement() (*exql.Statement, error) { - iq, err := del.build() - if err != nil { - return nil, err - } - return iq.statement(), nil -} - -func (del *deleter) build() (*deleterQuery, error) { - dq, err := immutable.FastForward(del) - if err != nil { - return nil, err - } - return dq.(*deleterQuery), nil -} - -func (del *deleter) Compile() (string, error) { - s, err := del.statement() - if err != nil { - return "", err - } - return s.Compile(del.template()) -} - -func (del *deleter) Prev() immutable.Immutable { - if del == nil { - return nil - } - return del.prev -} - -func (del *deleter) Fn(in interface{}) error { - if del.fn == nil { - return nil - } - return del.fn(in.(*deleterQuery)) -} - -func (del *deleter) Base() interface{} { - return &deleterQuery{} -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/errors.go b/vendor/upper.io/db.v3/lib/sqlbuilder/errors.go deleted file mode 100644 index 5c5a723df8b..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package sqlbuilder - -import ( - "errors" -) - -// Common error messages. -var ( - ErrExpectingPointer = errors.New(`argument must be an address`) - ErrExpectingSlicePointer = errors.New(`argument must be a slice address`) - ErrExpectingSliceMapStruct = errors.New(`argument must be a slice address of maps or structs`) - ErrExpectingMapOrStruct = errors.New(`argument must be either a map or a struct`) - ErrExpectingPointerToEitherMapOrStruct = errors.New(`expecting a pointer to either a map or a struct`) -) diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/fetch.go b/vendor/upper.io/db.v3/lib/sqlbuilder/fetch.go deleted file mode 100644 index 00e02192299..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/fetch.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqlbuilder - -import ( - "reflect" - - db "upper.io/db.v3" - "upper.io/db.v3/lib/reflectx" -) - -type hasConvertValues interface { - ConvertValues(values []interface{}) []interface{} -} - -var mapper = reflectx.NewMapper("db") - -// fetchRow receives a *sql.Rows value and tries to map all the rows into a -// single struct given by the pointer `dst`. -func fetchRow(iter *iterator, dst interface{}) error { - var columns []string - var err error - - rows := iter.cursor - - dstv := reflect.ValueOf(dst) - - if dstv.IsNil() || dstv.Kind() != reflect.Ptr { - return ErrExpectingPointer - } - - itemV := dstv.Elem() - - if columns, err = rows.Columns(); err != nil { - return err - } - - reset(dst) - - next := rows.Next() - - if !next { - if err = rows.Err(); err != nil { - return err - } - return db.ErrNoMoreRows - } - - itemT := itemV.Type() - item, err := fetchResult(iter, itemT, columns) - - if err != nil { - return err - } - - if itemT.Kind() == reflect.Ptr { - itemV.Set(item) - } else { - itemV.Set(reflect.Indirect(item)) - } - - return nil -} - -// fetchRows receives a *sql.Rows value and tries to map all the rows into a -// slice of structs given by the pointer `dst`. -func fetchRows(iter *iterator, dst interface{}) error { - var err error - rows := iter.cursor - defer rows.Close() - - // Destination. 
- dstv := reflect.ValueOf(dst) - - if dstv.IsNil() || dstv.Kind() != reflect.Ptr { - return ErrExpectingPointer - } - - if dstv.Elem().Kind() != reflect.Slice { - return ErrExpectingSlicePointer - } - - if dstv.Kind() != reflect.Ptr || dstv.Elem().Kind() != reflect.Slice || dstv.IsNil() { - return ErrExpectingSliceMapStruct - } - - var columns []string - if columns, err = rows.Columns(); err != nil { - return err - } - - slicev := dstv.Elem() - itemT := slicev.Type().Elem() - - reset(dst) - - for rows.Next() { - item, err := fetchResult(iter, itemT, columns) - if err != nil { - return err - } - if itemT.Kind() == reflect.Ptr { - slicev = reflect.Append(slicev, item) - } else { - slicev = reflect.Append(slicev, reflect.Indirect(item)) - } - } - - dstv.Elem().Set(slicev) - - return rows.Err() -} - -func fetchResult(iter *iterator, itemT reflect.Type, columns []string) (reflect.Value, error) { - var item reflect.Value - var err error - rows := iter.cursor - - objT := itemT - - switch objT.Kind() { - case reflect.Map: - item = reflect.MakeMap(objT) - case reflect.Struct: - item = reflect.New(objT) - case reflect.Ptr: - objT = itemT.Elem() - if objT.Kind() != reflect.Struct { - return item, ErrExpectingMapOrStruct - } - item = reflect.New(objT) - default: - return item, ErrExpectingMapOrStruct - } - - switch objT.Kind() { - case reflect.Struct: - - values := make([]interface{}, len(columns)) - typeMap := mapper.TypeMap(itemT) - fieldMap := typeMap.Names - - for i, k := range columns { - fi, ok := fieldMap[k] - if !ok { - values[i] = new(interface{}) - continue - } - - // Check for deprecated jsonb tag. - if _, hasJSONBTag := fi.Options["jsonb"]; hasJSONBTag { - return item, errDeprecatedJSONBTag - } - - f := reflectx.FieldByIndexes(item, fi.Index) - values[i] = f.Addr().Interface() - - if u, ok := values[i].(db.Unmarshaler); ok { - values[i] = scanner{u} - } - } - - if converter, ok := iter.sess.(hasConvertValues); ok { - values = converter.ConvertValues(values) - } - - if err = rows.Scan(values...); err != nil { - return item, err - } - case reflect.Map: - - columns, err := rows.Columns() - if err != nil { - return item, err - } - - values := make([]interface{}, len(columns)) - for i := range values { - if itemT.Elem().Kind() == reflect.Interface { - values[i] = new(interface{}) - } else { - values[i] = reflect.New(itemT.Elem()).Interface() - } - } - - if err = rows.Scan(values...); err != nil { - return item, err - } - - for i, column := range columns { - item.SetMapIndex(reflect.ValueOf(column), reflect.Indirect(reflect.ValueOf(values[i]))) - } - } - - return item, nil -} - -func reset(data interface{}) { - // Resetting element. 
- v := reflect.ValueOf(data).Elem() - t := v.Type() - - var z reflect.Value - - switch v.Kind() { - case reflect.Slice: - z = reflect.MakeSlice(t, 0, v.Cap()) - default: - z = reflect.Zero(t) - } - - v.Set(z) -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/insert.go b/vendor/upper.io/db.v3/lib/sqlbuilder/insert.go deleted file mode 100644 index 6ebcccbe135..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/insert.go +++ /dev/null @@ -1,283 +0,0 @@ -package sqlbuilder - -import ( - "context" - "database/sql" - - "upper.io/db.v3/internal/immutable" - "upper.io/db.v3/internal/sqladapter/exql" -) - -type inserterQuery struct { - table string - enqueuedValues [][]interface{} - returning []exql.Fragment - columns []exql.Fragment - values []*exql.Values - arguments []interface{} - amendFn func(string) string -} - -func (iq *inserterQuery) processValues() ([]*exql.Values, []interface{}, error) { - var values []*exql.Values - var arguments []interface{} - - var mapOptions *MapOptions - if len(iq.enqueuedValues) > 1 { - mapOptions = &MapOptions{IncludeZeroed: true, IncludeNil: true} - } - - for _, enqueuedValue := range iq.enqueuedValues { - if len(enqueuedValue) == 1 { - // If and only if we passed one argument to Values. - ff, vv, err := Map(enqueuedValue[0], mapOptions) - - if err == nil { - // If we didn't have any problem with mapping we can convert it into - // columns and values. - columns, vals, args, _ := toColumnsValuesAndArguments(ff, vv) - - values, arguments = append(values, vals), append(arguments, args...) - - if len(iq.columns) == 0 { - iq.columns = append(iq.columns, columns.Columns...) - } - continue - } - - // The only error we can expect without exiting is this argument not - // being a map or struct, in which case we can continue. - if err != ErrExpectingPointerToEitherMapOrStruct { - return nil, nil, err - } - } - - if len(iq.columns) == 0 || len(enqueuedValue) == len(iq.columns) { - arguments = append(arguments, enqueuedValue...) - - l := len(enqueuedValue) - placeholders := make([]exql.Fragment, l) - for i := 0; i < l; i++ { - placeholders[i] = exql.RawValue(`?`) - } - values = append(values, exql.NewValueGroup(placeholders...)) - } - } - - return values, arguments, nil -} - -func (iq *inserterQuery) statement() *exql.Statement { - stmt := &exql.Statement{ - Type: exql.Insert, - Table: exql.TableWithName(iq.table), - } - - if len(iq.values) > 0 { - stmt.Values = exql.JoinValueGroups(iq.values...) - } - - if len(iq.columns) > 0 { - stmt.Columns = exql.JoinColumns(iq.columns...) - } - - if len(iq.returning) > 0 { - stmt.Returning = exql.ReturningColumns(iq.returning...) 
- } - - stmt.SetAmendment(iq.amendFn) - - return stmt -} - -type inserter struct { - builder *sqlBuilder - - fn func(*inserterQuery) error - prev *inserter -} - -var _ = immutable.Immutable(&inserter{}) - -func (ins *inserter) SQLBuilder() *sqlBuilder { - if ins.prev == nil { - return ins.builder - } - return ins.prev.SQLBuilder() -} - -func (ins *inserter) template() *exql.Template { - return ins.SQLBuilder().t.Template -} - -func (ins *inserter) String() string { - s, err := ins.Compile() - if err != nil { - panic(err.Error()) - } - return prepareQueryForDisplay(s) -} - -func (ins *inserter) frame(fn func(*inserterQuery) error) *inserter { - return &inserter{prev: ins, fn: fn} -} - -func (ins *inserter) Batch(n int) *BatchInserter { - return newBatchInserter(ins, n) -} - -func (ins *inserter) Amend(fn func(string) string) Inserter { - return ins.frame(func(iq *inserterQuery) error { - iq.amendFn = fn - return nil - }) -} - -func (ins *inserter) Arguments() []interface{} { - iq, err := ins.build() - if err != nil { - return nil - } - return iq.arguments -} - -func (ins *inserter) Returning(columns ...string) Inserter { - return ins.frame(func(iq *inserterQuery) error { - columnsToFragments(&iq.returning, columns) - return nil - }) -} - -func (ins *inserter) Exec() (sql.Result, error) { - return ins.ExecContext(ins.SQLBuilder().sess.Context()) -} - -func (ins *inserter) ExecContext(ctx context.Context) (sql.Result, error) { - iq, err := ins.build() - if err != nil { - return nil, err - } - return ins.SQLBuilder().sess.StatementExec(ctx, iq.statement(), iq.arguments...) -} - -func (ins *inserter) Prepare() (*sql.Stmt, error) { - return ins.PrepareContext(ins.SQLBuilder().sess.Context()) -} - -func (ins *inserter) PrepareContext(ctx context.Context) (*sql.Stmt, error) { - iq, err := ins.build() - if err != nil { - return nil, err - } - return ins.SQLBuilder().sess.StatementPrepare(ctx, iq.statement()) -} - -func (ins *inserter) Query() (*sql.Rows, error) { - return ins.QueryContext(ins.SQLBuilder().sess.Context()) -} - -func (ins *inserter) QueryContext(ctx context.Context) (*sql.Rows, error) { - iq, err := ins.build() - if err != nil { - return nil, err - } - return ins.SQLBuilder().sess.StatementQuery(ctx, iq.statement(), iq.arguments...) -} - -func (ins *inserter) QueryRow() (*sql.Row, error) { - return ins.QueryRowContext(ins.SQLBuilder().sess.Context()) -} - -func (ins *inserter) QueryRowContext(ctx context.Context) (*sql.Row, error) { - iq, err := ins.build() - if err != nil { - return nil, err - } - return ins.SQLBuilder().sess.StatementQueryRow(ctx, iq.statement(), iq.arguments...) 
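
The Batch flow layered on this inserter (see the deleted BatchInserter in batch.go above) queues value groups through a buffered channel, so the producer has to run concurrently with Wait once the row count exceeds the batch size. A sketch under stated assumptions: bulkInsert, the person type, and the people table are hypothetical, while InsertInto, Columns, Batch, Values, Done, and Wait are the deleted APIs themselves:

    package main

    import (
        "log"

        "upper.io/db.v3/lib/sqlbuilder"
    )

    type person struct{ First, Last string }

    // bulkInsert is a hypothetical helper; b is any live SQLBuilder session.
    func bulkInsert(b sqlbuilder.SQLBuilder, people []person) error {
        batch := b.InsertInto("people").
            Columns("first_name", "last_name").
            Batch(50) // flush an INSERT every 50 value groups

        go func() {
            defer batch.Done() // closes the queue so Wait can finish draining it
            for _, p := range people {
                batch.Values(p.First, p.Last)
            }
        }()

        return batch.Wait() // executes one INSERT per accumulated batch
    }

    func main() { log.Println("bulkInsert requires a live database session") }
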
-} - -func (ins *inserter) Iterator() Iterator { - return ins.IteratorContext(ins.SQLBuilder().sess.Context()) -} - -func (ins *inserter) IteratorContext(ctx context.Context) Iterator { - rows, err := ins.QueryContext(ctx) - return &iterator{ins.SQLBuilder().sess, rows, err} -} - -func (ins *inserter) Into(table string) Inserter { - return ins.frame(func(iq *inserterQuery) error { - iq.table = table - return nil - }) -} - -func (ins *inserter) Columns(columns ...string) Inserter { - return ins.frame(func(iq *inserterQuery) error { - columnsToFragments(&iq.columns, columns) - return nil - }) -} - -func (ins *inserter) Values(values ...interface{}) Inserter { - return ins.frame(func(iq *inserterQuery) error { - iq.enqueuedValues = append(iq.enqueuedValues, values) - return nil - }) -} - -func (ins *inserter) statement() (*exql.Statement, error) { - iq, err := ins.build() - if err != nil { - return nil, err - } - return iq.statement(), nil -} - -func (ins *inserter) build() (*inserterQuery, error) { - iq, err := immutable.FastForward(ins) - if err != nil { - return nil, err - } - ret := iq.(*inserterQuery) - ret.values, ret.arguments, err = ret.processValues() - if err != nil { - return nil, err - } - return ret, nil -} - -func (ins *inserter) Compile() (string, error) { - s, err := ins.statement() - if err != nil { - return "", err - } - return s.Compile(ins.template()) -} - -func (ins *inserter) Prev() immutable.Immutable { - if ins == nil { - return nil - } - return ins.prev -} - -func (ins *inserter) Fn(in interface{}) error { - if ins.fn == nil { - return nil - } - return ins.fn(in.(*inserterQuery)) -} - -func (ins *inserter) Base() interface{} { - return &inserterQuery{} -} - -func columnsToFragments(dst *[]exql.Fragment, columns []string) { - l := len(columns) - f := make([]exql.Fragment, l) - for i := 0; i < l; i++ { - f[i] = exql.ColumnWithName(columns[i]) - } - *dst = append(*dst, f...) -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/interfaces.go b/vendor/upper.io/db.v3/lib/sqlbuilder/interfaces.go deleted file mode 100644 index 35e0ce5c066..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/interfaces.go +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright (c) 2015 The upper.io/db.v3/lib/sqlbuilder authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqlbuilder - -import ( - "context" - "database/sql" - "fmt" -) - -// SQLBuilder defines methods that can be used to build a SQL query with -// chainable method calls. 
-// -// Queries are immutable, so every call to any method will return a new -// pointer, if you want to build a query using variables you need to reassign -// them, like this: -// -// a = builder.Select("name").From("foo") // "a" is created -// -// a.Where(...) // No effect, the value returned from Where is ignored. -// -// a = a.Where(...) // "a" is reassigned and points to a different address. -// -type SQLBuilder interface { - - // Select initializes and returns a Selector, it accepts column names as - // parameters. - // - // The returned Selector does not initially point to any table, a call to - // From() is required after Select() to complete a valid query. - // - // Example: - // - // q := sqlbuilder.Select("first_name", "last_name").From("people").Where(...) - Select(columns ...interface{}) Selector - - // SelectFrom creates a Selector that selects all columns (like SELECT *) - // from the given table. - // - // Example: - // - // q := sqlbuilder.SelectFrom("people").Where(...) - SelectFrom(table ...interface{}) Selector - - // InsertInto prepares and returns an Inserter targeted at the given table. - // - // Example: - // - // q := sqlbuilder.InsertInto("books").Columns(...).Values(...) - InsertInto(table string) Inserter - - // DeleteFrom prepares a Deleter targeted at the given table. - // - // Example: - // - // q := sqlbuilder.DeleteFrom("tasks").Where(...) - DeleteFrom(table string) Deleter - - // Update prepares and returns an Updater targeted at the given table. - // - // Example: - // - // q := sqlbuilder.Update("profile").Set(...).Where(...) - Update(table string) Updater - - // Exec executes a SQL query that does not return any rows, like sql.Exec. - // Queries can be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.Exec(`INSERT INTO books (title) VALUES("La Ciudad y los Perros")`) - Exec(query interface{}, args ...interface{}) (sql.Result, error) - - // ExecContext executes a SQL query that does not return any rows, like sql.ExecContext. - // Queries can be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.ExecContext(ctx, `INSERT INTO books (title) VALUES(?)`, "La Ciudad y los Perros") - ExecContext(ctx context.Context, query interface{}, args ...interface{}) (sql.Result, error) - - // Prepare creates a prepared statement for later queries or executions. The - // caller must call the statement's Close method when the statement is no - // longer needed. - Prepare(query interface{}) (*sql.Stmt, error) - - // Prepare creates a prepared statement on the guiven context for later - // queries or executions. The caller must call the statement's Close method - // when the statement is no longer needed. - PrepareContext(ctx context.Context, query interface{}) (*sql.Stmt, error) - - // Query executes a SQL query that returns rows, like sql.Query. Queries can - // be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.Query(`SELECT * FROM people WHERE name = "Mateo"`) - Query(query interface{}, args ...interface{}) (*sql.Rows, error) - - // QueryContext executes a SQL query that returns rows, like - // sql.QueryContext. Queries can be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.QueryContext(ctx, `SELECT * FROM people WHERE name = ?`, "Mateo") - QueryContext(ctx context.Context, query interface{}, args ...interface{}) (*sql.Rows, error) - - // QueryRow executes a SQL query that returns one row, like sql.QueryRow. 
- // Queries can be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.QueryRow(`SELECT * FROM people WHERE name = "Haruki" AND last_name = "Murakami" LIMIT 1`) - QueryRow(query interface{}, args ...interface{}) (*sql.Row, error) - - // QueryRowContext executes a SQL query that returns one row, like - // sql.QueryRowContext. Queries can be either strings or upper-db statements. - // - // Example: - // - // sqlbuilder.QueryRowContext(ctx, `SELECT * FROM people WHERE name = "Haruki" AND last_name = "Murakami" LIMIT 1`) - QueryRowContext(ctx context.Context, query interface{}, args ...interface{}) (*sql.Row, error) - - // Iterator executes a SQL query that returns rows and creates an Iterator - // with it. - // - // Example: - // - // sqlbuilder.Iterator(`SELECT * FROM people WHERE name LIKE "M%"`) - Iterator(query interface{}, args ...interface{}) Iterator - - // IteratorContext executes a SQL query that returns rows and creates an Iterator - // with it. - // - // Example: - // - // sqlbuilder.IteratorContext(ctx, `SELECT * FROM people WHERE name LIKE "M%"`) - IteratorContext(ctx context.Context, query interface{}, args ...interface{}) Iterator -} - -// Selector represents a SELECT statement. -type Selector interface { - // Columns defines which columns to retrive. - // - // You should call From() after Columns() if you want to query data from an - // specific table. - // - // s.Columns("name", "last_name").From(...) - // - // It is also possible to use an alias for the column, this could be handy if - // you plan to use the alias later, use the "AS" keyword to denote an alias. - // - // s.Columns("name AS n") - // - // or the shortcut: - // - // s.Columns("name n") - // - // If you don't want the column to be escaped use the db.Raw - // function. - // - // s.Columns(db.Raw("MAX(id)")) - // - // The above statement is equivalent to: - // - // s.Columns(db.Func("MAX", "id")) - Columns(columns ...interface{}) Selector - - // From represents a FROM clause and is tipically used after Columns(). - // - // FROM defines from which table data is going to be retrieved - // - // s.Columns(...).From("people") - // - // It is also possible to use an alias for the table, this could be handy if - // you plan to use the alias later: - // - // s.Columns(...).From("people AS p").Where("p.name = ?", ...) - // - // Or with the shortcut: - // - // s.Columns(...).From("people p").Where("p.name = ?", ...) - From(tables ...interface{}) Selector - - // Distict represents a DISTINCT clause - // - // DISTINCT is used to ask the database to return only values that are - // different. - Distinct(columns ...interface{}) Selector - - // As defines an alias for a table. - As(string) Selector - - // Where specifies the conditions that columns must match in order to be - // retrieved. - // - // Where accepts raw strings and fmt.Stringer to define conditions and - // interface{} to specify parameters. Be careful not to embed any parameters - // within the SQL part as that could lead to security problems. You can use - // que question mark (?) as placeholder for parameters. - // - // s.Where("name = ?", "max") - // - // s.Where("name = ? AND last_name = ?", "Mary", "Doe") - // - // s.Where("last_name IS NULL") - // - // You can also use other types of parameters besides only strings, like: - // - // s.Where("online = ? AND last_logged <= ?", true, time.Now()) - // - // and Where() will transform them into strings before feeding them to the - // database. 
-
-	// Where specifies the conditions that columns must match in order to be
-	// retrieved.
-	//
-	// Where accepts raw strings and fmt.Stringer to define conditions and
-	// interface{} to specify parameters. Be careful not to embed any parameters
-	// within the SQL part as that could lead to security problems. You can use
-	// the question mark (?) as a placeholder for parameters.
-	//
-	//	s.Where("name = ?", "max")
-	//
-	//	s.Where("name = ? AND last_name = ?", "Mary", "Doe")
-	//
-	//	s.Where("last_name IS NULL")
-	//
-	// You can also use other types of parameters besides only strings, like:
-	//
-	//	s.Where("online = ? AND last_logged <= ?", true, time.Now())
-	//
-	// and Where() will transform them into strings before feeding them to the
-	// database.
-	//
-	// When an unknown type is provided, Where() will first try to match it with
-	// the Marshaler interface, then with fmt.Stringer and finally, if the
-	// argument does not satisfy any of those interfaces, Where() will use
-	// fmt.Sprintf("%v", arg) to transform the type into a string.
-	//
-	// Subsequent calls to Where() will overwrite previously set conditions; if
-	// you want these new conditions to be appended use And() instead.
-	Where(conds ...interface{}) Selector
-
-	// And appends more constraints to the WHERE clause without overwriting
-	// conditions that have been already set.
-	And(conds ...interface{}) Selector
-
-	// GroupBy represents a GROUP BY statement.
-	//
-	// GROUP BY defines which columns should be used to aggregate and group
-	// results.
-	//
-	//	s.GroupBy("country_id")
-	//
-	// GroupBy accepts more than one column:
-	//
-	//	s.GroupBy("country_id", "city_id")
-	GroupBy(columns ...interface{}) Selector
-
-	// Having(...interface{}) Selector
-
-	// OrderBy represents an ORDER BY statement.
-	//
-	// ORDER BY is used to define which columns are going to be used to sort
-	// results.
-	//
-	// Use the column name to sort results in ascending order.
-	//
-	//	// "last_name" ASC
-	//	s.OrderBy("last_name")
-	//
-	// Prefix the column name with the minus sign (-) to sort results in
-	// descending order.
-	//
-	//	// "last_name" DESC
-	//	s.OrderBy("-last_name")
-	//
-	// If you would rather be very explicit, you can also use ASC and DESC.
-	//
-	//	s.OrderBy("last_name ASC")
-	//
-	//	s.OrderBy("last_name DESC", "name ASC")
-	OrderBy(columns ...interface{}) Selector
-
-	// Join represents a JOIN statement.
-	//
-	// JOIN statements are used to define external tables that the user wants to
-	// include as part of the result.
-	//
-	// You can use the On() method after Join() to define the conditions of the
-	// join.
-	//
-	//	s.Join("author").On("author.id = book.author_id")
-	//
-	// If you don't specify conditions for the join, a NATURAL JOIN will be used.
-	//
-	// On() accepts the same arguments as Where().
-	//
-	// You can also use Using() after Join().
-	//
-	//	s.Join("employee").Using("department_id")
-	Join(table ...interface{}) Selector
-
-	// FullJoin is like Join() but with FULL JOIN.
-	FullJoin(...interface{}) Selector
-
-	// CrossJoin is like Join() but with CROSS JOIN.
-	CrossJoin(...interface{}) Selector
-
-	// RightJoin is like Join() but with RIGHT JOIN.
-	RightJoin(...interface{}) Selector
-
-	// LeftJoin is like Join() but with LEFT JOIN.
-	LeftJoin(...interface{}) Selector
-
-	// Using represents the USING clause.
-	//
-	// USING is used to specify columns to join results.
-	//
-	//	s.LeftJoin(...).Using("country_id")
-	Using(...interface{}) Selector
-
-	// On represents the ON clause.
-	//
-	// ON is used to define conditions on a join.
-	//
-	//	s.Join(...).On("b.author_id = a.id")
-	On(...interface{}) Selector
-
-	// Limit represents the LIMIT parameter.
-	//
-	// LIMIT defines the maximum number of rows to return from the table. A
-	// negative limit cancels any previous limit settings.
-	//
-	//	s.Limit(42)
-	Limit(int) Selector
-
-	// Offset represents the OFFSET parameter.
-	//
-	// OFFSET defines how many results are going to be skipped before starting to
-	// return results. A negative offset cancels any previous offset settings.
-	//
-	//	s.Offset(56)
-	Offset(int) Selector
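The join and ordering methods above compose the same way as the rest of the builder. A sketch combining Join()/On(), the "-" descending prefix, and Limit()/Offset(); the Book type and schema are made up for illustration.

type Book struct {
	ID     int64  `db:"id"`
	Title  string `db:"title"`
	Author string `db:"author"`
}

func pageOfBooks(sess sqlbuilder.Database) ([]Book, error) {
	var books []Book
	err := sess.
		Select("b.id", "b.title", "a.name AS author").
		From("books b").
		Join("authors a").On("a.id = b.author_id").
		OrderBy("-b.published_at"). // "-" prefix means DESC, as documented above
		Limit(20).                  // at most 20 rows
		Offset(40).                 // skip the first 40
		All(&books)
	return books, err
}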
-
-	// Amend lets you alter the query's text just before sending it to the
-	// database server.
-	Amend(func(queryIn string) (queryOut string)) Selector
-
-	// Paginate returns a paginator that can display a paginated list of items.
-	// Paginators ignore previous Offset and Limit settings. Page numbering
-	// starts at 1.
-	Paginate(uint) Paginator
-
-	// Iterator provides methods to iterate over the results returned by the
-	// Selector.
-	Iterator() Iterator
-
-	// IteratorContext provides methods to iterate over the results returned by
-	// the Selector.
-	IteratorContext(ctx context.Context) Iterator
-
-	// Preparer provides methods for creating prepared statements.
-	Preparer
-
-	// Getter provides methods to compile and execute a query that returns
-	// results.
-	Getter
-
-	// ResultMapper provides methods to retrieve and map results.
-	ResultMapper
-
-	// fmt.Stringer provides `String() string`, you can use `String()` to compile
-	// the `Selector` into a string.
-	fmt.Stringer
-
-	// Arguments returns the arguments that are prepared for this query.
-	Arguments() []interface{}
-}
-
-// Inserter represents an INSERT statement.
-type Inserter interface {
-	// Columns represents the COLUMNS clause.
-	//
-	// COLUMNS defines the columns that we are going to provide values for.
-	//
-	//	i.Columns("name", "last_name").Values(...)
-	Columns(...string) Inserter
-
-	// Values represents the VALUES clause.
-	//
-	// VALUES defines the values of the columns.
-	//
-	//	i.Columns(...).Values("María", "Méndez")
-	//
-	//	i.Values(map[string]string{"name": "María"})
-	Values(...interface{}) Inserter
-
-	// Arguments returns the arguments that are prepared for this query.
-	Arguments() []interface{}
-
-	// Returning represents a RETURNING clause.
-	//
-	// RETURNING specifies which columns should be returned after INSERT.
-	//
-	// RETURNING may not be supported by all SQL databases.
-	Returning(columns ...string) Inserter
-
-	// Iterator provides methods to iterate over the results returned by the
-	// Inserter. This is only possible when using Returning().
-	Iterator() Iterator
-
-	// IteratorContext provides methods to iterate over the results returned by
-	// the Inserter. This is only possible when using Returning().
-	IteratorContext(ctx context.Context) Iterator
-
-	// Amend lets you alter the query's text just before sending it to the
-	// database server.
-	Amend(func(queryIn string) (queryOut string)) Inserter
-
-	// Batch provides a BatchInserter that can be used to insert many elements at
-	// once by issuing several calls to Values(). It accepts a size parameter
-	// which defines the batch size. If size is < 1, the batch size is set to 1.
-	Batch(size int) *BatchInserter
-
-	// Execer provides the Exec method.
-	Execer
-
-	// Preparer provides methods for creating prepared statements.
-	Preparer
-
-	// Getter provides methods to return query results from INSERT statements
-	// that support such a feature (e.g.: queries with Returning).
-	Getter
-
-	// fmt.Stringer provides `String() string`, you can use `String()` to compile
-	// the `Inserter` into a string.
-	fmt.Stringer
-}
-
-// Deleter represents a DELETE statement.
-type Deleter interface {
-	// Where represents the WHERE clause.
-	//
-	// See Selector.Where for documentation and usage examples.
-	Where(...interface{}) Deleter
-
-	// And appends more constraints to the WHERE clause without overwriting
-	// conditions that have been already set.
-	And(conds ...interface{}) Deleter
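Batch() is easiest to understand with the producer/consumer shape it was designed for. A sketch, assuming the BatchInserter's Values/Done/Wait helpers from this same package (they live in batch.go, which is not shown in this diff):

func importTitles(sess sqlbuilder.Database, titles []string) error {
	batch := sess.InsertInto("books").
		Columns("title").
		Batch(50) // one INSERT per 50 accumulated rows

	go func() {
		defer batch.Done() // signals that no more values are coming
		for _, t := range titles {
			batch.Values(t)
		}
	}()

	return batch.Wait() // blocks until every chunk has been flushed
}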
-
-	// Limit represents the LIMIT clause.
-	//
-	// See Selector.Limit for documentation and usage examples.
-	Limit(int) Deleter
-
-	// Amend lets you alter the query's text just before sending it to the
-	// database server.
-	Amend(func(queryIn string) (queryOut string)) Deleter
-
-	// Preparer provides methods for creating prepared statements.
-	Preparer
-
-	// Execer provides the Exec method.
-	Execer
-
-	// fmt.Stringer provides `String() string`, you can use `String()` to compile
-	// the `Deleter` into a string.
-	fmt.Stringer
-
-	// Arguments returns the arguments that are prepared for this query.
-	Arguments() []interface{}
-}
-
-// Updater represents an UPDATE statement.
-type Updater interface {
-	// Set represents the SET clause.
-	Set(...interface{}) Updater
-
-	// Where represents the WHERE clause.
-	//
-	// See Selector.Where for documentation and usage examples.
-	Where(...interface{}) Updater
-
-	// And appends more constraints to the WHERE clause without overwriting
-	// conditions that have been already set.
-	And(conds ...interface{}) Updater
-
-	// Limit represents the LIMIT parameter.
-	//
-	// See Selector.Limit for documentation and usage examples.
-	Limit(int) Updater
-
-	// Preparer provides methods for creating prepared statements.
-	Preparer
-
-	// Execer provides the Exec method.
-	Execer
-
-	// fmt.Stringer provides `String() string`, you can use `String()` to compile
-	// the `Updater` into a string.
-	fmt.Stringer
-
-	// Arguments returns the arguments that are prepared for this query.
-	Arguments() []interface{}
-
-	// Amend lets you alter the query's text just before sending it to the
-	// database server.
-	Amend(func(queryIn string) (queryOut string)) Updater
-}
-
-// Execer provides methods for executing statements that do not return results.
-type Execer interface {
-	// Exec executes a statement and returns sql.Result.
-	Exec() (sql.Result, error)
-
-	// ExecContext executes a statement and returns sql.Result.
-	ExecContext(context.Context) (sql.Result, error)
-}
-
-// Preparer provides the Prepare and PrepareContext methods for creating
-// prepared statements.
-type Preparer interface {
-	// Prepare creates a prepared statement.
-	Prepare() (*sql.Stmt, error)
-
-	// PrepareContext creates a prepared statement.
-	PrepareContext(context.Context) (*sql.Stmt, error)
-}
-
-// Getter provides methods for executing statements that return results.
-type Getter interface {
-	// Query returns *sql.Rows.
-	Query() (*sql.Rows, error)
-
-	// QueryContext returns *sql.Rows.
-	QueryContext(context.Context) (*sql.Rows, error)
-
-	// QueryRow returns only one row.
-	QueryRow() (*sql.Row, error)
-
-	// QueryRowContext returns only one row.
-	QueryRowContext(ctx context.Context) (*sql.Row, error)
-}
-
-// Paginator provides tools for splitting the results of a query into chunks
-// containing a fixed number of items.
-type Paginator interface {
-	// Page sets the page number.
-	Page(uint) Paginator
-
-	// Cursor defines the column that is going to be taken as basis for
-	// cursor-based pagination.
-	//
-	// Example:
-	//
-	//	a = q.Paginate(10).Cursor("id")
-	//	b = q.Paginate(12).Cursor("-id")
-	//
-	// You can set "" as cursorColumn to disable cursors.
-	Cursor(cursorColumn string) Paginator
-
-	// NextPage returns the next page according to the cursor. It expects a
-	// cursorValue, which is the value the cursor column has on the last item of
-	// the current result set (lower bound).
-	//
-	// Example:
-	//
-	//	p = q.NextPage(items[len(items)-1].ID)
-	NextPage(cursorValue interface{}) Paginator
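Updater and Deleter follow the same pattern as Selector; a brief sketch of both, under the same assumptions as the earlier snippets:

func archiveUser(sess sqlbuilder.Database, userID int64) error {
	// UPDATE profile SET display_name = ? WHERE user_id = ?
	if _, err := sess.Update("profile").
		Set("display_name = ?", "archived user").
		Where("user_id = ?", userID).
		Exec(); err != nil {
		return err
	}

	// DELETE FROM tasks WHERE user_id = ?
	_, err := sess.DeleteFrom("tasks").
		Where("user_id = ?", userID).
		Exec()
	return err
}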
-
-	// PrevPage returns the previous page according to the cursor. It expects a
-	// cursorValue, which is the value the cursor column has on the first item of
-	// the current result set (upper bound).
-	//
-	// Example:
-	//
-	//	p = q.PrevPage(items[0].ID)
-	PrevPage(cursorValue interface{}) Paginator
-
-	// TotalPages returns the total number of pages in the query.
-	TotalPages() (uint, error)
-
-	// TotalEntries returns the total number of entries in the query.
-	TotalEntries() (uint64, error)
-
-	// Preparer provides methods for creating prepared statements.
-	Preparer
-
-	// Getter provides methods to compile and execute a query that returns
-	// results.
-	Getter
-
-	// Iterator provides methods to iterate over the results returned by the
-	// Selector.
-	Iterator() Iterator
-
-	// IteratorContext provides methods to iterate over the results returned by
-	// the Selector.
-	IteratorContext(ctx context.Context) Iterator
-
-	// ResultMapper provides methods to retrieve and map results.
-	ResultMapper
-
-	// fmt.Stringer provides `String() string`, you can use `String()` to compile
-	// the `Selector` into a string.
-	fmt.Stringer
-
-	// Arguments returns the arguments that are prepared for this query.
-	Arguments() []interface{}
-}
-
-// ResultMapper defines the methods of a result mapper.
-type ResultMapper interface {
-	// All dumps all the results into the given slice; All() expects a pointer to
-	// a slice of maps or structs.
-	//
-	// The behaviour of One() extends to each one of the results.
-	All(destSlice interface{}) error
-
-	// One maps the row that is in the current query cursor into the
-	// given interface, which can be a pointer to either a map or a
-	// struct.
-	//
-	// If dest is a pointer to map, each one of the columns will create a new map
-	// key and the values of the result will be set as values for the keys.
-	//
-	// Depending on the type of map key and value, the result's columns and
-	// values may need to be transformed.
-	//
-	// If dest is a pointer to struct, each one of the fields will be tested for
-	// a `db` tag which defines the column mapping. The value of the result will
-	// be set as the value of the field.
-	One(dest interface{}) error
-}
-
-// Iterator provides methods for iterating over query results.
-type Iterator interface {
-	// ResultMapper provides methods to retrieve and map results.
-	ResultMapper
-
-	// Scan dumps the current result into the given pointer variables.
-	Scan(dest ...interface{}) error
-
-	// NextScan advances the iterator and performs Scan.
-	NextScan(dest ...interface{}) error
-
-	// ScanOne advances the iterator, performs Scan and closes the iterator.
-	ScanOne(dest ...interface{}) error
-
-	// Next dumps the current element into the given destination, which could be
-	// a pointer to either a map or a struct.
-	Next(dest ...interface{}) bool
-
-	// Err returns the last error produced by the cursor.
-	Err() error
-
-	// Close closes the iterator and frees up the cursor.
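A sketch of page-number pagination with the Paginator described above (cursor-based pagination via Cursor()/NextPage() composes similarly); the Post type is illustrative:

type Post struct {
	ID    int64  `db:"id"`
	Title string `db:"title"`
}

func walkPages(sess sqlbuilder.Database) error {
	q := sess.SelectFrom("posts").Paginate(10) // 10 items per page

	total, err := q.TotalPages()
	if err != nil {
		return err
	}

	for page := uint(1); page <= total; page++ {
		var posts []Post
		if err := q.Page(page).All(&posts); err != nil {
			return err
		}
		// render posts...
	}
	return nil
}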
- Close() error -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/paginate.go b/vendor/upper.io/db.v3/lib/sqlbuilder/paginate.go deleted file mode 100644 index 1aa9ada08b2..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/paginate.go +++ /dev/null @@ -1,340 +0,0 @@ -package sqlbuilder - -import ( - "context" - "database/sql" - "errors" - "math" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/immutable" -) - -var ( - errMissingCursorColumn = errors.New("Missing cursor column") -) - -type paginatorQuery struct { - sel Selector - - cursorColumn string - cursorValue interface{} - cursorCond db.Cond - cursorReverseOrder bool - - pageSize uint - pageNumber uint -} - -func newPaginator(sel Selector, pageSize uint) Paginator { - pag := &paginator{} - return pag.frame(func(pq *paginatorQuery) error { - pq.pageSize = pageSize - pq.sel = sel - return nil - }).Page(1) -} - -func (pq *paginatorQuery) count() (uint64, error) { - var count uint64 - - row, err := pq.sel.(*selector).setColumns(db.Raw("count(1) AS _t")). - Limit(0). - Offset(0). - OrderBy(nil). - QueryRow() - if err != nil { - return 0, err - } - - err = row.Scan(&count) - if err != nil { - return 0, err - } - - return count, nil -} - -type paginator struct { - fn func(*paginatorQuery) error - prev *paginator -} - -var _ = immutable.Immutable(&paginator{}) - -func (pag *paginator) frame(fn func(*paginatorQuery) error) *paginator { - return &paginator{prev: pag, fn: fn} -} - -func (pag *paginator) Page(pageNumber uint) Paginator { - return pag.frame(func(pq *paginatorQuery) error { - if pageNumber < 1 { - pageNumber = 1 - } - pq.pageNumber = pageNumber - return nil - }) -} - -func (pag *paginator) Cursor(column string) Paginator { - return pag.frame(func(pq *paginatorQuery) error { - pq.cursorColumn = column - pq.cursorValue = nil - return nil - }) -} - -func (pag *paginator) NextPage(cursorValue interface{}) Paginator { - return pag.frame(func(pq *paginatorQuery) error { - if pq.cursorValue != nil && pq.cursorColumn == "" { - return errMissingCursorColumn - } - pq.cursorValue = cursorValue - pq.cursorReverseOrder = false - if strings.HasPrefix(pq.cursorColumn, "-") { - pq.cursorCond = db.Cond{ - pq.cursorColumn[1:]: db.Lt(cursorValue), - } - } else { - pq.cursorCond = db.Cond{ - pq.cursorColumn: db.Gt(cursorValue), - } - } - return nil - }) -} - -func (pag *paginator) PrevPage(cursorValue interface{}) Paginator { - return pag.frame(func(pq *paginatorQuery) error { - if pq.cursorValue != nil && pq.cursorColumn == "" { - return errMissingCursorColumn - } - pq.cursorValue = cursorValue - pq.cursorReverseOrder = true - if strings.HasPrefix(pq.cursorColumn, "-") { - pq.cursorCond = db.Cond{ - pq.cursorColumn[1:]: db.Gt(cursorValue), - } - } else { - pq.cursorCond = db.Cond{ - pq.cursorColumn: db.Lt(cursorValue), - } - } - return nil - }) -} - -func (pag *paginator) TotalPages() (uint, error) { - pq, err := pag.build() - if err != nil { - return 0, err - } - - count, err := pq.count() - if err != nil { - return 0, err - } - if count < 1 { - return 0, nil - } - - if pq.pageSize < 1 { - return 1, nil - } - - pages := uint(math.Ceil(float64(count) / float64(pq.pageSize))) - return pages, nil -} - -func (pag *paginator) All(dest interface{}) error { - pq, err := pag.buildWithCursor() - if err != nil { - return err - } - err = pq.sel.All(dest) - if err != nil { - return err - } - return nil -} - -func (pag *paginator) One(dest interface{}) error { - pq, err := pag.buildWithCursor() - if err != nil { - return err - } - return 
pq.sel.One(dest) -} - -func (pag *paginator) Iterator() Iterator { - pq, err := pag.buildWithCursor() - if err != nil { - sess := pq.sel.(*selector).SQLBuilder().sess - return &iterator{sess, nil, err} - } - return pq.sel.Iterator() -} - -func (pag *paginator) IteratorContext(ctx context.Context) Iterator { - pq, err := pag.buildWithCursor() - if err != nil { - sess := pq.sel.(*selector).SQLBuilder().sess - return &iterator{sess, nil, err} - } - return pq.sel.IteratorContext(ctx) -} - -func (pag *paginator) String() string { - pq, err := pag.buildWithCursor() - if err != nil { - panic(err.Error()) - } - return pq.sel.String() -} - -func (pag *paginator) Arguments() []interface{} { - pq, err := pag.buildWithCursor() - if err != nil { - return nil - } - return pq.sel.Arguments() -} - -func (pag *paginator) Compile() (string, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return "", err - } - return pq.sel.(*selector).Compile() -} - -func (pag *paginator) Query() (*sql.Rows, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.Query() -} - -func (pag *paginator) QueryContext(ctx context.Context) (*sql.Rows, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.QueryContext(ctx) -} - -func (pag *paginator) QueryRow() (*sql.Row, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.QueryRow() -} - -func (pag *paginator) QueryRowContext(ctx context.Context) (*sql.Row, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.QueryRowContext(ctx) -} - -func (pag *paginator) Prepare() (*sql.Stmt, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.Prepare() -} - -func (pag *paginator) PrepareContext(ctx context.Context) (*sql.Stmt, error) { - pq, err := pag.buildWithCursor() - if err != nil { - return nil, err - } - return pq.sel.PrepareContext(ctx) -} - -func (pag *paginator) TotalEntries() (uint64, error) { - pq, err := pag.build() - if err != nil { - return 0, err - } - return pq.count() -} - -func (pag *paginator) build() (*paginatorQuery, error) { - pq, err := immutable.FastForward(pag) - if err != nil { - return nil, err - } - return pq.(*paginatorQuery), nil -} - -func (pag *paginator) buildWithCursor() (*paginatorQuery, error) { - pq, err := immutable.FastForward(pag) - if err != nil { - return nil, err - } - - pqq := pq.(*paginatorQuery) - - if pqq.cursorReverseOrder { - orderBy := pqq.cursorColumn - - if orderBy == "" { - return nil, errMissingCursorColumn - } - - if strings.HasPrefix(orderBy, "-") { - orderBy = orderBy[1:] - } else { - orderBy = "-" + orderBy - } - - pqq.sel = pqq.sel.OrderBy(orderBy) - } - - if pqq.pageSize > 0 { - pqq.sel = pqq.sel.Limit(int(pqq.pageSize)) - if pqq.pageNumber > 1 { - pqq.sel = pqq.sel.Offset(int(pqq.pageSize * (pqq.pageNumber - 1))) - } - } - - if pqq.cursorCond != nil { - pqq.sel = pqq.sel.Where(pqq.cursorCond).Offset(0) - } - - if pqq.cursorColumn != "" { - if pqq.cursorReverseOrder { - pqq.sel = pqq.sel.(*selector).SQLBuilder(). - SelectFrom(db.Raw("? AS p0", pqq.sel)). 
- OrderBy(pqq.cursorColumn) - } else { - pqq.sel = pqq.sel.OrderBy(pqq.cursorColumn) - } - } - - return pqq, nil -} - -func (pag *paginator) Prev() immutable.Immutable { - if pag == nil { - return nil - } - return pag.prev -} - -func (pag *paginator) Fn(in interface{}) error { - if pag.fn == nil { - return nil - } - return pag.fn(in.(*paginatorQuery)) -} - -func (pag *paginator) Base() interface{} { - return &paginatorQuery{} -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/scanner.go b/vendor/upper.io/db.v3/lib/sqlbuilder/scanner.go deleted file mode 100644 index 076c3822649..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/scanner.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqlbuilder - -import ( - "database/sql" - - db "upper.io/db.v3" -) - -type scanner struct { - v db.Unmarshaler -} - -func (u scanner) Scan(v interface{}) error { - return u.v.UnmarshalDB(v) -} - -var _ sql.Scanner = scanner{} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/select.go b/vendor/upper.io/db.v3/lib/sqlbuilder/select.go deleted file mode 100644 index fa0436bd535..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/select.go +++ /dev/null @@ -1,523 +0,0 @@ -package sqlbuilder - -import ( - "context" - "database/sql" - "errors" - "fmt" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/immutable" - "upper.io/db.v3/internal/sqladapter/exql" -) - -type selectorQuery struct { - table *exql.Columns - tableArgs []interface{} - - distinct bool - - where *exql.Where - whereArgs []interface{} - - groupBy *exql.GroupBy - groupByArgs []interface{} - - orderBy *exql.OrderBy - orderByArgs []interface{} - - limit exql.Limit - offset exql.Offset - - columns *exql.Columns - columnsArgs []interface{} - - joins []*exql.Join - joinsArgs []interface{} - - amendFn func(string) string -} - -func (sq *selectorQuery) and(b *sqlBuilder, terms ...interface{}) error { - where, whereArgs := b.t.toWhereWithArguments(terms) - - if sq.where == nil { - sq.where, sq.whereArgs = &exql.Where{}, []interface{}{} - } - sq.where.Append(&where) - sq.whereArgs = append(sq.whereArgs, whereArgs...) 
- - return nil -} - -func (sq *selectorQuery) arguments() []interface{} { - return joinArguments( - sq.columnsArgs, - sq.tableArgs, - sq.joinsArgs, - sq.whereArgs, - sq.groupByArgs, - sq.orderByArgs, - ) -} - -func (sq *selectorQuery) statement() *exql.Statement { - stmt := &exql.Statement{ - Type: exql.Select, - Table: sq.table, - Columns: sq.columns, - Distinct: sq.distinct, - Limit: sq.limit, - Offset: sq.offset, - Where: sq.where, - OrderBy: sq.orderBy, - GroupBy: sq.groupBy, - } - - if len(sq.joins) > 0 { - stmt.Joins = exql.JoinConditions(sq.joins...) - } - - stmt.SetAmendment(sq.amendFn) - - return stmt -} - -func (sq *selectorQuery) pushJoin(t string, tables []interface{}) error { - fragments, args, err := columnFragments(tables) - if err != nil { - return err - } - - if sq.joins == nil { - sq.joins = []*exql.Join{} - } - sq.joins = append(sq.joins, - &exql.Join{ - Type: t, - Table: exql.JoinColumns(fragments...), - }, - ) - - sq.joinsArgs = append(sq.joinsArgs, args...) - - return nil -} - -type selector struct { - builder *sqlBuilder - - fn func(*selectorQuery) error - prev *selector -} - -var _ = immutable.Immutable(&selector{}) - -func (sel *selector) SQLBuilder() *sqlBuilder { - if sel.prev == nil { - return sel.builder - } - return sel.prev.SQLBuilder() -} - -func (sel *selector) String() string { - s, err := sel.Compile() - if err != nil { - panic(err.Error()) - } - return prepareQueryForDisplay(s) -} - -func (sel *selector) frame(fn func(*selectorQuery) error) *selector { - return &selector{prev: sel, fn: fn} -} - -func (sel *selector) clone() Selector { - return sel.frame(func(*selectorQuery) error { - return nil - }) -} - -func (sel *selector) From(tables ...interface{}) Selector { - return sel.frame( - func(sq *selectorQuery) error { - fragments, args, err := columnFragments(tables) - if err != nil { - return err - } - sq.table = exql.JoinColumns(fragments...) - sq.tableArgs = args - return nil - }, - ) -} - -func (sel *selector) setColumns(columns ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - sq.columns = nil - return sq.pushColumns(columns...) - }) -} - -func (sel *selector) Columns(columns ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushColumns(columns...) - }) -} - -func (sq *selectorQuery) pushColumns(columns ...interface{}) error { - f, args, err := columnFragments(columns) - if err != nil { - return err - } - - c := exql.JoinColumns(f...) - - if sq.columns != nil { - sq.columns.Append(c) - } else { - sq.columns = c - } - - sq.columnsArgs = append(sq.columnsArgs, args...) - return nil -} - -func (sel *selector) Distinct(exps ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - sq.distinct = true - return sq.pushColumns(exps...) - }) -} - -func (sel *selector) Where(terms ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - if len(terms) == 1 && terms[0] == nil { - sq.where, sq.whereArgs = &exql.Where{}, []interface{}{} - return nil - } - return sq.and(sel.SQLBuilder(), terms...) - }) -} - -func (sel *selector) And(terms ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.and(sel.SQLBuilder(), terms...) 
- }) -} - -func (sel *selector) Amend(fn func(string) string) Selector { - return sel.frame(func(sq *selectorQuery) error { - sq.amendFn = fn - return nil - }) -} - -func (sel *selector) Arguments() []interface{} { - sq, err := sel.build() - if err != nil { - return nil - } - return sq.arguments() -} - -func (sel *selector) GroupBy(columns ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - fragments, args, err := columnFragments(columns) - if err != nil { - return err - } - - if fragments != nil { - sq.groupBy = exql.GroupByColumns(fragments...) - } - sq.groupByArgs = args - - return nil - }) -} - -func (sel *selector) OrderBy(columns ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - - if len(columns) == 1 && columns[0] == nil { - sq.orderBy = nil - sq.orderByArgs = nil - return nil - } - - var sortColumns exql.SortColumns - - for i := range columns { - var sort *exql.SortColumn - - switch value := columns[i].(type) { - case db.RawValue: - query, args := Preprocess(value.Raw(), value.Arguments()) - sort = &exql.SortColumn{ - Column: exql.RawValue(query), - } - sq.orderByArgs = append(sq.orderByArgs, args...) - case db.Function: - fnName, fnArgs := value.Name(), value.Arguments() - if len(fnArgs) == 0 { - fnName = fnName + "()" - } else { - fnName = fnName + "(?" + strings.Repeat("?, ", len(fnArgs)-1) + ")" - } - fnName, fnArgs = Preprocess(fnName, fnArgs) - sort = &exql.SortColumn{ - Column: exql.RawValue(fnName), - } - sq.orderByArgs = append(sq.orderByArgs, fnArgs...) - case string: - if strings.HasPrefix(value, "-") { - sort = &exql.SortColumn{ - Column: exql.ColumnWithName(value[1:]), - Order: exql.Descendent, - } - } else { - chunks := strings.SplitN(value, " ", 2) - - order := exql.Ascendent - if len(chunks) > 1 && strings.ToUpper(chunks[1]) == "DESC" { - order = exql.Descendent - } - - sort = &exql.SortColumn{ - Column: exql.ColumnWithName(chunks[0]), - Order: order, - } - } - default: - return fmt.Errorf("Can't sort by type %T", value) - } - sortColumns.Columns = append(sortColumns.Columns, sort) - } - - sq.orderBy = &exql.OrderBy{ - SortColumns: &sortColumns, - } - return nil - }) -} - -func (sel *selector) Using(columns ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - - joins := len(sq.joins) - if joins == 0 { - return errors.New(`cannot use Using() without a preceding Join() expression`) - } - - lastJoin := sq.joins[joins-1] - if lastJoin.On != nil { - return errors.New(`cannot use Using() and On() with the same Join() expression`) - } - - fragments, args, err := columnFragments(columns) - if err != nil { - return err - } - - sq.joinsArgs = append(sq.joinsArgs, args...) - lastJoin.Using = exql.UsingColumns(fragments...) 
- - return nil - }) -} - -func (sel *selector) FullJoin(tables ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushJoin("FULL", tables) - }) -} - -func (sel *selector) CrossJoin(tables ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushJoin("CROSS", tables) - }) -} - -func (sel *selector) RightJoin(tables ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushJoin("RIGHT", tables) - }) -} - -func (sel *selector) LeftJoin(tables ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushJoin("LEFT", tables) - }) -} - -func (sel *selector) Join(tables ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - return sq.pushJoin("", tables) - }) -} - -func (sel *selector) On(terms ...interface{}) Selector { - return sel.frame(func(sq *selectorQuery) error { - joins := len(sq.joins) - - if joins == 0 { - return errors.New(`cannot use On() without a preceding Join() expression`) - } - - lastJoin := sq.joins[joins-1] - if lastJoin.On != nil { - return errors.New(`cannot use Using() and On() with the same Join() expression`) - } - - w, a := sel.SQLBuilder().t.toWhereWithArguments(terms) - o := exql.On(w) - - lastJoin.On = &o - - sq.joinsArgs = append(sq.joinsArgs, a...) - - return nil - }) -} - -func (sel *selector) Limit(n int) Selector { - return sel.frame(func(sq *selectorQuery) error { - if n < 0 { - n = 0 - } - sq.limit = exql.Limit(n) - return nil - }) -} - -func (sel *selector) Offset(n int) Selector { - return sel.frame(func(sq *selectorQuery) error { - if n < 0 { - n = 0 - } - sq.offset = exql.Offset(n) - return nil - }) -} - -func (sel *selector) template() *exql.Template { - return sel.SQLBuilder().t.Template -} - -func (sel *selector) As(alias string) Selector { - return sel.frame(func(sq *selectorQuery) error { - if sq.table == nil { - return errors.New("Cannot use As() without a preceding From() expression") - } - last := len(sq.table.Columns) - 1 - if raw, ok := sq.table.Columns[last].(*exql.Raw); ok { - compiled, err := exql.ColumnWithName(alias).Compile(sel.template()) - if err != nil { - return err - } - sq.table.Columns[last] = exql.RawValue(raw.Value + " AS " + compiled) - } - return nil - }) -} - -func (sel *selector) statement() *exql.Statement { - sq, _ := sel.build() - return sq.statement() -} - -func (sel *selector) QueryRow() (*sql.Row, error) { - return sel.QueryRowContext(sel.SQLBuilder().sess.Context()) -} - -func (sel *selector) QueryRowContext(ctx context.Context) (*sql.Row, error) { - sq, err := sel.build() - if err != nil { - return nil, err - } - - return sel.SQLBuilder().sess.StatementQueryRow(ctx, sq.statement(), sq.arguments()...) -} - -func (sel *selector) Prepare() (*sql.Stmt, error) { - return sel.PrepareContext(sel.SQLBuilder().sess.Context()) -} - -func (sel *selector) PrepareContext(ctx context.Context) (*sql.Stmt, error) { - sq, err := sel.build() - if err != nil { - return nil, err - } - return sel.SQLBuilder().sess.StatementPrepare(ctx, sq.statement()) -} - -func (sel *selector) Query() (*sql.Rows, error) { - return sel.QueryContext(sel.SQLBuilder().sess.Context()) -} - -func (sel *selector) QueryContext(ctx context.Context) (*sql.Rows, error) { - sq, err := sel.build() - if err != nil { - return nil, err - } - return sel.SQLBuilder().sess.StatementQuery(ctx, sq.statement(), sq.arguments()...) 
-} - -func (sel *selector) Iterator() Iterator { - return sel.IteratorContext(sel.SQLBuilder().sess.Context()) -} - -func (sel *selector) IteratorContext(ctx context.Context) Iterator { - sess := sel.SQLBuilder().sess - sq, err := sel.build() - if err != nil { - return &iterator{sess, nil, err} - } - - rows, err := sess.StatementQuery(ctx, sq.statement(), sq.arguments()...) - return &iterator{sess, rows, err} -} - -func (sel *selector) Paginate(pageSize uint) Paginator { - return newPaginator(sel.clone(), pageSize) -} - -func (sel *selector) All(destSlice interface{}) error { - return sel.Iterator().All(destSlice) -} - -func (sel *selector) One(dest interface{}) error { - return sel.Iterator().One(dest) -} - -func (sel *selector) build() (*selectorQuery, error) { - sq, err := immutable.FastForward(sel) - if err != nil { - return nil, err - } - return sq.(*selectorQuery), nil -} - -func (sel *selector) Compile() (string, error) { - return sel.statement().Compile(sel.template()) -} - -func (sel *selector) Prev() immutable.Immutable { - if sel == nil { - return nil - } - return sel.prev -} - -func (sel *selector) Fn(in interface{}) error { - if sel.fn == nil { - return nil - } - return sel.fn(in.(*selectorQuery)) -} - -func (sel *selector) Base() interface{} { - return &selectorQuery{} -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/template.go b/vendor/upper.io/db.v3/lib/sqlbuilder/template.go deleted file mode 100644 index 398b5985c98..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/template.go +++ /dev/null @@ -1,311 +0,0 @@ -package sqlbuilder - -import ( - "database/sql/driver" - "fmt" - "strings" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter/exql" -) - -type templateWithUtils struct { - *exql.Template -} - -func newTemplateWithUtils(template *exql.Template) *templateWithUtils { - return &templateWithUtils{template} -} - -func (tu *templateWithUtils) PlaceholderValue(in interface{}) (exql.Fragment, []interface{}) { - switch t := in.(type) { - case db.RawValue: - return exql.RawValue(t.String()), t.Arguments() - case db.Function: - fnName := t.Name() - fnArgs := []interface{}{} - args, _ := toInterfaceArguments(t.Arguments()) - fragments := []string{} - for i := range args { - frag, args := tu.PlaceholderValue(args[i]) - fragment, err := frag.Compile(tu.Template) - if err == nil { - fragments = append(fragments, fragment) - fnArgs = append(fnArgs, args...) - } - } - return exql.RawValue(fnName + `(` + strings.Join(fragments, `, `) + `)`), fnArgs - default: - // Value must be escaped. - return sqlPlaceholder, []interface{}{in} - } -} - -// toWhereWithArguments converts the given parameters into a exql.Where -// value. -func (tu *templateWithUtils) toWhereWithArguments(term interface{}) (where exql.Where, args []interface{}) { - args = []interface{}{} - - switch t := term.(type) { - case []interface{}: - if len(t) > 0 { - if s, ok := t[0].(string); ok { - if strings.ContainsAny(s, "?") || len(t) == 1 { - s, args = Preprocess(s, t[1:]) - where.Conditions = []exql.Fragment{exql.RawValue(s)} - } else { - var val interface{} - key := s - if len(t) > 2 { - val = t[1:] - } else { - val = t[1] - } - cv, v := tu.toColumnValues(db.NewConstraint(key, val)) - args = append(args, v...) - for i := range cv.ColumnValues { - where.Conditions = append(where.Conditions, cv.ColumnValues[i]) - } - } - return - } - } - for i := range t { - w, v := tu.toWhereWithArguments(t[i]) - if len(w.Conditions) == 0 { - continue - } - args = append(args, v...) 
- where.Conditions = append(where.Conditions, w.Conditions...) - } - return - case db.RawValue: - r, v := Preprocess(t.Raw(), t.Arguments()) - where.Conditions = []exql.Fragment{exql.RawValue(r)} - args = append(args, v...) - return - case db.Constraints: - for _, c := range t.Constraints() { - w, v := tu.toWhereWithArguments(c) - if len(w.Conditions) == 0 { - continue - } - args = append(args, v...) - where.Conditions = append(where.Conditions, w.Conditions...) - } - return - case db.Compound: - var cond exql.Where - - for _, c := range t.Sentences() { - w, v := tu.toWhereWithArguments(c) - if len(w.Conditions) == 0 { - continue - } - args = append(args, v...) - cond.Conditions = append(cond.Conditions, w.Conditions...) - } - - if len(cond.Conditions) > 0 { - var frag exql.Fragment - switch t.Operator() { - case db.OperatorNone, db.OperatorAnd: - q := exql.And(cond) - frag = &q - case db.OperatorOr: - q := exql.Or(cond) - frag = &q - default: - panic(fmt.Sprintf("Unknown type %T", t)) - } - where.Conditions = append(where.Conditions, frag) - } - - return - case db.Constraint: - cv, v := tu.toColumnValues(t) - args = append(args, v...) - where.Conditions = append(where.Conditions, cv.ColumnValues...) - return where, args - } - - panic(fmt.Sprintf("Unknown condition type %T", term)) -} - -func (tu *templateWithUtils) comparisonOperatorMapper(t db.ComparisonOperator) string { - if t == db.ComparisonOperatorNone { - return "" - } - if tu.ComparisonOperator != nil { - if op, ok := tu.ComparisonOperator[t]; ok { - return op - } - } - if op, ok := comparisonOperators[t]; ok { - return op - } - panic(fmt.Sprintf("unsupported comparison operator %v", t)) -} - -func (tu *templateWithUtils) toColumnValues(term interface{}) (cv exql.ColumnValues, args []interface{}) { - args = []interface{}{} - - switch t := term.(type) { - case db.Constraint: - columnValue := exql.ColumnValue{} - - // Getting column and operator. - if column, ok := t.Key().(string); ok { - chunks := strings.SplitN(strings.TrimSpace(column), " ", 2) - columnValue.Column = exql.ColumnWithName(chunks[0]) - if len(chunks) > 1 { - columnValue.Operator = chunks[1] - } - } else { - if rawValue, ok := t.Key().(db.RawValue); ok { - columnValue.Column = exql.RawValue(rawValue.Raw()) - args = append(args, rawValue.Arguments()...) - } else { - columnValue.Column = exql.RawValue(fmt.Sprintf("%v", t.Key())) - } - } - - switch value := t.Value().(type) { - case db.Function: - fnName, fnArgs := value.Name(), value.Arguments() - if len(fnArgs) == 0 { - // A function with no arguments. - fnName = fnName + "()" - } else { - // A function with one or more arguments. - fnName = fnName + "(?" + strings.Repeat("?, ", len(fnArgs)-1) + ")" - } - fnName, fnArgs = Preprocess(fnName, fnArgs) - columnValue.Value = exql.RawValue(fnName) - args = append(args, fnArgs...) - case db.RawValue: - q, a := Preprocess(value.Raw(), value.Arguments()) - columnValue.Value = exql.RawValue(q) - args = append(args, a...) - case driver.Valuer: - columnValue.Value = exql.RawValue("?") - args = append(args, value) - case db.Comparison: - wrapper := &operatorWrapper{ - tu: tu, - cv: &columnValue, - op: value, - } - - q, a := wrapper.preprocess() - q, a = Preprocess(q, a) - - columnValue = exql.ColumnValue{ - Column: exql.RawValue(q), - } - if a != nil { - args = append(args, a...) 
- } - - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - return cv, args - default: - wrapper := &operatorWrapper{ - tu: tu, - cv: &columnValue, - v: value, - } - - q, a := wrapper.preprocess() - q, a = Preprocess(q, a) - - columnValue = exql.ColumnValue{ - Column: exql.RawValue(q), - } - if a != nil { - args = append(args, a...) - } - - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - return cv, args - } - - if columnValue.Operator == "" { - columnValue.Operator = tu.comparisonOperatorMapper(db.ComparisonOperatorEqual) - } - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - return cv, args - case db.RawValue: - columnValue := exql.ColumnValue{} - p, q := Preprocess(t.Raw(), t.Arguments()) - columnValue.Column = exql.RawValue(p) - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - args = append(args, q...) - return cv, args - case db.Constraints: - for _, constraint := range t.Constraints() { - p, q := tu.toColumnValues(constraint) - cv.ColumnValues = append(cv.ColumnValues, p.ColumnValues...) - args = append(args, q...) - } - return cv, args - } - - panic(fmt.Sprintf("Unknown term type %T.", term)) -} - -func (tu *templateWithUtils) setColumnValues(term interface{}) (cv exql.ColumnValues, args []interface{}) { - args = []interface{}{} - - switch t := term.(type) { - case []interface{}: - l := len(t) - for i := 0; i < l; i++ { - column, isString := t[i].(string) - - if !isString { - p, q := tu.setColumnValues(t[i]) - cv.ColumnValues = append(cv.ColumnValues, p.ColumnValues...) - args = append(args, q...) - continue - } - - if !strings.ContainsAny(column, tu.AssignmentOperator) { - column = column + " " + tu.AssignmentOperator + " ?" - } - - chunks := strings.SplitN(column, tu.AssignmentOperator, 2) - - column = chunks[0] - format := strings.TrimSpace(chunks[1]) - - columnValue := exql.ColumnValue{ - Column: exql.ColumnWithName(column), - Operator: tu.AssignmentOperator, - Value: exql.RawValue(format), - } - - ps := strings.Count(format, "?") - if i+ps < l { - for j := 0; j < ps; j++ { - args = append(args, t[i+j+1]) - } - i = i + ps - } else { - panic(fmt.Sprintf("Format string %q has more placeholders than given arguments.", format)) - } - - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - } - return cv, args - case db.RawValue: - columnValue := exql.ColumnValue{} - p, q := Preprocess(t.Raw(), t.Arguments()) - columnValue.Column = exql.RawValue(p) - cv.ColumnValues = append(cv.ColumnValues, &columnValue) - args = append(args, q...) - return cv, args - } - - panic(fmt.Sprintf("Unknown term type %T.", term)) -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/update.go b/vendor/upper.io/db.v3/lib/sqlbuilder/update.go deleted file mode 100644 index ec45810772d..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/update.go +++ /dev/null @@ -1,241 +0,0 @@ -package sqlbuilder - -import ( - "context" - "database/sql" - - "upper.io/db.v3/internal/immutable" - "upper.io/db.v3/internal/sqladapter/exql" -) - -type updaterQuery struct { - table string - - columnValues *exql.ColumnValues - columnValuesArgs []interface{} - - limit int - - where *exql.Where - whereArgs []interface{} - - amendFn func(string) string -} - -func (uq *updaterQuery) and(b *sqlBuilder, terms ...interface{}) error { - where, whereArgs := b.t.toWhereWithArguments(terms) - - if uq.where == nil { - uq.where, uq.whereArgs = &exql.Where{}, []interface{}{} - } - uq.where.Append(&where) - uq.whereArgs = append(uq.whereArgs, whereArgs...) 
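The setColumnValues helper above (in template.go) is what gives Updater.Set its two accepted spellings; a sketch of both forms, with an illustrative counters table:

func bumpCounter(sess sqlbuilder.Database, id int64) error {
	// Full expression with an explicit placeholder: SET hits = hits + 1
	if _, err := sess.Update("counters").
		Set("hits = hits + ?", 1).
		Where("id = ?", id).
		Exec(); err != nil {
		return err
	}

	// Bare column: setColumnValues rewrites it as "hits = ?"
	_, err := sess.Update("counters").
		Set("hits", 0).
		Where("id = ?", id).
		Exec()
	return err
}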
- - return nil -} - -func (uq *updaterQuery) statement() *exql.Statement { - stmt := &exql.Statement{ - Type: exql.Update, - Table: exql.TableWithName(uq.table), - ColumnValues: uq.columnValues, - } - - if uq.where != nil { - stmt.Where = uq.where - } - - if uq.limit != 0 { - stmt.Limit = exql.Limit(uq.limit) - } - - stmt.SetAmendment(uq.amendFn) - - return stmt -} - -func (uq *updaterQuery) arguments() []interface{} { - return joinArguments( - uq.columnValuesArgs, - uq.whereArgs, - ) -} - -type updater struct { - builder *sqlBuilder - - fn func(*updaterQuery) error - prev *updater -} - -var _ = immutable.Immutable(&updater{}) - -func (upd *updater) SQLBuilder() *sqlBuilder { - if upd.prev == nil { - return upd.builder - } - return upd.prev.SQLBuilder() -} - -func (upd *updater) template() *exql.Template { - return upd.SQLBuilder().t.Template -} - -func (upd *updater) String() string { - s, err := upd.Compile() - if err != nil { - panic(err.Error()) - } - return prepareQueryForDisplay(s) -} - -func (upd *updater) setTable(table string) *updater { - return upd.frame(func(uq *updaterQuery) error { - uq.table = table - return nil - }) -} - -func (upd *updater) frame(fn func(*updaterQuery) error) *updater { - return &updater{prev: upd, fn: fn} -} - -func (upd *updater) Set(terms ...interface{}) Updater { - return upd.frame(func(uq *updaterQuery) error { - if uq.columnValues == nil { - uq.columnValues = &exql.ColumnValues{} - } - - if len(terms) == 1 { - ff, vv, err := Map(terms[0], nil) - if err == nil && len(ff) > 0 { - cvs := make([]exql.Fragment, 0, len(ff)) - args := make([]interface{}, 0, len(vv)) - - for i := range ff { - cv := &exql.ColumnValue{ - Column: exql.ColumnWithName(ff[i]), - Operator: upd.SQLBuilder().t.AssignmentOperator, - } - - var localArgs []interface{} - cv.Value, localArgs = upd.SQLBuilder().t.PlaceholderValue(vv[i]) - - args = append(args, localArgs...) - cvs = append(cvs, cv) - } - - uq.columnValues.Insert(cvs...) - uq.columnValuesArgs = append(uq.columnValuesArgs, args...) - - return nil - } - } - - cv, arguments := upd.SQLBuilder().t.setColumnValues(terms) - uq.columnValues.Insert(cv.ColumnValues...) - uq.columnValuesArgs = append(uq.columnValuesArgs, arguments...) - return nil - }) -} - -func (upd *updater) Amend(fn func(string) string) Updater { - return upd.frame(func(uq *updaterQuery) error { - uq.amendFn = fn - return nil - }) -} - -func (upd *updater) Arguments() []interface{} { - uq, err := upd.build() - if err != nil { - return nil - } - return uq.arguments() -} - -func (upd *updater) Where(terms ...interface{}) Updater { - return upd.frame(func(uq *updaterQuery) error { - uq.where, uq.whereArgs = &exql.Where{}, []interface{}{} - return uq.and(upd.SQLBuilder(), terms...) - }) -} - -func (upd *updater) And(terms ...interface{}) Updater { - return upd.frame(func(uq *updaterQuery) error { - return uq.and(upd.SQLBuilder(), terms...) 
- }) -} - -func (upd *updater) Prepare() (*sql.Stmt, error) { - return upd.PrepareContext(upd.SQLBuilder().sess.Context()) -} - -func (upd *updater) PrepareContext(ctx context.Context) (*sql.Stmt, error) { - uq, err := upd.build() - if err != nil { - return nil, err - } - return upd.SQLBuilder().sess.StatementPrepare(ctx, uq.statement()) -} - -func (upd *updater) Exec() (sql.Result, error) { - return upd.ExecContext(upd.SQLBuilder().sess.Context()) -} - -func (upd *updater) ExecContext(ctx context.Context) (sql.Result, error) { - uq, err := upd.build() - if err != nil { - return nil, err - } - return upd.SQLBuilder().sess.StatementExec(ctx, uq.statement(), uq.arguments()...) -} - -func (upd *updater) Limit(limit int) Updater { - return upd.frame(func(uq *updaterQuery) error { - uq.limit = limit - return nil - }) -} - -func (upd *updater) statement() (*exql.Statement, error) { - iq, err := upd.build() - if err != nil { - return nil, err - } - return iq.statement(), nil -} - -func (upd *updater) build() (*updaterQuery, error) { - uq, err := immutable.FastForward(upd) - if err != nil { - return nil, err - } - return uq.(*updaterQuery), nil -} - -func (upd *updater) Compile() (string, error) { - s, err := upd.statement() - if err != nil { - return "", err - } - return s.Compile(upd.template()) -} - -func (upd *updater) Prev() immutable.Immutable { - if upd == nil { - return nil - } - return upd.prev -} - -func (upd *updater) Fn(in interface{}) error { - if upd.fn == nil { - return nil - } - return upd.fn(in.(*updaterQuery)) -} - -func (upd *updater) Base() interface{} { - return &updaterQuery{} -} diff --git a/vendor/upper.io/db.v3/lib/sqlbuilder/wrapper.go b/vendor/upper.io/db.v3/lib/sqlbuilder/wrapper.go deleted file mode 100644 index cf540436ae3..00000000000 --- a/vendor/upper.io/db.v3/lib/sqlbuilder/wrapper.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package sqlbuilder - -import ( - "context" - "database/sql" - "fmt" - "sync" - - db "upper.io/db.v3" -) - -var ( - adapters map[string]*AdapterFuncMap - adaptersMu sync.RWMutex -) - -func init() { - adapters = make(map[string]*AdapterFuncMap) -} - -// Tx represents a transaction on a SQL database. A transaction is like a -// regular Database except it has two extra methods: Commit and Rollback. 
-//
-// A transaction needs to be committed (with Commit) to make changes permanent;
-// changes can be discarded before committing by rolling back (with Rollback).
-// After either committing or rolling back a transaction it can no longer be
-// used and it's automatically closed.
-type Tx interface {
-	// All db.Database methods are available on transaction sessions. They will
-	// run on the same transaction.
-	db.Database
-
-	// All SQLBuilder methods are available on transaction sessions. They will
-	// run on the same transaction.
-	SQLBuilder
-
-	// db.Tx adds Commit and Rollback methods to the transaction.
-	db.Tx
-
-	// Context returns the context used as default for queries on this transaction.
-	// If no context has been set, a default context.Background() is returned.
-	Context() context.Context
-
-	// WithContext returns a copy of the transaction that uses the given context
-	// as default. Copies are safe to use concurrently but they're backed by the
-	// same *sql.Tx, so any copy may commit or rollback the parent transaction.
-	WithContext(context.Context) Tx
-
-	// SetTxOptions sets the default TxOptions that is going to be used for new
-	// transactions created in the session.
-	SetTxOptions(sql.TxOptions)
-
-	// TxOptions returns the default TxOptions.
-	TxOptions() *sql.TxOptions
-}
-
-// Database represents a SQL database.
-type Database interface {
-	// All db.Database methods are available on this session.
-	db.Database
-
-	// All SQLBuilder methods are available on this session.
-	SQLBuilder
-
-	// NewTx creates and returns a transaction that runs on the given context.
-	// If a nil context is given, then the transaction will use the session's
-	// default context. The user is responsible for committing or rolling back
-	// the session.
-	NewTx(ctx context.Context) (Tx, error)
-
-	// Tx creates a new transaction that is passed as argument to the fn
-	// function. The fn function defines a transactional operation. If the fn
-	// function returns nil, the transaction is committed, else the transaction
-	// is rolled back. The transaction session is closed after the function
-	// exits, regardless of the error value returned by fn.
-	Tx(ctx context.Context, fn func(sess Tx) error) error
-
-	// Context returns the context used as default for queries on this session
-	// and for new transactions. If no context has been set, a default
-	// context.Background() is returned.
-	Context() context.Context
-
-	// WithContext returns a copy of the session that uses the given context as
-	// default. Copies are safe to use concurrently but they're backed by the
-	// same *sql.DB. You may close a copy at any point but that won't close the
-	// parent session.
-	WithContext(context.Context) Database
-
-	// SetTxOptions sets the default TxOptions that is going to be used for new
-	// transactions created in the session.
-	SetTxOptions(sql.TxOptions)
-
-	// TxOptions returns the default TxOptions.
-	TxOptions() *sql.TxOptions
-}
-
-// AdapterFuncMap is a struct that defines a set of functions that adapters
-// need to provide.
-type AdapterFuncMap struct {
-	New   func(sqlDB *sql.DB) (Database, error)
-	NewTx func(sqlTx *sql.Tx) (Tx, error)
-	Open  func(settings db.ConnectionURL) (Database, error)
-}
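The Tx helper on Database above is a closure-style API: return nil to commit, any error to roll back. A sketch, with made-up account tables:

func transferCredits(sess sqlbuilder.Database, from, to int64) error {
	return sess.Tx(context.Background(), func(tx sqlbuilder.Tx) error {
		if _, err := tx.Update("accounts").
			Set("balance = balance - ?", 100).
			Where("id = ?", from).
			Exec(); err != nil {
			return err // non-nil: the transaction is rolled back
		}
		_, err := tx.Update("accounts").
			Set("balance = balance + ?", 100).
			Where("id = ?", to).
			Exec()
		return err // nil: the transaction is committed
	})
}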
-
-// RegisterAdapter registers a SQL database adapter. This function must be
-// called from adapter packages upon initialization. RegisterAdapter also
-// calls db.RegisterAdapter automatically.
-func RegisterAdapter(name string, adapter *AdapterFuncMap) {
-	adaptersMu.Lock()
-	defer adaptersMu.Unlock()
-
-	if name == "" {
-		panic(`Missing adapter name`)
-	}
-	if _, ok := adapters[name]; ok {
-		panic(`db.RegisterAdapter() called twice for adapter: ` + name)
-	}
-	adapters[name] = adapter
-
-	db.RegisterAdapter(name, &db.AdapterFuncMap{
-		Open: func(settings db.ConnectionURL) (db.Database, error) {
-			return adapter.Open(settings)
-		},
-	})
-}
-
-// adapter returns SQL database functions.
-func adapter(name string) AdapterFuncMap {
-	adaptersMu.RLock()
-	defer adaptersMu.RUnlock()
-
-	if fn, ok := adapters[name]; ok {
-		return *fn
-	}
-	return missingAdapter(name)
-}
-
-// Open opens a SQL database.
-func Open(adapterName string, settings db.ConnectionURL) (Database, error) {
-	return adapter(adapterName).Open(settings)
-}
-
-// New wraps an active *sql.DB session and returns a SQLBuilder database. The
-// adapter needs to be imported to the blank namespace in order for it to be
-// used here.
-//
-// This method is internally used by upper-db to create a builder backed by the
-// given database. You may want to use your adapter's New function instead of
-// this one.
-func New(adapterName string, sqlDB *sql.DB) (Database, error) {
-	return adapter(adapterName).New(sqlDB)
-}
-
-// NewTx wraps an active *sql.Tx transaction and returns a SQLBuilder
-// transaction. The adapter needs to be imported to the blank namespace in
-// order for it to be used.
-//
-// This method is internally used by upper-db to create a builder backed by the
-// given transaction. You may want to use your adapter's NewTx function
-// instead of this one.
-func NewTx(adapterName string, sqlTx *sql.Tx) (Tx, error) {
-	return adapter(adapterName).NewTx(sqlTx)
-}
-
-func missingAdapter(name string) AdapterFuncMap {
-	err := fmt.Errorf("upper: Missing SQL adapter %q, forgot to import?", name)
-	return AdapterFuncMap{
-		New: func(*sql.DB) (Database, error) {
-			return nil, err
-		},
-		NewTx: func(*sql.Tx) (Tx, error) {
-			return nil, err
-		},
-		Open: func(db.ConnectionURL) (Database, error) {
-			return nil, err
-		},
-	}
-}
diff --git a/vendor/upper.io/db.v3/logger.go b/vendor/upper.io/db.v3/logger.go
deleted file mode 100644
index d5fd2f3fa6c..00000000000
--- a/vendor/upper.io/db.v3/logger.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (c) 2012-present The upper.io/db authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
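In practice an application never calls RegisterAdapter directly; importing an adapter package (such as the mysql one deleted later in this patch) runs its init(), which registers it, and the adapter's own Open returns a sqlbuilder.Database. A sketch, borrowing connection values from the mysql Makefile further below:

import (
	"upper.io/db.v3/lib/sqlbuilder"
	"upper.io/db.v3/mysql" // importing the adapter registers it via init()
)

// Assumed settings; adjust for a real server.
var settings = mysql.ConnectionURL{
	Host:     "127.0.0.1:3306",
	Database: "upperio",
	User:     "upperio_user",
	Password: "upperio//s3cr37",
}

func openSession() (sqlbuilder.Database, error) {
	return mysql.Open(settings) // resolves the registered adapter and connects
}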
- -package db - -import ( - "context" - "fmt" - "log" - "regexp" - "strings" - "time" -) - -const ( - fmtLogSessID = `Session ID: %05d` - fmtLogTxID = `Transaction ID: %05d` - fmtLogQuery = `Query: %s` - fmtLogArgs = `Arguments: %#v` - fmtLogRowsAffected = `Rows affected: %d` - fmtLogLastInsertID = `Last insert ID: %d` - fmtLogError = `Error: %v` - fmtLogTimeTaken = `Time taken: %0.5fs` - fmtLogContext = `Context: %v` -) - -var ( - reInvisibleChars = regexp.MustCompile(`[\s\r\n\t]+`) -) - -// QueryStatus represents the status of a query after being executed. -type QueryStatus struct { - SessID uint64 - TxID uint64 - - RowsAffected *int64 - LastInsertID *int64 - - Query string - Args []interface{} - - Err error - - Start time.Time - End time.Time - - Context context.Context -} - -// String returns a formatted log message. -func (q *QueryStatus) String() string { - lines := make([]string, 0, 8) - - if q.SessID > 0 { - lines = append(lines, fmt.Sprintf(fmtLogSessID, q.SessID)) - } - - if q.TxID > 0 { - lines = append(lines, fmt.Sprintf(fmtLogTxID, q.TxID)) - } - - if query := q.Query; query != "" { - query = reInvisibleChars.ReplaceAllString(query, ` `) - query = strings.TrimSpace(query) - lines = append(lines, fmt.Sprintf(fmtLogQuery, query)) - } - - if len(q.Args) > 0 { - lines = append(lines, fmt.Sprintf(fmtLogArgs, q.Args)) - } - - if q.RowsAffected != nil { - lines = append(lines, fmt.Sprintf(fmtLogRowsAffected, *q.RowsAffected)) - } - if q.LastInsertID != nil { - lines = append(lines, fmt.Sprintf(fmtLogLastInsertID, *q.LastInsertID)) - } - - if q.Err != nil { - lines = append(lines, fmt.Sprintf(fmtLogError, q.Err)) - } - - lines = append(lines, fmt.Sprintf(fmtLogTimeTaken, float64(q.End.UnixNano()-q.Start.UnixNano())/float64(1e9))) - - if q.Context != nil { - lines = append(lines, fmt.Sprintf(fmtLogContext, q.Context)) - } - - return strings.Join(lines, "\n") -} - -// EnvEnableDebug can be used by adapters to determine if the user has enabled -// debugging. -// -// If the user sets the `UPPERIO_DB_DEBUG` environment variable to a -// non-empty value, all generated statements will be printed at runtime to -// the standard logger. -// -// Example: -// -// UPPERIO_DB_DEBUG=1 go test -// -// UPPERIO_DB_DEBUG=1 ./go-program -const ( - EnvEnableDebug = `UPPERIO_DB_DEBUG` -) - -// Logger represents a logging collector. You can pass a logging collector to -// db.DefaultSettings.SetLogger(myCollector) to make it collect db.QueryStatus messages -// after executing a query. -type Logger interface { - Log(*QueryStatus) -} - -type defaultLogger struct { -} - -func (lg *defaultLogger) Log(m *QueryStatus) { - log.Printf("\n\t%s\n\n", strings.Replace(m.String(), "\n", "\n\t", -1)) -} - -var _ = Logger(&defaultLogger{}) - -func init() { - if envEnabled(EnvEnableDebug) { - DefaultSettings.SetLogging(true) - } -} diff --git a/vendor/upper.io/db.v3/marshal.go b/vendor/upper.io/db.v3/marshal.go deleted file mode 100644 index 2422d12fa60..00000000000 --- a/vendor/upper.io/db.v3/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. 
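Backing up to logger.go just above: a collector only has to implement Log(*QueryStatus). A sketch of a custom logger that reuses the QueryStatus.String() formatting to report slow queries, installed through db.DefaultSettings.SetLogger as the Logger comment suggests:

type slowQueryLogger struct {
	threshold time.Duration
}

// Log satisfies db.Logger; it drops anything faster than the threshold.
func (l *slowQueryLogger) Log(q *db.QueryStatus) {
	if q.End.Sub(q.Start) >= l.threshold {
		log.Printf("slow query:\n%s", q.String())
	}
}

// Installation:
//
//	db.DefaultSettings.SetLogger(&slowQueryLogger{threshold: time.Second})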
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Marshaler is the interface implemented by struct fields that can transform -// themselves into values that can be stored on a database. -type Marshaler interface { - // MarshalDB returns the internal database representation of the Go value. - MarshalDB() (interface{}, error) -} - -// Unmarshaler is the interface implemented by struct fields that can transform -// themselves from stored database values into Go values. -type Unmarshaler interface { - // UnmarshalDB receives an internal database representation of a value and - // must transform that into a Go value. - UnmarshalDB(interface{}) error -} diff --git a/vendor/upper.io/db.v3/mysql/Makefile b/vendor/upper.io/db.v3/mysql/Makefile deleted file mode 100644 index 02f8a25549e..00000000000 --- a/vendor/upper.io/db.v3/mysql/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -SHELL := bash - -MYSQL_VERSION ?= 8 -MYSQL_SUPPORTED ?= $(MYSQL_VERSION) 5.7 -PROJECT ?= upper_mysql_$(MYSQL_VERSION) - -DB_HOST ?= 127.0.0.1 -DB_PORT ?= 3306 - -DB_NAME ?= upperio -DB_USERNAME ?= upperio_user -DB_PASSWORD ?= upperio//s3cr37 - -TEST_FLAGS ?= -PARALLEL_FLAGS ?= --halt-on-error 2 --jobs 1 - -export MYSQL_VERSION - -export DB_HOST -export DB_NAME -export DB_PASSWORD -export DB_PORT -export DB_USERNAME - -export TEST_FLAGS - -test: - go test -v $(TEST_FLAGS) - -server-up: server-down - docker-compose -p $(PROJECT) up -d && \ - sleep 15 - -server-down: - docker-compose -p $(PROJECT) down - -test-extended: - parallel $(PARALLEL_FLAGS) \ - "MYSQL_VERSION={} DB_PORT=\$$((3306+{#})) $(MAKE) server-up test server-down" ::: \ - $(MYSQL_SUPPORTED) diff --git a/vendor/upper.io/db.v3/mysql/README.md b/vendor/upper.io/db.v3/mysql/README.md deleted file mode 100644 index e5853b10641..00000000000 --- a/vendor/upper.io/db.v3/mysql/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# MySQL adapter for upper.io/db - -See the full docs, acknowledgements and examples at -[https://upper.io/db.v3/mysql][1] - -[1]: https://upper.io/db.v3/mysql - diff --git a/vendor/upper.io/db.v3/mysql/collection.go b/vendor/upper.io/db.v3/mysql/collection.go deleted file mode 100644 index 4eeeefecdc6..00000000000 --- a/vendor/upper.io/db.v3/mysql/collection.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. 
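The marshal.go interfaces deleted above let a struct field define its own database encoding. A hedged sketch; the Tags type and its comma-separated encoding are invented for illustration:

package main

import (
    "fmt"
    "strings"

    db "upper.io/db.v3"
)

// Tags stores a []string as a single comma-separated column value.
type Tags []string

// MarshalDB satisfies db.Marshaler.
func (t Tags) MarshalDB() (interface{}, error) {
    return strings.Join(t, ","), nil
}

// UnmarshalDB satisfies db.Unmarshaler.
func (t *Tags) UnmarshalDB(v interface{}) error {
    s, ok := v.(string)
    if !ok {
        return fmt.Errorf("tags: expected string, got %T", v)
    }
    *t = Tags(strings.Split(s, ","))
    return nil
}

var (
    _ db.Marshaler   = Tags{}
    _ db.Unmarshaler = &Tags{}
)

func main() {
    v, _ := Tags{"a", "b"}.MarshalDB()
    fmt.Println(v) // "a,b"
}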
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package mysql - -import ( - "database/sql" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/lib/sqlbuilder" -) - -// table is the actual implementation of a collection. -type table struct { - sqladapter.BaseCollection // Leveraged by sqladapter - - d *database - name string -} - -var ( - _ = sqladapter.Collection(&table{}) - _ = db.Collection(&table{}) -) - -// newTable binds *table with sqladapter. -func newTable(d *database, name string) *table { - t := &table{ - name: name, - d: d, - } - t.BaseCollection = sqladapter.NewBaseCollection(t) - return t -} - -func (t *table) Name() string { - return t.name -} - -func (t *table) Database() sqladapter.Database { - return t.d -} - -// Insert inserts an item (map or struct) into the collection. -func (t *table) Insert(item interface{}) (interface{}, error) { - columnNames, columnValues, err := sqlbuilder.Map(item, nil) - if err != nil { - return nil, err - } - - pKey := t.BaseCollection.PrimaryKeys() - - q := t.d.InsertInto(t.Name()). - Columns(columnNames...). - Values(columnValues...) - - var res sql.Result - if res, err = q.Exec(); err != nil { - return nil, err - } - - lastID, err := res.LastInsertId() - if err == nil && len(pKey) <= 1 { - return lastID, nil - } - - keyMap := db.Cond{} - for i := range columnNames { - for j := 0; j < len(pKey); j++ { - if pKey[j] == columnNames[i] { - keyMap[pKey[j]] = columnValues[i] - } - } - } - - // There was an auto column among primary keys, let's search for it. - if lastID > 0 { - for j := 0; j < len(pKey); j++ { - if keyMap[pKey[j]] == nil { - keyMap[pKey[j]] = lastID - } - } - } - - return keyMap, nil -} diff --git a/vendor/upper.io/db.v3/mysql/connection.go b/vendor/upper.io/db.v3/mysql/connection.go deleted file mode 100644 index 65154145b9b..00000000000 --- a/vendor/upper.io/db.v3/mysql/connection.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. 
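From the caller's side, the Insert implementation above surfaces either the auto-increment id or, for compound keys, a map of key columns. A sketch with a hypothetical accounts table and placeholder connection settings:

package main

import (
    "log"

    "upper.io/db.v3/mysql"
)

type Account struct {
    ID   int64  `db:"id,omitempty"`
    Name string `db:"name"`
}

func main() {
    sess, err := mysql.Open(mysql.ConnectionURL{ /* placeholder settings */ })
    if err != nil {
        log.Fatal(err)
    }
    defer sess.Close()

    // With a single auto-increment primary key, Insert returns LastInsertId();
    // with a compound key it returns a map of the key columns instead.
    id, err := sess.Collection("accounts").Insert(Account{Name: "test"})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("inserted id: %v", id)
}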
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package mysql - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" -) - -// From https://github.com/go-sql-driver/mysql/blob/master/utils.go -var ( - errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") - errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") - errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") -) - -// From https://github.com/go-sql-driver/mysql/blob/master/utils.go -type config struct { - user string - passwd string - net string - addr string - dbname string - params map[string]string -} - -// ConnectionURL implements a MySQL connection struct. -type ConnectionURL struct { - User string - Password string - Database string - Host string - Socket string - Options map[string]string -} - -func (c ConnectionURL) String() (s string) { - - if c.Database == "" { - return "" - } - - // Adding username. - if c.User != "" { - s = s + c.User - // Adding password. - if c.Password != "" { - s = s + ":" + c.Password - } - s = s + "@" - } - - // Adding protocol and address - if c.Socket != "" { - s = s + fmt.Sprintf("unix(%s)", c.Socket) - } else if c.Host != "" { - host, port, err := net.SplitHostPort(c.Host) - if err != nil { - host = c.Host - port = "3306" - } - s = s + fmt.Sprintf("tcp(%s:%s)", host, port) - } - - // Adding database - s = s + "/" + c.Database - - // Do we have any options? - if c.Options == nil { - c.Options = map[string]string{} - } - - // Default options. - if _, ok := c.Options["charset"]; !ok { - c.Options["charset"] = "utf8" - } - - if _, ok := c.Options["parseTime"]; !ok { - c.Options["parseTime"] = "true" - } - - // Converting options into URL values. - vv := url.Values{} - - for k, v := range c.Options { - vv.Set(k, v) - } - - // Inserting options. - if p := vv.Encode(); p != "" { - s = s + "?" + p - } - - return s -} - -// ParseURL parses s into a ConnectionURL struct. 
-func ParseURL(s string) (conn ConnectionURL, err error) { - var cfg *config - - if cfg, err = parseDSN(s); err != nil { - return - } - - conn.User = cfg.user - conn.Password = cfg.passwd - - if cfg.net == "unix" { - conn.Socket = cfg.addr - } else if cfg.net == "tcp" { - conn.Host = cfg.addr - } - - conn.Database = cfg.dbname - - conn.Options = map[string]string{} - - for k, v := range cfg.params { - conn.Options[k] = v - } - - return -} - -// from https://github.com/go-sql-driver/mysql/blob/master/utils.go -// parseDSN parses the DSN string to a config -func parseDSN(dsn string) (cfg *config, err error) { - // New config with some default values - cfg = &config{} - - // TODO: use strings.IndexByte when we can depend on Go 1.2 - - // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN] - // Find the last '/' (since the password or the net addr might contain a '/') - foundSlash := false - for i := len(dsn) - 1; i >= 0; i-- { - if dsn[i] == '/' { - foundSlash = true - var j, k int - - // left part is empty if i <= 0 - if i > 0 { - // [username[:password]@][protocol[(address)]] - // Find the last '@' in dsn[:i] - for j = i; j >= 0; j-- { - if dsn[j] == '@' { - // username[:password] - // Find the first ':' in dsn[:j] - for k = 0; k < j; k++ { - if dsn[k] == ':' { - cfg.passwd = dsn[k+1 : j] - break - } - } - cfg.user = dsn[:k] - - break - } - } - - // [protocol[(address)]] - // Find the first '(' in dsn[j+1:i] - for k = j + 1; k < i; k++ { - if dsn[k] == '(' { - // dsn[i-1] must be == ')' if an address is specified - if dsn[i-1] != ')' { - if strings.ContainsRune(dsn[k+1:i], ')') { - return nil, errInvalidDSNUnescaped - } - return nil, errInvalidDSNAddr - } - cfg.addr = dsn[k+1 : i-1] - break - } - } - cfg.net = dsn[j+1 : k] - } - - // dbname[?param1=value1&...&paramN=valueN] - // Find the first '?' in dsn[i+1:] - for j = i + 1; j < len(dsn); j++ { - if dsn[j] == '?' { - if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { - return - } - break - } - } - cfg.dbname = dsn[i+1 : j] - - break - } - } - - if !foundSlash && len(dsn) > 0 { - return nil, errInvalidDSNNoSlash - } - - // Set default network if empty - if cfg.net == "" { - cfg.net = "tcp" - } - - // Set default address if empty - if cfg.addr == "" { - switch cfg.net { - case "tcp": - cfg.addr = "127.0.0.1:3306" - case "unix": - cfg.addr = "/tmp/mysql.sock" - default: - return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") - } - - } - - return -} - -// From https://github.com/go-sql-driver/mysql/blob/master/utils.go -// parseDSNParams parses the DSN "query string" -// Values must be url.QueryEscape'ed -func parseDSNParams(cfg *config, params string) (err error) { - for _, v := range strings.Split(params, "&") { - param := strings.SplitN(v, "=", 2) - if len(param) != 2 { - continue - } - - value := param[1] - - // lazy init - if cfg.params == nil { - cfg.params = make(map[string]string) - } - - if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { - return - } - } - - return -} diff --git a/vendor/upper.io/db.v3/mysql/custom_types.go b/vendor/upper.io/db.v3/mysql/custom_types.go deleted file mode 100644 index fe63ab3ae39..00000000000 --- a/vendor/upper.io/db.v3/mysql/custom_types.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved.
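Taken together, ConnectionURL.String and ParseURL above round-trip between the struct form and a go-sql-driver DSN, filling in charset and parseTime defaults. A small sketch with made-up credentials:

package main

import (
    "fmt"
    "log"

    "upper.io/db.v3/mysql"
)

func main() {
    conn := mysql.ConnectionURL{
        User:     "upperio_user",
        Password: "upperio//s3cr37",
        Host:     "127.0.0.1", // String() falls back to port 3306
        Database: "upperio",
    }

    // charset=utf8 and parseTime=true are added by String() when absent.
    dsn := conn.String()
    fmt.Println(dsn)

    parsed, err := mysql.ParseURL(dsn)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(parsed.Host, parsed.Database, parsed.Options["parseTime"])
}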
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package mysql - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "reflect" - - "upper.io/db.v3/lib/sqlbuilder" -) - -// JSON represents a MySQL's JSON value: -// https://www.mysql.org/docs/9.6/static/datatype-json.html. JSON -// satisfies sqlbuilder.ScannerValuer. -type JSON struct { - V interface{} -} - -// MarshalJSON encodes the wrapper value as JSON. -func (j JSON) MarshalJSON() ([]byte, error) { - return json.Marshal(j.V) -} - -// UnmarshalJSON decodes the given JSON into the wrapped value. -func (j *JSON) UnmarshalJSON(b []byte) error { - var v interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - j.V = v - return nil -} - -// Scan satisfies the sql.Scanner interface. -func (j *JSON) Scan(src interface{}) error { - if src == nil { - j.V = nil - return nil - } - - b, ok := src.([]byte) - if !ok { - return errors.New("Scan source was not []bytes") - } - - if err := json.Unmarshal(b, &j.V); err != nil { - return err - } - return nil -} - -// Value satisfies the driver.Valuer interface. -func (j JSON) Value() (driver.Value, error) { - // See https://github.com/lib/pq/issues/528#issuecomment-257197239 on why are - // we returning string instead of []byte. - if j.V == nil { - return nil, nil - } - if v, ok := j.V.(json.RawMessage); ok { - return string(v), nil - } - b, err := json.Marshal(j.V) - if err != nil { - return nil, err - } - return string(b), nil -} - -// JSONMap represents a map of interfaces with string keys -// (`map[string]interface{}`) that is compatible with MySQL's JSON type. -// JSONMap satisfies sqlbuilder.ScannerValuer. -type JSONMap map[string]interface{} - -// Value satisfies the driver.Valuer interface. -func (m JSONMap) Value() (driver.Value, error) { - return JSONValue(m) -} - -// Scan satisfies the sql.Scanner interface. -func (m *JSONMap) Scan(src interface{}) error { - *m = map[string]interface{}(nil) - return ScanJSON(m, src) -} - -// JSONArray represents an array of any type (`[]interface{}`) that is -// compatible with MySQL's JSON type. JSONArray satisfies -// sqlbuilder.ScannerValuer. -type JSONArray []interface{} - -// Value satisfies the driver.Valuer interface. -func (a JSONArray) Value() (driver.Value, error) { - return JSONValue(a) -} - -// Scan satisfies the sql.Scanner interface. 
-func (a *JSONArray) Scan(src interface{}) error { - return ScanJSON(a, src) -} - -// JSONValue takes an interface and provides a driver.Value that can be -// stored as a JSON column. -func JSONValue(i interface{}) (driver.Value, error) { - v := JSON{i} - return v.Value() -} - -// ScanJSON decodes a JSON byte stream into the passed dst value. -func ScanJSON(dst interface{}, src interface{}) error { - v := JSON{dst} - return v.Scan(src) -} - -// EncodeJSON is deprecated and going to be removed. Use ScanJSON instead. -func EncodeJSON(i interface{}) (driver.Value, error) { - return JSONValue(i) -} - -// DecodeJSON is deprecated and going to be removed. Use JSONValue instead. -func DecodeJSON(dst interface{}, src interface{}) error { - return ScanJSON(dst, src) -} - -// JSONConverter provides a helper method WrapValue that satisfies -// sqlbuilder.ValueWrapper, can be used to encode Go structs into JSON -// MySQL types and vice versa. -// -// Example: -// -// type MyCustomStruct struct { -// ID int64 `db:"id" json:"id"` -// Name string `db:"name" json:"name"` -// ... -// mysql.JSONConverter -// } -type JSONConverter struct { -} - -// WrapValue satisfies sqlbuilder.ValueWrapper -func (obj *JSONConverter) WrapValue(src interface{}) interface{} { - return &JSON{src} -} - -func autoWrap(elem reflect.Value, v interface{}) interface{} { - kind := elem.Kind() - - if kind == reflect.Invalid { - return v - } - - if elem.Type().Implements(sqlbuilder.ScannerType) { - return v - } - - if elem.Type().Implements(sqlbuilder.ValuerType) { - return v - } - - if elem.Type().Implements(sqlbuilder.ValueWrapperType) { - if elem.Type().Kind() == reflect.Ptr { - w := reflect.ValueOf(v) - if w.Kind() == reflect.Ptr { - z := reflect.Zero(w.Elem().Type()) - w.Elem().Set(z) - return &JSON{v} - } - } - vw := elem.Interface().(sqlbuilder.ValueWrapper) - return vw.WrapValue(elem.Interface()) - } - - switch kind { - case reflect.Ptr: - return autoWrap(elem.Elem(), v) - case reflect.Slice: - return &JSON{v} - case reflect.Map: - if reflect.TypeOf(v).Kind() == reflect.Ptr { - w := reflect.ValueOf(v) - z := reflect.New(w.Elem().Type()) - w.Elem().Set(z.Elem()) - } - return &JSON{v} - } - - return v -} - -// Type checks. -var ( - _ sqlbuilder.ValueWrapper = &JSONConverter{} - _ sqlbuilder.ScannerValuer = &JSONMap{} - _ sqlbuilder.ScannerValuer = &JSONArray{} -) diff --git a/vendor/upper.io/db.v3/mysql/database.go b/vendor/upper.io/db.v3/mysql/database.go deleted file mode 100644 index e7dcb2f957c..00000000000 --- a/vendor/upper.io/db.v3/mysql/database.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
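The JSON helpers above are normally consumed through struct fields rather than called directly. A hedged sketch; the Event struct and its payload column are invented:

package main

import (
    "fmt"

    "upper.io/db.v3/mysql"
)

type Event struct {
    ID int64 `db:"id,omitempty"`
    // Stored in a MySQL JSON column; mysql.JSONMap provides Scan/Value.
    Payload mysql.JSONMap `db:"payload"`
}

func main() {
    e := Event{Payload: mysql.JSONMap{"kind": "deploy", "attempt": 1}}
    v, err := e.Payload.Value() // driver.Value holding the JSON text
    fmt.Println(v, err)
}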
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package mysql wraps the github.com/go-sql-driver/mysql MySQL driver. See -// https://upper.io/db.v3/mysql for documentation, particularities and usage -// examples. -package mysql - -import ( - "context" - "database/sql/driver" - "reflect" - "strings" - "sync" - "time" - - "database/sql" - - _ "github.com/go-sql-driver/mysql" // MySQL driver. - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/internal/sqladapter/compat" - "upper.io/db.v3/internal/sqladapter/exql" - "upper.io/db.v3/lib/sqlbuilder" -) - -// database is the actual implementation of Database -type database struct { - sqladapter.BaseDatabase - - sqlbuilder.SQLBuilder - - connURL db.ConnectionURL - mu sync.Mutex -} - -var ( - _ = sqlbuilder.Database(&database{}) - _ = sqlbuilder.Database(&database{}) -) - -// newDatabase creates a new *database session for internal use. -func newDatabase(settings db.ConnectionURL) *database { - return &database{ - connURL: settings, - } -} - -// ConnectionURL returns this database session's connection URL, if any. -func (d *database) ConnectionURL() db.ConnectionURL { - return d.connURL -} - -// Open attempts to open a connection with the database server. -func (d *database) Open(connURL db.ConnectionURL) error { - if connURL == nil { - return db.ErrMissingConnURL - } - d.connURL = connURL - return d.open() -} - -// NewTx begins a transaction block with the given context. -func (d *database) NewTx(ctx context.Context) (sqlbuilder.Tx, error) { - if ctx == nil { - ctx = d.Context() - } - nTx, err := d.NewDatabaseTx(ctx) - if err != nil { - return nil, err - } - return &tx{DatabaseTx: nTx}, nil -} - -// Collections returns a list of non-system tables from the database. -func (d *database) Collections() (collections []string, err error) { - q := d.Select("table_name"). - From("information_schema.tables"). - Where("table_schema = ?", d.BaseDatabase.Name()) - - iter := q.Iterator() - defer iter.Close() - - for iter.Next() { - var tableName string - if err := iter.Scan(&tableName); err != nil { - return nil, err - } - collections = append(collections, tableName) - } - - return collections, nil -} - -// open attempts to establish a connection with the MySQL server. -func (d *database) open() error { - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - connFn := func() error { - sess, err := sql.Open("mysql", d.ConnectionURL().String()) - if err == nil { - sess.SetConnMaxLifetime(db.DefaultSettings.ConnMaxLifetime()) - sess.SetMaxIdleConns(db.DefaultSettings.MaxIdleConns()) - sess.SetMaxOpenConns(db.DefaultSettings.MaxOpenConns()) - return d.BaseDatabase.BindSession(sess) - } - return err - } - - if err := d.BaseDatabase.WaitForConnection(connFn); err != nil { - return err - } - - return nil -} - -// Clone creates a copy of the database session on the given context. 
-func (d *database) clone(ctx context.Context, checkConn bool) (*database, error) { - clone := newDatabase(d.connURL) - - var err error - clone.BaseDatabase, err = d.NewClone(clone, checkConn) - if err != nil { - return nil, err - } - - clone.SetContext(ctx) - - clone.SQLBuilder = sqlbuilder.WithSession(clone.BaseDatabase, template) - - return clone, nil -} - -func (d *database) ConvertValues(values []interface{}) []interface{} { - for i := range values { - switch v := values[i].(type) { - case *string, *bool, *int, *uint, *int64, *uint64, *int32, *uint32, *int16, *uint16, *int8, *uint8, *float32, *float64, *[]uint8, sql.Scanner, *sql.Scanner, *time.Time: - case string, bool, int, uint, int64, uint64, int32, uint32, int16, uint16, int8, uint8, float32, float64, []uint8, driver.Valuer, *driver.Valuer, time.Time: - case *map[string]interface{}: - values[i] = (*JSONMap)(v) - - case map[string]interface{}: - values[i] = (*JSONMap)(&v) - - case sqlbuilder.ValueWrapper: - values[i] = v.WrapValue(v) - - default: - values[i] = autoWrap(reflect.ValueOf(values[i]), values[i]) - } - } - return values -} - -// CompileStatement compiles a *exql.Statement into arguments that sql/database -// accepts. -func (d *database) CompileStatement(stmt *exql.Statement, args []interface{}) (string, []interface{}) { - compiled, err := stmt.Compile(template) - if err != nil { - panic(err.Error()) - } - return sqlbuilder.Preprocess(compiled, args) -} - -// Err allows sqladapter to translate specific MySQL string errors into custom -// error values. -func (d *database) Err(err error) error { - if err != nil { - // This error is not exported so we have to check it by its string value. - s := err.Error() - if strings.Contains(s, `many connections`) { - return db.ErrTooManyClients - } - } - return err -} - -// NewCollection creates a db.Collection by name. -func (d *database) NewCollection(name string) db.Collection { - return newTable(d, name) -} - -// Tx creates a transaction block on the given context and passes it to the -// function fn. If fn returns no error the transaction is committed, else the -// transaction is rolled back. After being committed or rolled back the -// transaction is closed automatically. -func (d *database) Tx(ctx context.Context, fn func(tx sqlbuilder.Tx) error) error { - return sqladapter.RunTx(d, ctx, fn) -} - -// NewDatabaseTx begins a transaction block. -func (d *database) NewDatabaseTx(ctx context.Context) (sqladapter.DatabaseTx, error) { - clone, err := d.clone(ctx, true) - if err != nil { - return nil, err - } - clone.mu.Lock() - defer clone.mu.Unlock() - - connFn := func() error { - sqlTx, err := compat.BeginTx(clone.BaseDatabase.Session(), ctx, clone.TxOptions()) - if err == nil { - return clone.BindTx(ctx, sqlTx) - } - return err - } - - if err := d.BaseDatabase.WaitForConnection(connFn); err != nil { - return nil, err - } - - return sqladapter.NewDatabaseTx(clone), nil -} - -// LookupName looks for the name of the database and it's often used as a -// test to determine if the connection settings are valid. -func (d *database) LookupName() (string, error) { - q := d.Select(db.Raw("DATABASE() AS name")) - - iter := q.Iterator() - defer iter.Close() - - if iter.Next() { - var name string - err := iter.Scan(&name) - return name, err - } - - return "", iter.Err() -} - -// TableExists returns an error if the given table name does not exist on the -// database. -func (d *database) TableExists(name string) error { - q := d.Select("table_name"). - From("information_schema.tables").
- Where("table_schema = ? AND table_name = ?", d.BaseDatabase.Name(), name) - - iter := q.Iterator() - defer iter.Close() - - if iter.Next() { - var name string - if err := iter.Scan(&name); err != nil { - return err - } - return nil - } - return db.ErrCollectionDoesNotExist -} - -// PrimaryKeys returns the names of all the primary keys on the table. -func (d *database) PrimaryKeys(tableName string) ([]string, error) { - q := d.Select("k.column_name"). - From("information_schema.key_column_usage AS k"). - Where(` - k.constraint_name = 'PRIMARY' - AND k.table_schema = ? - AND k.table_name = ? - `, d.BaseDatabase.Name(), tableName). - OrderBy("k.ordinal_position") - - iter := q.Iterator() - defer iter.Close() - - pk := []string{} - - for iter.Next() { - var k string - if err := iter.Scan(&k); err != nil { - return nil, err - } - pk = append(pk, k) - } - - return pk, nil -} - -// WithContext creates a copy of the session on the given context. -func (d *database) WithContext(ctx context.Context) sqlbuilder.Database { - newDB, _ := d.clone(ctx, false) - return newDB -} diff --git a/vendor/upper.io/db.v3/mysql/docker-compose.yml b/vendor/upper.io/db.v3/mysql/docker-compose.yml deleted file mode 100644 index 18ab34999f0..00000000000 --- a/vendor/upper.io/db.v3/mysql/docker-compose.yml +++ /dev/null @@ -1,14 +0,0 @@ -version: '3' - -services: - - server: - image: mysql:${MYSQL_VERSION:-5} - environment: - MYSQL_USER: ${DB_USERNAME:-upperio_user} - MYSQL_PASSWORD: ${DB_PASSWORD:-upperio//s3cr37} - MYSQL_ALLOW_EMPTY_PASSWORD: 1 - MYSQL_DATABASE: ${DB_NAME:-upperio} - ports: - - '${DB_HOST:-127.0.0.1}:${DB_PORT:-3306}:3306' - diff --git a/vendor/upper.io/db.v3/mysql/mysql.go b/vendor/upper.io/db.v3/mysql/mysql.go deleted file mode 100644 index be0ff0f9bfc..00000000000 --- a/vendor/upper.io/db.v3/mysql/mysql.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package mysql // import "upper.io/db.v3/mysql" - -import ( - "database/sql" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/lib/sqlbuilder" -) - -const sqlDriver = `mysql` - -// Adapter is the public name of the adapter. -const Adapter = sqlDriver - -func init() { - sqlbuilder.RegisterAdapter(Adapter, &sqlbuilder.AdapterFuncMap{ - New: New, - NewTx: NewTx, - Open: Open, - }) -} - -// Open stablishes a new connection with the SQL server. 
-func Open(settings db.ConnectionURL) (sqlbuilder.Database, error) { - d := newDatabase(settings) - if err := d.Open(settings); err != nil { - return nil, err - } - return d, nil -} - -// NewTx wraps a regular *sql.Tx transaction and returns a new upper-db -// transaction backed by it. -func NewTx(sqlTx *sql.Tx) (sqlbuilder.Tx, error) { - d := newDatabase(nil) - - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - if err := d.BaseDatabase.BindTx(d.Context(), sqlTx); err != nil { - return nil, err - } - - newTx := sqladapter.NewDatabaseTx(d) - return &tx{DatabaseTx: newTx}, nil -} - -// New wraps the given *sql.DB session and creates a new db session. -func New(sess *sql.DB) (sqlbuilder.Database, error) { - d := newDatabase(nil) - - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - if err := d.BaseDatabase.BindSession(sess); err != nil { - return nil, err - } - return d, nil -} diff --git a/vendor/upper.io/db.v3/mysql/template.go b/vendor/upper.io/db.v3/mysql/template.go deleted file mode 100644 index 7a0f69e9ff3..00000000000 --- a/vendor/upper.io/db.v3/mysql/template.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
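New and NewTx above exist so an application-managed *sql.DB or *sql.Tx can be wrapped into a builder session. A sketch assuming a reachable server and a placeholder DSN:

package main

import (
    "context"
    "database/sql"
    "log"

    _ "github.com/go-sql-driver/mysql"
    "upper.io/db.v3/lib/sqlbuilder"
    "upper.io/db.v3/mysql"
)

func main() {
    sqlDB, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/upperio") // placeholder DSN
    if err != nil {
        log.Fatal(err)
    }

    sess, err := mysql.New(sqlDB)
    if err != nil {
        log.Fatal(err)
    }
    defer sess.Close()

    // Tx commits if fn returns nil and rolls back otherwise.
    err = sess.Tx(context.Background(), func(tx sqlbuilder.Tx) error {
        _, err := tx.Exec(`UPDATE accounts SET name = ? WHERE id = ?`, "x", 1)
        return err
    })
    if err != nil {
        log.Fatal(err)
    }
}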
- -package mysql - -import ( - "upper.io/db.v3/internal/cache" - "upper.io/db.v3/internal/sqladapter/exql" -) - -const ( - adapterColumnSeparator = `.` - adapterIdentifierSeparator = `, ` - adapterIdentifierQuote = "`{{.Value}}`" - adapterValueSeparator = `, ` - adapterValueQuote = `'{{.}}'` - adapterAndKeyword = `AND` - adapterOrKeyword = `OR` - adapterDescKeyword = `DESC` - adapterAscKeyword = `ASC` - adapterAssignmentOperator = `=` - adapterClauseGroup = `({{.}})` - adapterClauseOperator = ` {{.}} ` - adapterColumnValue = `{{.Column}} {{.Operator}} {{.Value}}` - adapterTableAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - adapterColumnAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - adapterSortByColumnLayout = `{{.Column}} {{.Order}}` - - adapterOrderByLayout = ` - {{if .SortColumns}} - ORDER BY {{.SortColumns}} - {{end}} - ` - - adapterWhereLayout = ` - {{if .Conds}} - WHERE {{.Conds}} - {{end}} - ` - - adapterUsingLayout = ` - {{if .Columns}} - USING ({{.Columns}}) - {{end}} - ` - - adapterJoinLayout = ` - {{if .Table}} - {{ if .On }} - {{.Type}} JOIN {{.Table}} - {{.On}} - {{ else if .Using }} - {{.Type}} JOIN {{.Table}} - {{.Using}} - {{ else if .Type | eq "CROSS" }} - {{.Type}} JOIN {{.Table}} - {{else}} - NATURAL {{.Type}} JOIN {{.Table}} - {{end}} - {{end}} - ` - - adapterOnLayout = ` - {{if .Conds}} - ON {{.Conds}} - {{end}} - ` - - adapterSelectLayout = ` - SELECT - {{if .Distinct}} - DISTINCT - {{end}} - - {{if defined .Columns}} - {{.Columns | compile}} - {{else}} - * - {{end}} - - {{if defined .Table}} - FROM {{.Table | compile}} - {{end}} - - {{.Joins | compile}} - - {{.Where | compile}} - - {{if defined .GroupBy}} - {{.GroupBy | compile}} - {{end}} - - {{.OrderBy | compile}} - - {{if .Limit}} - LIMIT {{.Limit}} - {{end}} - ` + - // The argument for LIMIT when only OFFSET is specified is a pretty odd magic - // number; this comes directly from MySQL's manual, see: - // https://dev.mysql.com/doc/refman/5.7/en/select.html - // - // "To retrieve all rows from a certain offset up to the end of the result - // set, you can use some large number for the second parameter. 
This - // statement retrieves all rows from the 96th row to the last: - // SELECT * FROM tbl LIMIT 95,18446744073709551615; " - // - // ¯\_(ツ)_/¯ - ` - {{if .Offset}} - {{if not .Limit}} - LIMIT 18446744073709551615 - {{end}} - OFFSET {{.Offset}} - {{end}} - ` - adapterDeleteLayout = ` - DELETE - FROM {{.Table | compile}} - {{.Where | compile}} - ` - adapterUpdateLayout = ` - UPDATE - {{.Table | compile}} - SET {{.ColumnValues | compile}} - {{.Where | compile}} - ` - - adapterSelectCountLayout = ` - SELECT - COUNT(1) AS _t - FROM {{.Table | compile}} - {{.Where | compile}} - ` - - adapterInsertLayout = ` - INSERT INTO {{.Table | compile}} - {{if defined .Columns}}({{.Columns | compile}}){{end}} - VALUES - {{if defined .Values}} - {{.Values | compile}} - {{else}} - () - {{end}} - {{if defined .Returning}} - RETURNING {{.Returning | compile}} - {{end}} - ` - - adapterTruncateLayout = ` - TRUNCATE TABLE {{.Table | compile}} - ` - - adapterDropDatabaseLayout = ` - DROP DATABASE {{.Database | compile}} - ` - - adapterDropTableLayout = ` - DROP TABLE {{.Table | compile}} - ` - - adapterGroupByLayout = ` - {{if .GroupColumns}} - GROUP BY {{.GroupColumns}} - {{end}} - ` -) - -var template = &exql.Template{ - ColumnSeparator: adapterColumnSeparator, - IdentifierSeparator: adapterIdentifierSeparator, - IdentifierQuote: adapterIdentifierQuote, - ValueSeparator: adapterValueSeparator, - ValueQuote: adapterValueQuote, - AndKeyword: adapterAndKeyword, - OrKeyword: adapterOrKeyword, - DescKeyword: adapterDescKeyword, - AscKeyword: adapterAscKeyword, - AssignmentOperator: adapterAssignmentOperator, - ClauseGroup: adapterClauseGroup, - ClauseOperator: adapterClauseOperator, - ColumnValue: adapterColumnValue, - TableAliasLayout: adapterTableAliasLayout, - ColumnAliasLayout: adapterColumnAliasLayout, - SortByColumnLayout: adapterSortByColumnLayout, - WhereLayout: adapterWhereLayout, - JoinLayout: adapterJoinLayout, - OnLayout: adapterOnLayout, - UsingLayout: adapterUsingLayout, - OrderByLayout: adapterOrderByLayout, - InsertLayout: adapterInsertLayout, - SelectLayout: adapterSelectLayout, - UpdateLayout: adapterUpdateLayout, - DeleteLayout: adapterDeleteLayout, - TruncateLayout: adapterTruncateLayout, - DropDatabaseLayout: adapterDropDatabaseLayout, - DropTableLayout: adapterDropTableLayout, - CountLayout: adapterSelectCountLayout, - GroupByLayout: adapterGroupByLayout, - Cache: cache.NewCache(), -} diff --git a/vendor/upper.io/db.v3/mysql/tx.go b/vendor/upper.io/db.v3/mysql/tx.go deleted file mode 100644 index c8d27cbcc70..00000000000 --- a/vendor/upper.io/db.v3/mysql/tx.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
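The odd LIMIT constant in the template above only fires when a query sets an offset without a limit. Roughly, from the builder side (table name and settings are placeholders, and printing the query assumes the Selector's Stringer, which is not shown in this patch):

package main

import (
    "fmt"
    "log"

    "upper.io/db.v3/mysql"
)

func main() {
    sess, err := mysql.Open(mysql.ConnectionURL{ /* placeholder settings */ })
    if err != nil {
        log.Fatal(err)
    }
    defer sess.Close()

    // Offset without Limit: the template is expected to emit
    //   LIMIT 18446744073709551615 OFFSET 95
    // because MySQL has no stand-alone OFFSET clause.
    q := sess.SelectFrom("tbl").Offset(95)
    fmt.Println(q.String())
}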
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package mysql - -import ( - "context" - - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/lib/sqlbuilder" -) - -type tx struct { - sqladapter.DatabaseTx -} - -var ( - _ = sqlbuilder.Tx(&tx{}) -) - -func (t *tx) WithContext(ctx context.Context) sqlbuilder.Tx { - newTx := *t - newTx.DatabaseTx.SetContext(ctx) - return &newTx -} diff --git a/vendor/upper.io/db.v3/postgresql/Makefile b/vendor/upper.io/db.v3/postgresql/Makefile deleted file mode 100644 index 6638f5e16f6..00000000000 --- a/vendor/upper.io/db.v3/postgresql/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -SHELL := bash - -POSTGRES_VERSION ?= 11 -POSTGRES_SUPPORTED ?= 12 $(POSTGRES_VERSION) 10 9 -PROJECT ?= upper_postgres_$(POSTGRES_VERSION) - -DB_HOST ?= 127.0.0.1 -DB_PORT ?= 5432 - -DB_NAME ?= upperio -DB_USERNAME ?= upperio_user -DB_PASSWORD ?= upperio//s3cr37 - -TEST_FLAGS ?= -PARALLEL_FLAGS ?= --halt-on-error 2 --jobs 1 - -export POSTGRES_VERSION - -export DB_HOST -export DB_NAME -export DB_PASSWORD -export DB_PORT -export DB_USERNAME - -export TEST_FLAGS - -test: - go test -v $(TEST_FLAGS) - -server-up: server-down - docker-compose -p $(PROJECT) up -d && \ - sleep 10 - -server-down: - docker-compose -p $(PROJECT) down - -test-extended: - parallel $(PARALLEL_FLAGS) \ - "POSTGRES_VERSION={} DB_PORT=\$$((5432+{#})) $(MAKE) server-up test server-down" ::: \ - $(POSTGRES_SUPPORTED) diff --git a/vendor/upper.io/db.v3/postgresql/README.md b/vendor/upper.io/db.v3/postgresql/README.md deleted file mode 100644 index 4610b74d960..00000000000 --- a/vendor/upper.io/db.v3/postgresql/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# PostgreSQL adapter for upper.io/db - -Please read the full docs, acknowledgements and examples at -[https://upper.io/db.v3/postgresql][1] - -[1]: https://upper.io/db.v3/postgresql diff --git a/vendor/upper.io/db.v3/postgresql/collection.go b/vendor/upper.io/db.v3/postgresql/collection.go deleted file mode 100644 index 3659e2d694f..00000000000 --- a/vendor/upper.io/db.v3/postgresql/collection.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package postgresql - -import ( - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" -) - -// collection is the actual implementation of a collection. -type collection struct { - sqladapter.BaseCollection // Leveraged by sqladapter - - d *database - name string -} - -var ( - _ = sqladapter.Collection(&collection{}) - _ = db.Collection(&collection{}) -) - -// newCollection binds *collection with sqladapter. -func newCollection(d *database, name string) *collection { - c := &collection{ - name: name, - d: d, - } - c.BaseCollection = sqladapter.NewBaseCollection(c) - return c -} - -func (c *collection) Name() string { - return c.name -} - -func (c *collection) Database() sqladapter.Database { - return c.d -} - -// Insert inserts an item (map or struct) into the collection. -func (c *collection) Insert(item interface{}) (interface{}, error) { - pKey := c.BaseCollection.PrimaryKeys() - - q := c.d.InsertInto(c.Name()).Values(item) - - if len(pKey) == 0 { - // There is no primary key. - res, err := q.Exec() - if err != nil { - return nil, err - } - - // Attempt to use LastInsertId() (probably won't work, but the Exec() - // succeeded, so we can safely ignore the error from LastInsertId()). - lastID, err := res.LastInsertId() - if err != nil { - return 0, nil - } - return lastID, nil - } - - // Asking the database to return the primary key after insertion. - q = q.Returning(pKey...) - - var keyMap db.Cond - if err := q.Iterator().One(&keyMap); err != nil { - return nil, err - } - - // The IDSetter interface does not match, look for another interface match. - if len(keyMap) == 1 { - return keyMap[pKey[0]], nil - } - - // This was a compound key and no interface matched it, let's return a map. - return keyMap, nil -} diff --git a/vendor/upper.io/db.v3/postgresql/connection.go b/vendor/upper.io/db.v3/postgresql/connection.go deleted file mode 100644 index 7f4a5520f5e..00000000000 --- a/vendor/upper.io/db.v3/postgresql/connection.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package postgresql - -import ( - "fmt" - "net" - "strings" - "unicode" - - "github.com/lib/pq" -) - -// scanner implements a tokenizer for libpq-style option strings. -type scanner struct { - s []rune - i int -} - -// Next returns the next rune. It returns 0, false if the end of the text has -// been reached. 
-func (s *scanner) Next() (rune, bool) { - if s.i >= len(s.s) { - return 0, false - } - r := s.s[s.i] - s.i++ - return r, true -} - -// SkipSpaces returns the next non-whitespace rune. It returns 0, false if the -// end of the text has been reached. -func (s *scanner) SkipSpaces() (rune, bool) { - r, ok := s.Next() - for unicode.IsSpace(r) && ok { - r, ok = s.Next() - } - return r, ok -} - -type values map[string]string - -func (vs values) Set(k, v string) { - vs[k] = v -} - -func (vs values) Get(k string) (v string) { - return vs[k] -} - -func (vs values) Isset(k string) bool { - _, ok := vs[k] - return ok -} - -// ConnectionURL represents a parsed PostgreSQL connection URL. -// -// You can use a ConnectionURL struct as an argument for Open: -// -// var settings = postgresql.ConnectionURL{ -// Host: "localhost", // PostgreSQL server IP or name. -// Database: "peanuts", // Database name. -// User: "cbrown", // Optional user name. -// Password: "snoopy", // Optional user password. -// } -// -// sess, err = postgresql.Open(settings) -// -// If you already have a valid DSN, you can use ParseURL to convert it into -// a ConnectionURL before passing it to Open. -type ConnectionURL struct { - User string - Password string - Host string - Socket string - Database string - Options map[string]string -} - -var escaper = strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`) - -// String reassembles the parsed PostgreSQL connection URL into a valid DSN. -func (c ConnectionURL) String() (s string) { - u := make([]string, 0, 6) - - // TODO: This surely needs some sort of escaping. - - if c.User != "" { - u = append(u, "user="+escaper.Replace(c.User)) - } - - if c.Password != "" { - u = append(u, "password="+escaper.Replace(c.Password)) - } - - if c.Host != "" { - host, port, err := net.SplitHostPort(c.Host) - if err == nil { - if host == "" { - host = "127.0.0.1" - } - if port == "" { - port = "5432" - } - u = append(u, "host="+escaper.Replace(host)) - u = append(u, "port="+escaper.Replace(port)) - } else { - u = append(u, "host="+escaper.Replace(c.Host)) - } - } - - if c.Socket != "" { - u = append(u, "host="+escaper.Replace(c.Socket)) - } - - if c.Database != "" { - u = append(u, "dbname="+escaper.Replace(c.Database)) - } - - // Is there actually any connection data? - if len(u) == 0 { - return "" - } - - if c.Options == nil { - c.Options = map[string]string{} - } - - // If not present, SSL mode is assumed disabled. - if sslMode, ok := c.Options["sslmode"]; !ok || sslMode == "" { - c.Options["sslmode"] = "disable" - } - - for k, v := range c.Options { - u = append(u, escaper.Replace(k)+"="+escaper.Replace(v)) - } - - return strings.Join(u, " ") -} - -// ParseURL parses the given DSN into a ConnectionURL struct. 
-// A typical PostgreSQL connection URL looks like: -// -// postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full -func ParseURL(s string) (u ConnectionURL, err error) { - o := make(values) - - if strings.HasPrefix(s, "postgres://") { - s, err = pq.ParseURL(s) - if err != nil { - return u, err - } - } - - if err := parseOpts(s, o); err != nil { - return u, err - } - - u.User = o.Get("user") - u.Password = o.Get("password") - - h := o.Get("host") - p := o.Get("port") - - if strings.HasPrefix(h, "/") { - u.Socket = h - } else { - if p == "" { - u.Host = h - } else { - u.Host = fmt.Sprintf("%s:%s", h, p) - } - } - - u.Database = o.Get("dbname") - - u.Options = make(map[string]string) - - for k := range o { - switch k { - case "user", "password", "host", "port", "dbname": - // Skip - default: - u.Options[k] = o[k] - } - } - - return u, err -} - -// parseOpts parses the options from name and adds them to the values. -// -// The parsing code is based on conninfo_parse from libpq's fe-connect.c -func parseOpts(name string, o values) error { - s := newScanner(name) - - for { - var ( - keyRunes, valRunes []rune - r rune - ok bool - ) - - if r, ok = s.SkipSpaces(); !ok { - break - } - - // Scan the key - for !unicode.IsSpace(r) && r != '=' { - keyRunes = append(keyRunes, r) - if r, ok = s.Next(); !ok { - break - } - } - - // Skip any whitespace if we're not at the = yet - if r != '=' { - r, ok = s.SkipSpaces() - } - - // The current character should be = - if r != '=' || !ok { - return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) - } - - // Skip any whitespace after the = - if r, ok = s.SkipSpaces(); !ok { - // If we reach the end here, the last value is just an empty string as per libpq. - o.Set(string(keyRunes), "") - break - } - - if r != '\'' { - for !unicode.IsSpace(r) { - if r == '\\' { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`missing character after backslash`) - } - } - valRunes = append(valRunes, r) - - if r, ok = s.Next(); !ok { - break - } - } - } else { - quote: - for { - if r, ok = s.Next(); !ok { - return fmt.Errorf(`unterminated quoted string literal in connection string`) - } - switch r { - case '\'': - break quote - case '\\': - r, _ = s.Next() - fallthrough - default: - valRunes = append(valRunes, r) - } - } - } - - o.Set(string(keyRunes), string(valRunes)) - } - - return nil -} - -// newScanner returns a new scanner initialized with the option string s. -func newScanner(s string) *scanner { - return &scanner{[]rune(s), 0} -} diff --git a/vendor/upper.io/db.v3/postgresql/custom_types.go b/vendor/upper.io/db.v3/postgresql/custom_types.go deleted file mode 100644 index 28c47a8a0bf..00000000000 --- a/vendor/upper.io/db.v3/postgresql/custom_types.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. 
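As in the MySQL adapter, String and ParseURL above convert between the struct form and a libpq-style DSN, defaulting sslmode to disable. A sketch reusing the made-up credentials from the doc comments above:

package main

import (
    "fmt"
    "log"

    "upper.io/db.v3/postgresql"
)

func main() {
    settings := postgresql.ConnectionURL{
        User:     "cbrown",
        Password: "snoopy",
        Host:     "localhost", // no explicit port: lib/pq's default applies
        Database: "peanuts",
    }
    fmt.Println(settings.String()) // key=value pairs; sslmode=disable added when unset

    parsed, err := postgresql.ParseURL("postgres://cbrown:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(parsed.Host, parsed.Database, parsed.Options["sslmode"])
}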
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package postgresql - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "reflect" - - "github.com/lib/pq" - "upper.io/db.v3/lib/sqlbuilder" -) - -// Array returns a sqlbuilder.ScannerValuer for any given slice. Slice elements -// may require their own sqlbuilder.ScannerValuer. -func Array(in interface{}) sqlbuilder.ScannerValuer { - return pq.Array(in) -} - -// JSONB represents a PostgreSQL's JSONB value: -// https://www.postgresql.org/docs/9.6/static/datatype-json.html. JSONB -// satisfies sqlbuilder.ScannerValuer. -type JSONB struct { - V interface{} -} - -// MarshalJSON encodes the wrapper value as JSON. -func (j JSONB) MarshalJSON() ([]byte, error) { - return json.Marshal(j.V) -} - -// UnmarshalJSON decodes the given JSON into the wrapped value. -func (j *JSONB) UnmarshalJSON(b []byte) error { - var v interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - j.V = v - return nil -} - -// Scan satisfies the sql.Scanner interface. -func (j *JSONB) Scan(src interface{}) error { - if src == nil { - j.V = nil - return nil - } - - b, ok := src.([]byte) - if !ok { - return errors.New("Scan source was not []bytes") - } - - if err := json.Unmarshal(b, &j.V); err != nil { - return err - } - return nil -} - -// Value satisfies the driver.Valuer interface. -func (j JSONB) Value() (driver.Value, error) { - // See https://github.com/lib/pq/issues/528#issuecomment-257197239 on why are - // we returning string instead of []byte. - if j.V == nil { - return nil, nil - } - if v, ok := j.V.(json.RawMessage); ok { - return string(v), nil - } - b, err := json.Marshal(j.V) - if err != nil { - return nil, err - } - return string(b), nil -} - -// StringArray represents a one-dimensional array of strings (`[]string{}`) -// that is compatible with PostgreSQL's text array (`text[]`). StringArray -// satisfies sqlbuilder.ScannerValuer. -type StringArray pq.StringArray - -// Value satisfies the driver.Valuer interface. -func (a StringArray) Value() (driver.Value, error) { - return pq.StringArray(a).Value() -} - -// Scan satisfies the sql.Scanner interface. -func (a *StringArray) Scan(src interface{}) error { - s := pq.StringArray(*a) - if err := s.Scan(src); err != nil { - return err - } - *a = StringArray(s) - return nil -} - -// Int64Array represents a one-dimensional array of int64s (`[]int64{}`) that -// is compatible with PostgreSQL's integer array (`integer[]`). Int64Array -// satisfies sqlbuilder.ScannerValuer. -type Int64Array pq.Int64Array - -// Value satisfies the driver.Valuer interface. -func (i Int64Array) Value() (driver.Value, error) { - return pq.Int64Array(i).Value() -} - -// Scan satisfies the sql.Scanner interface. -func (i *Int64Array) Scan(src interface{}) error { - s := pq.Int64Array(*i) - if err := s.Scan(src); err != nil { - return err - } - *i = Int64Array(s) - return nil -} - -// Float64Array represents a one-dimensional array of float64s (`[]float64{}`) -// that is compatible with PostgreSQL's double precision array (`double -// precision[]`). 
Float64Array satisfies sqlbuilder.ScannerValuer. -type Float64Array pq.Float64Array - -// Value satisfies the driver.Valuer interface. -func (f Float64Array) Value() (driver.Value, error) { - return pq.Float64Array(f).Value() -} - -// Scan satisfies the sql.Scanner interface. -func (f *Float64Array) Scan(src interface{}) error { - s := pq.Float64Array(*f) - if err := s.Scan(src); err != nil { - return err - } - *f = Float64Array(s) - return nil -} - -// BoolArray represents a one-dimensional array of int64s (`[]bool{}`) that -// is compatible with PostgreSQL's boolean type (`boolean[]`). BoolArray -// satisfies sqlbuilder.ScannerValuer. -type BoolArray pq.BoolArray - -// Value satisfies the driver.Valuer interface. -func (b BoolArray) Value() (driver.Value, error) { - return pq.BoolArray(b).Value() -} - -// Scan satisfies the sql.Scanner interface. -func (b *BoolArray) Scan(src interface{}) error { - s := pq.BoolArray(*b) - if err := s.Scan(src); err != nil { - return err - } - *b = BoolArray(s) - return nil -} - -// GenericArray represents a one-dimensional array of any type -// (`[]interface{}`) that is compatible with PostgreSQL's array type. -// GenericArray satisfies sqlbuilder.ScannerValuer and its elements may need to -// satisfy sqlbuilder.ScannerValuer too. -type GenericArray pq.GenericArray - -// Value satisfies the driver.Valuer interface. -func (g GenericArray) Value() (driver.Value, error) { - return pq.GenericArray(g).Value() -} - -// Scan satisfies the sql.Scanner interface. -func (g *GenericArray) Scan(src interface{}) error { - s := pq.GenericArray(*g) - if err := s.Scan(src); err != nil { - return err - } - *g = GenericArray(s) - return nil -} - -// JSONBMap represents a map of interfaces with string keys -// (`map[string]interface{}`) that is compatible with PostgreSQL's JSONB type. -// JSONBMap satisfies sqlbuilder.ScannerValuer. -type JSONBMap map[string]interface{} - -// Value satisfies the driver.Valuer interface. -func (m JSONBMap) Value() (driver.Value, error) { - return JSONBValue(m) -} - -// Scan satisfies the sql.Scanner interface. -func (m *JSONBMap) Scan(src interface{}) error { - *m = map[string]interface{}(nil) - return ScanJSONB(m, src) -} - -// JSONBArray represents an array of any type (`[]interface{}`) that is -// compatible with PostgreSQL's JSONB type. JSONBArray satisfies -// sqlbuilder.ScannerValuer. -type JSONBArray []interface{} - -// Value satisfies the driver.Valuer interface. -func (a JSONBArray) Value() (driver.Value, error) { - return JSONBValue(a) -} - -// Scan satisfies the sql.Scanner interface. -func (a *JSONBArray) Scan(src interface{}) error { - return ScanJSONB(a, src) -} - -// JSONBValue takes an interface and provides a driver.Value that can be -// stored as a JSONB column. -func JSONBValue(i interface{}) (driver.Value, error) { - v := JSONB{i} - return v.Value() -} - -// ScanJSONB decodes a JSON byte stream into the passed dst value. -func ScanJSONB(dst interface{}, src interface{}) error { - v := JSONB{dst} - return v.Scan(src) -} - -// EncodeJSONB is deprecated and going to be removed. Use ScanJSONB instead. -func EncodeJSONB(i interface{}) (driver.Value, error) { - return JSONBValue(i) -} - -// DecodeJSONB is deprecated and going to be removed. Use JSONBValue instead. 
-func DecodeJSONB(dst interface{}, src interface{}) error { - return ScanJSONB(dst, src) -} - -// JSONBConverter provides a helper method WrapValue that satisfies -// sqlbuilder.ValueWrapper, can be used to encode Go structs into JSONB -// PostgreSQL types and vice versa. -// -// Example: -// -// type MyCustomStruct struct { -// ID int64 `db:"id" json:"id"` -// Name string `db:"name" json:"name"` -// ... -// postgresql.JSONBConverter -// } -type JSONBConverter struct { -} - -// WrapValue satisfies sqlbuilder.ValueWrapper -func (obj *JSONBConverter) WrapValue(src interface{}) interface{} { - return &JSONB{src} -} - -func autoWrap(elem reflect.Value, v interface{}) interface{} { - kind := elem.Kind() - - if kind == reflect.Invalid { - return v - } - - if elem.Type().Implements(sqlbuilder.ScannerType) { - return v - } - - if elem.Type().Implements(sqlbuilder.ValuerType) { - return v - } - - if elem.Type().Implements(sqlbuilder.ValueWrapperType) { - if elem.Type().Kind() == reflect.Ptr { - w := reflect.ValueOf(v) - if w.Kind() == reflect.Ptr { - z := reflect.Zero(w.Elem().Type()) - w.Elem().Set(z) - return &JSONB{v} - } - } - vw := elem.Interface().(sqlbuilder.ValueWrapper) - return vw.WrapValue(elem.Interface()) - } - - switch kind { - case reflect.Ptr: - return autoWrap(elem.Elem(), v) - case reflect.Slice: - return &JSONB{v} - case reflect.Map: - if reflect.TypeOf(v).Kind() == reflect.Ptr { - w := reflect.ValueOf(v) - z := reflect.New(w.Elem().Type()) - w.Elem().Set(z.Elem()) - } - return &JSONB{v} - } - - return v -} - -// Type checks. -var ( - _ sqlbuilder.ValueWrapper = &JSONBConverter{} - - _ sqlbuilder.ScannerValuer = &StringArray{} - _ sqlbuilder.ScannerValuer = &Int64Array{} - _ sqlbuilder.ScannerValuer = &Float64Array{} - _ sqlbuilder.ScannerValuer = &BoolArray{} - _ sqlbuilder.ScannerValuer = &GenericArray{} - _ sqlbuilder.ScannerValuer = &JSONBMap{} - _ sqlbuilder.ScannerValuer = &JSONBArray{} -) diff --git a/vendor/upper.io/db.v3/postgresql/database.go b/vendor/upper.io/db.v3/postgresql/database.go deleted file mode 100644 index 0855eb55be6..00000000000 --- a/vendor/upper.io/db.v3/postgresql/database.go +++ /dev/null @@ -1,340 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package postgresql wraps the github.com/lib/pq PostgreSQL driver. See -// https://upper.io/db.v3/postgresql for documentation, particularities and -// usage examples. 
-package postgresql - -import ( - "context" - "database/sql" - "database/sql/driver" - "fmt" - "reflect" - "strings" - "sync" - "time" - - _ "github.com/lib/pq" // PostgreSQL driver. - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/internal/sqladapter/compat" - "upper.io/db.v3/internal/sqladapter/exql" - "upper.io/db.v3/lib/sqlbuilder" -) - -// database is the actual implementation of Database -type database struct { - sqladapter.BaseDatabase - - sqlbuilder.SQLBuilder - - connURL db.ConnectionURL - mu sync.Mutex -} - -var ( - _ = sqlbuilder.Database(&database{}) - _ = sqladapter.Database(&database{}) -) - -// newDatabase creates a new *database session for internal use. -func newDatabase(settings db.ConnectionURL) *database { - return &database{ - connURL: settings, - } -} - -// ConnectionURL returns this database session's connection URL, if any. -func (d *database) ConnectionURL() db.ConnectionURL { - return d.connURL -} - -// Open attempts to open a connection with the database server. -func (d *database) Open(connURL db.ConnectionURL) error { - if connURL == nil { - return db.ErrMissingConnURL - } - d.connURL = connURL - return d.open() -} - -// NewTx begins a transaction block with the given context. -func (d *database) NewTx(ctx context.Context) (sqlbuilder.Tx, error) { - if ctx == nil { - ctx = context.Background() - } - nTx, err := d.NewDatabaseTx(ctx) - if err != nil { - return nil, err - } - return &tx{DatabaseTx: nTx}, nil -} - -// Collections returns a list of non-system tables from the database. -func (d *database) Collections() (collections []string, err error) { - q := d.Select("table_name"). - From("information_schema.tables"). - Where("table_schema = ?", "public") - - iter := q.Iterator() - defer iter.Close() - - for iter.Next() { - var tableName string - if err := iter.Scan(&tableName); err != nil { - return nil, err - } - collections = append(collections, tableName) - } - - return collections, nil -} - -// open attempts to establish a connection with the PostgreSQL server. -func (d *database) open() error { - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - connFn := func() error { - sess, err := sql.Open("postgres", d.ConnectionURL().String()) - if err == nil { - sess.SetConnMaxLifetime(db.DefaultSettings.ConnMaxLifetime()) - sess.SetMaxIdleConns(db.DefaultSettings.MaxIdleConns()) - sess.SetMaxOpenConns(db.DefaultSettings.MaxOpenConns()) - return d.BaseDatabase.BindSession(sess) - } - return err - } - - if err := d.BaseDatabase.WaitForConnection(connFn); err != nil { - return err - } - - return nil -} - -// Clone creates a copy of the database session on the given context. -func (d *database) clone(ctx context.Context, checkConn bool) (*database, error) { - clone := newDatabase(d.connURL) - - var err error - clone.BaseDatabase, err = d.NewClone(clone, checkConn) - if err != nil { - return nil, err - } - - clone.SetContext(ctx) - - clone.SQLBuilder = sqlbuilder.WithSession(clone.BaseDatabase, template) - - return clone, nil -} - -func (d *database) ConvertValues(values []interface{}) []interface{} { - for i := range values { - switch v := values[i].(type) { - case *string, *bool, *int, *uint, *int64, *uint64, *int32, *uint32, *int16, *uint16, *int8, *uint8, *float32, *float64, *[]uint8, sql.Scanner, *sql.Scanner, *time.Time: - // Handled by pq. 
- case string, bool, int, uint, int64, uint64, int32, uint32, int16, uint16, int8, uint8, float32, float64, []uint8, driver.Valuer, *driver.Valuer, time.Time: - // Handled by pq. - - case *[]int64: - values[i] = (*Int64Array)(v) - case *[]string: - values[i] = (*StringArray)(v) - case *[]float64: - values[i] = (*Float64Array)(v) - case *[]bool: - values[i] = (*BoolArray)(v) - case *map[string]interface{}: - values[i] = (*JSONBMap)(v) - - case []int64: - values[i] = (*Int64Array)(&v) - case []string: - values[i] = (*StringArray)(&v) - case []float64: - values[i] = (*Float64Array)(&v) - case []bool: - values[i] = (*BoolArray)(&v) - case map[string]interface{}: - values[i] = (*JSONBMap)(&v) - - case sqlbuilder.ValueWrapper: - values[i] = v.WrapValue(v) - - default: - values[i] = autoWrap(reflect.ValueOf(values[i]), values[i]) - } - - } - return values -} - -// CompileStatement compiles a *exql.Statement into arguments that database/sql -// accepts. -func (d *database) CompileStatement(stmt *exql.Statement, args []interface{}) (string, []interface{}) { - compiled, err := stmt.Compile(template) - if err != nil { - panic(err.Error()) - } - query, args := sqlbuilder.Preprocess(compiled, args) - return sqladapter.ReplaceWithDollarSign(query), args -} - -// Err allows sqladapter to translate specific PostgreSQL string errors into -// custom error values. -func (d *database) Err(err error) error { - if err != nil { - s := err.Error() - // These errors are not exported so we have to check them by their string value. - if strings.Contains(s, `too many clients`) || strings.Contains(s, `remaining connection slots are reserved`) || strings.Contains(s, `too many open`) { - return db.ErrTooManyClients - } - } - return err -} - -// NewCollection creates a db.Collection by name. -func (d *database) NewCollection(name string) db.Collection { - return newCollection(d, name) -} - -// Tx creates a transaction block on the given context and passes it to the -// function fn. If fn returns no error the transaction is committed, else the -// transaction is rolled back. After being committed or rolled back the -// transaction is closed automatically. -func (d *database) Tx(ctx context.Context, fn func(tx sqlbuilder.Tx) error) error { - return sqladapter.RunTx(d, ctx, fn) -} - -// NewDatabaseTx begins a transaction block. -func (d *database) NewDatabaseTx(ctx context.Context) (sqladapter.DatabaseTx, error) { - clone, err := d.clone(ctx, true) - if err != nil { - return nil, err - } - clone.mu.Lock() - defer clone.mu.Unlock() - - connFn := func() error { - sqlTx, err := compat.BeginTx(clone.BaseDatabase.Session(), ctx, clone.TxOptions()) - if err == nil { - return clone.BindTx(ctx, sqlTx) - } - return err - } - - if err := clone.BaseDatabase.WaitForConnection(connFn); err != nil { - return nil, err - } - - return sqladapter.NewDatabaseTx(clone), nil -} - -// LookupName looks for the name of the database and it's often used as a -// test to determine if the connection settings are valid. -func (d *database) LookupName() (string, error) { - q := d.Select(db.Raw("CURRENT_DATABASE() AS name")) - - iter := q.Iterator() - defer iter.Close() - - if iter.Next() { - var name string - err := iter.Scan(&name) - return name, err - } - - return "", iter.Err() -} - -// TableExists returns an error if the given table name does not exist on the -// database. -func (d *database) TableExists(name string) error { - q := d.Select("table_name"). - From("information_schema.tables"). - Where("table_catalog = ? 
AND table_name = ?", d.BaseDatabase.Name(), name) - - iter := q.Iterator() - defer iter.Close() - - if iter.Next() { - var name string - if err := iter.Scan(&name); err != nil { - return err - } - return nil - } - return db.ErrCollectionDoesNotExist -} - -// quotedTableName returns a valid regclass name for both regular tables and -// for schemas. -func quotedTableName(s string) string { - chunks := strings.Split(s, ".") - for i := range chunks { - chunks[i] = fmt.Sprintf("%q", chunks[i]) - } - return strings.Join(chunks, ".") -} - -// PrimaryKeys returns the names of all the primary keys on the table. -func (d *database) PrimaryKeys(tableName string) ([]string, error) { - q := d.Select("pg_attribute.attname AS pkey"). - From("pg_index", "pg_class", "pg_attribute"). - Where(` - pg_class.oid = '` + quotedTableName(tableName) + `'::regclass - AND indrelid = pg_class.oid - AND pg_attribute.attrelid = pg_class.oid - AND pg_attribute.attnum = ANY(pg_index.indkey) - AND indisprimary - `).OrderBy("pkey") - - iter := q.Iterator() - defer iter.Close() - - pk := []string{} - - for iter.Next() { - var k string - if err := iter.Scan(&k); err != nil { - return nil, err - } - pk = append(pk, k) - } - if err := iter.Err(); err != nil { - return nil, err - } - - return pk, nil -} - -// WithContext creates a copy of the session on the given context. -func (d *database) WithContext(ctx context.Context) sqlbuilder.Database { - newDB, _ := d.clone(ctx, false) - return newDB -} diff --git a/vendor/upper.io/db.v3/postgresql/docker-compose.yml b/vendor/upper.io/db.v3/postgresql/docker-compose.yml deleted file mode 100644 index 4f4884a32e7..00000000000 --- a/vendor/upper.io/db.v3/postgresql/docker-compose.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: '3' - -services: - - server: - image: postgres:${POSTGRES_VERSION:-11} - environment: - POSTGRES_USER: ${DB_USERNAME:-upperio_user} - POSTGRES_PASSWORD: ${DB_PASSWORD:-upperio//s3cr37} - POSTGRES_DB: ${DB_NAME:-upperio} - ports: - - '${DB_HOST:-127.0.0.1}:${DB_PORT:-5432}:5432' - diff --git a/vendor/upper.io/db.v3/postgresql/postgresql.go b/vendor/upper.io/db.v3/postgresql/postgresql.go deleted file mode 100644 index 214412ec575..00000000000 --- a/vendor/upper.io/db.v3/postgresql/postgresql.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package postgresql // import "upper.io/db.v3/postgresql" - -import ( - "database/sql" - - db "upper.io/db.v3" - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/lib/sqlbuilder" -) - -// Adapter is the unique name that you can use to refer to this adapter. -const Adapter = `postgresql` - -func init() { - sqlbuilder.RegisterAdapter(Adapter, &sqlbuilder.AdapterFuncMap{ - New: New, - NewTx: NewTx, - Open: Open, - }) -} - -// Open opens a new connection with the PostgreSQL server. The returned session -// is validated first by Ping and then with a test query before being returned. -// You may call Open() just once and use it on multiple goroutines on a -// long-running program. See https://golang.org/pkg/database/sql/#Open and -// http://go-database-sql.org/accessing.html -func Open(settings db.ConnectionURL) (sqlbuilder.Database, error) { - d := newDatabase(settings) - if err := d.Open(settings); err != nil { - return nil, err - } - return d, nil -} - -// NewTx wraps a regular *sql.Tx transaction and returns a new upper-db -// transaction backed by it. -func NewTx(sqlTx *sql.Tx) (sqlbuilder.Tx, error) { - d := newDatabase(nil) - - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - if err := d.BaseDatabase.BindTx(d.Context(), sqlTx); err != nil { - return nil, err - } - - newTx := sqladapter.NewDatabaseTx(d) - return &tx{DatabaseTx: newTx}, nil -} - -// New wraps a regular *sql.DB session and creates a new upper-db session -// backed by it. -func New(sess *sql.DB) (sqlbuilder.Database, error) { - d := newDatabase(nil) - - // Binding with sqladapter's logic. - d.BaseDatabase = sqladapter.NewBaseDatabase(d) - - // Binding with sqlbuilder. - d.SQLBuilder = sqlbuilder.WithSession(d.BaseDatabase, template) - - if err := d.BaseDatabase.BindSession(sess); err != nil { - return nil, err - } - return d, nil -} diff --git a/vendor/upper.io/db.v3/postgresql/template.go b/vendor/upper.io/db.v3/postgresql/template.go deleted file mode 100644 index 55909f85ebc..00000000000 --- a/vendor/upper.io/db.v3/postgresql/template.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package postgresql - -import ( - db "upper.io/db.v3" - "upper.io/db.v3/internal/cache" - "upper.io/db.v3/internal/sqladapter/exql" -) - -const ( - adapterColumnSeparator = `.` - adapterIdentifierSeparator = `, ` - adapterIdentifierQuote = `"{{.Value}}"` - adapterValueSeparator = `, ` - adapterValueQuote = `'{{.}}'` - adapterAndKeyword = `AND` - adapterOrKeyword = `OR` - adapterDescKeyword = `DESC` - adapterAscKeyword = `ASC` - adapterAssignmentOperator = `=` - adapterClauseGroup = `({{.}})` - adapterClauseOperator = ` {{.}} ` - adapterColumnValue = `{{.Column}} {{.Operator}} {{.Value}}` - adapterTableAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - adapterColumnAliasLayout = `{{.Name}}{{if .Alias}} AS {{.Alias}}{{end}}` - adapterSortByColumnLayout = `{{.Column}} {{.Order}}` - - adapterOrderByLayout = ` - {{if .SortColumns}} - ORDER BY {{.SortColumns}} - {{end}} - ` - - adapterWhereLayout = ` - {{if .Conds}} - WHERE {{.Conds}} - {{end}} - ` - - adapterUsingLayout = ` - {{if .Columns}} - USING ({{.Columns}}) - {{end}} - ` - - adapterJoinLayout = ` - {{if .Table}} - {{ if .On }} - {{.Type}} JOIN {{.Table}} - {{.On}} - {{ else if .Using }} - {{.Type}} JOIN {{.Table}} - {{.Using}} - {{ else if .Type | eq "CROSS" }} - {{.Type}} JOIN {{.Table}} - {{else}} - NATURAL {{.Type}} JOIN {{.Table}} - {{end}} - {{end}} - ` - - adapterOnLayout = ` - {{if .Conds}} - ON {{.Conds}} - {{end}} - ` - - adapterSelectLayout = ` - SELECT - {{if .Distinct}} - DISTINCT - {{end}} - - {{if defined .Columns}} - {{.Columns | compile}} - {{else}} - * - {{end}} - - {{if defined .Table}} - FROM {{.Table | compile}} - {{end}} - - {{.Joins | compile}} - - {{.Where | compile}} - - {{if defined .GroupBy}} - {{.GroupBy | compile}} - {{end}} - - {{.OrderBy | compile}} - - {{if .Limit}} - LIMIT {{.Limit}} - {{end}} - - {{if .Offset}} - OFFSET {{.Offset}} - {{end}} - ` - adapterDeleteLayout = ` - DELETE - FROM {{.Table | compile}} - {{.Where | compile}} - ` - adapterUpdateLayout = ` - UPDATE - {{.Table | compile}} - SET {{.ColumnValues | compile}} - {{.Where | compile}} - ` - - adapterSelectCountLayout = ` - SELECT - COUNT(1) AS _t - FROM {{.Table | compile}} - {{.Where | compile}} - ` - - adapterInsertLayout = ` - INSERT INTO {{.Table | compile}} - {{if defined .Columns}}({{.Columns | compile}}){{end}} - VALUES - {{if defined .Values}} - {{.Values | compile}} - {{else}} - (default) - {{end}} - {{if defined .Returning}} - RETURNING {{.Returning | compile}} - {{end}} - ` - - adapterTruncateLayout = ` - TRUNCATE TABLE {{.Table | compile}} RESTART IDENTITY - ` - - adapterDropDatabaseLayout = ` - DROP DATABASE {{.Database | compile}} - ` - - adapterDropTableLayout = ` - DROP TABLE {{.Table | compile}} - ` - - adapterGroupByLayout = ` - {{if .GroupColumns}} - GROUP BY {{.GroupColumns}} - {{end}} - ` -) - -var template = &exql.Template{ - ColumnSeparator: adapterColumnSeparator, - IdentifierSeparator: adapterIdentifierSeparator, - IdentifierQuote: adapterIdentifierQuote, - ValueSeparator: adapterValueSeparator, - ValueQuote: adapterValueQuote, - AndKeyword: adapterAndKeyword, - OrKeyword: adapterOrKeyword, - DescKeyword: adapterDescKeyword, - AscKeyword: adapterAscKeyword, - AssignmentOperator: adapterAssignmentOperator, - ClauseGroup: adapterClauseGroup, - ClauseOperator: adapterClauseOperator, - ColumnValue: adapterColumnValue, - TableAliasLayout: adapterTableAliasLayout, - ColumnAliasLayout: adapterColumnAliasLayout, - SortByColumnLayout: adapterSortByColumnLayout, - WhereLayout: adapterWhereLayout, - JoinLayout: 
adapterJoinLayout, - OnLayout: adapterOnLayout, - UsingLayout: adapterUsingLayout, - OrderByLayout: adapterOrderByLayout, - InsertLayout: adapterInsertLayout, - SelectLayout: adapterSelectLayout, - UpdateLayout: adapterUpdateLayout, - DeleteLayout: adapterDeleteLayout, - TruncateLayout: adapterTruncateLayout, - DropDatabaseLayout: adapterDropDatabaseLayout, - DropTableLayout: adapterDropTableLayout, - CountLayout: adapterSelectCountLayout, - GroupByLayout: adapterGroupByLayout, - Cache: cache.NewCache(), - ComparisonOperator: map[db.ComparisonOperator]string{ - db.ComparisonOperatorRegExp: "~", - db.ComparisonOperatorNotRegExp: "!~", - }, -} diff --git a/vendor/upper.io/db.v3/postgresql/tx.go b/vendor/upper.io/db.v3/postgresql/tx.go deleted file mode 100644 index 408e9f82e10..00000000000 --- a/vendor/upper.io/db.v3/postgresql/tx.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package postgresql - -import ( - "context" - - "upper.io/db.v3/internal/sqladapter" - "upper.io/db.v3/lib/sqlbuilder" -) - -type tx struct { - sqladapter.DatabaseTx -} - -var ( - _ = sqlbuilder.Tx(&tx{}) -) - -func (t *tx) WithContext(ctx context.Context) sqlbuilder.Tx { - newTx := *t - newTx.DatabaseTx.SetContext(ctx) - return &newTx -} diff --git a/vendor/upper.io/db.v3/raw.go b/vendor/upper.io/db.v3/raw.go deleted file mode 100644 index 09aa0617594..00000000000 --- a/vendor/upper.io/db.v3/raw.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "fmt" -) - -// RawValue interface represents values that can bypass SQL filters. This is an -// exported interface but it's rarely used directly; you may want to use -// the `db.Raw()` function instead. -type RawValue interface { - fmt.Stringer - Compound - - // Raw returns the string representation of the value that the user wants to - // pass without any escaping. - Raw() string - - // Arguments returns the arguments to be replaced on the query. - Arguments() []interface{} -} - -type rawValue struct { - v string - a *[]interface{} // This may look ugly but allows us to use db.Raw() as keys for db.Cond{}. -} - -func (r rawValue) Arguments() []interface{} { - if r.a != nil { - return *r.a - } - return nil -} - -func (r rawValue) Raw() string { - return r.v -} - -func (r rawValue) String() string { - return r.Raw() -} - -// Sentences returns the raw value as a single-element list of compounds. -func (r rawValue) Sentences() []Compound { - return []Compound{r} -} - -// Operator returns the default compound operator. -func (r rawValue) Operator() CompoundOperator { - return OperatorNone -} - -// Empty returns true if this struct holds no value. -func (r rawValue) Empty() bool { - return r.v == "" -} - -// Raw marks chunks of data as protected, so they pass directly to the query -// without any filtering. Use with care. -// -// Example: -// -// // SOUNDEX('Hello') -// Raw("SOUNDEX('Hello')") -// -// Raw returns a value that satisfies the db.RawValue interface. -func Raw(value string, args ...interface{}) RawValue { - r := rawValue{v: value, a: nil} - if len(args) > 0 { - r.a = &args - } - return r -} - -var _ = RawValue(&rawValue{}) diff --git a/vendor/upper.io/db.v3/result.go b/vendor/upper.io/db.v3/result.go deleted file mode 100644 index e72e7e923df..00000000000 --- a/vendor/upper.io/db.v3/result.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Result is an interface that defines methods which are useful for working -// with result sets. -type Result interface { - // String satisfies fmt.Stringer and returns a SELECT statement for - // the result. 
- String() string - - // Limit defines the maximum number of results for this set. It only has - // effect on `One()`, `All()` and `Next()`. A negative limit cancels any - // previous limit settings. - Limit(int) Result - - // Offset ignores the first *n* results. It only has effect on `One()`, - // `All()` and `Next()`. A negative offset cancels any previous offset - // settings. - Offset(int) Result - - // OrderBy receives one or more field names that define the order in which - // elements will be returned in a query. Field names may be prefixed with a - // minus sign (-) indicating descending order; ascending order will be used - // otherwise. - OrderBy(...interface{}) Result - - // Select defines specific columns to be returned from the elements of the - // set. - Select(...interface{}) Result - - // Where discards all the previously set filtering constraints (if any) and - // sets new ones. Commonly used when the conditions of the result depend on - // external parameters that are yet to be evaluated: - // - // res := col.Find() - // - // if ... { - // res.Where(...) - // } else { - // res.Where(...) - // } - Where(...interface{}) Result - - // And adds more filtering conditions on top of the existing constraints. - // - // res := col.Find(...).And(...) - And(...interface{}) Result - - // Group is used to group results that have the same value in the same column - // or columns. - Group(...interface{}) Result - - // Delete deletes all items within the result set. `Offset()` and `Limit()` are - // not honoured by `Delete()`. - Delete() error - - // Update modifies all items within the result set. `Offset()` and `Limit()` - // are not honoured by `Update()`. - Update(interface{}) error - - // Count returns the number of items that match the set conditions. `Offset()` - // and `Limit()` are not honoured by `Count()`. - Count() (uint64, error) - - // Exists returns true if at least one item on the collection exists. False - // otherwise. - Exists() (bool, error) - - // Next fetches the next result within the result set and dumps it into the - // given pointer to struct or pointer to map. You must call - // `Close()` after finishing using `Next()`. - Next(ptrToStruct interface{}) bool - - // Err returns the last error that has happened with the result set, nil - // otherwise. - Err() error - - // One fetches the first result within the result set and dumps it into the - // given pointer to struct or pointer to map. The result set is automatically - // closed after picking the element, so there is no need to call Close() - // after using One(). - One(ptrToStruct interface{}) error - - // All fetches all results within the result set and dumps them into the - // given pointer to slice of maps or structs. The result set is - // automatically closed, so there is no need to call Close() after - // using All(). - All(sliceOfStructs interface{}) error - - // Paginate splits the results of the query into pages containing pageSize - // items. When using pagination, previous settings for Limit and Offset are - // ignored. Page numbering starts at 1. - // - // Use Page() to define the specific page to get results from. - // - // Example: - // - // r = q.Paginate(12) - // - // You can provide constraints and order settings when using pagination: - // - // Example: - // - // res := q.Where(conds).OrderBy("-id").Paginate(12) - // err := res.Page(4).All(&items) - Paginate(pageSize uint) Result - - // Page makes the result set return results only from the page identified by - // pageNumber. 
Page numbering starts from 1, matching Paginate(). - // - // Example: - // - // r = q.Paginate(12).Page(4) - Page(pageNumber uint) Result - - // Cursor defines the column that is going to be taken as basis for - // cursor-based pagination. - // - // Example: - // - // a = q.Paginate(10).Cursor("id") - // b = q.Paginate(12).Cursor("-id") - // - // You can set "" as cursorColumn to disable cursors. - Cursor(cursorColumn string) Result - - // NextPage returns the next results page according to the cursor. It expects - // a cursorValue, which is the value the cursor column had on the last item - // of the current result set (lower bound). - // - // Example: - // - // cursor = q.Paginate(12).Cursor("id") - // res = cursor.NextPage(items[len(items)-1].ID) - // - // Note that NextPage requires a cursor; any column with an absolute order - // (given two values, one always precedes the other) can be a cursor. - // - // You can define the pagination order and add constraints to your result: - // - // cursor = q.Where(...).OrderBy("id").Paginate(10).Cursor("id") - // res = cursor.NextPage(lowerBound) - NextPage(cursorValue interface{}) Result - - // PrevPage returns the previous results page according to the cursor. It - // expects a cursorValue, which is the value the cursor column had on the - // first item of the current result set (upper bound). - // - // Example: - // - // current = current.PrevPage(items[0].ID) - // - // Note that PrevPage requires a cursor; any column with an absolute order - // (given two values, one always precedes the other) can be a cursor. - // - // You can define the pagination order and add constraints to your result: - // - // cursor = q.Where(...).OrderBy("id").Paginate(10).Cursor("id") - // res = cursor.PrevPage(upperBound) - PrevPage(cursorValue interface{}) Result - - // TotalPages returns the total number of pages the result could produce. If - // no pagination has been set, this value equals 1. - TotalPages() (uint, error) - - // TotalEntries returns the total number of entries in the query. - TotalEntries() (uint64, error) - - // Close closes the result set and frees all locked resources. - Close() error -} diff --git a/vendor/upper.io/db.v3/settings.go b/vendor/upper.io/db.v3/settings.go deleted file mode 100644 index 73f653bab05..00000000000 --- a/vendor/upper.io/db.v3/settings.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package db - -import ( - "sync" - "sync/atomic" - "time" -) - -// Settings defines methods to get or set configuration values. -type Settings interface { - // SetLogging enables or disables logging. - SetLogging(bool) - // LoggingEnabled returns true if logging is enabled, false otherwise. - LoggingEnabled() bool - - // SetLogger defines which logger to use. - SetLogger(Logger) - // Returns the currently configured logger. - Logger() Logger - - // SetPreparedStatementCache enables or disables the prepared statement - // cache. - SetPreparedStatementCache(bool) - // PreparedStatementCacheEnabled returns true if the prepared statement cache - // is enabled, false otherwise. - PreparedStatementCacheEnabled() bool - - // SetConnMaxLifetime sets the default maximum amount of time a connection - // may be reused. - SetConnMaxLifetime(time.Duration) - - // ConnMaxLifetime returns the default maximum amount of time a connection - // may be reused. - ConnMaxLifetime() time.Duration - - // SetMaxIdleConns sets the default maximum number of connections in the idle - // connection pool. - SetMaxIdleConns(int) - - // MaxIdleConns returns the default maximum number of connections in the idle - // connection pool. - MaxIdleConns() int - - // SetMaxOpenConns sets the default maximum number of open connections to the - // database. - SetMaxOpenConns(int) - - // MaxOpenConns returns the default maximum number of open connections to the - // database. - MaxOpenConns() int -} - -type settings struct { - sync.RWMutex - - preparedStatementCacheEnabled uint32 - - connMaxLifetime time.Duration - maxOpenConns int - maxIdleConns int - - loggingEnabled uint32 - queryLogger Logger - queryLoggerMu sync.RWMutex - defaultLogger defaultLogger -} - -func (c *settings) Logger() Logger { - c.queryLoggerMu.RLock() - defer c.queryLoggerMu.RUnlock() - - if c.queryLogger == nil { - return &c.defaultLogger - } - - return c.queryLogger -} - -func (c *settings) SetLogger(lg Logger) { - c.queryLoggerMu.Lock() - defer c.queryLoggerMu.Unlock() - - c.queryLogger = lg -} - -func (c *settings) binaryOption(opt *uint32) bool { - return atomic.LoadUint32(opt) == 1 -} - -func (c *settings) setBinaryOption(opt *uint32, value bool) { - if value { - atomic.StoreUint32(opt, 1) - return - } - atomic.StoreUint32(opt, 0) -} - -func (c *settings) SetLogging(value bool) { - c.setBinaryOption(&c.loggingEnabled, value) -} - -func (c *settings) LoggingEnabled() bool { - return c.binaryOption(&c.loggingEnabled) -} - -func (c *settings) SetPreparedStatementCache(value bool) { - c.setBinaryOption(&c.preparedStatementCacheEnabled, value) -} - -func (c *settings) PreparedStatementCacheEnabled() bool { - return c.binaryOption(&c.preparedStatementCacheEnabled) -} - -func (c *settings) SetConnMaxLifetime(t time.Duration) { - c.Lock() - c.connMaxLifetime = t - c.Unlock() -} - -func (c *settings) ConnMaxLifetime() time.Duration { - c.RLock() - defer c.RUnlock() - return c.connMaxLifetime -} - -func (c *settings) SetMaxIdleConns(n int) { - c.Lock() - c.maxIdleConns = n - c.Unlock() -} - -func (c *settings) MaxIdleConns() int { - c.RLock() - defer c.RUnlock() - return c.maxIdleConns -} - -func (c *settings) SetMaxOpenConns(n int) { - c.Lock() - c.maxOpenConns = n - c.Unlock() -} - -func (c *settings) MaxOpenConns() int { - c.RLock() - defer c.RUnlock() - return c.maxOpenConns -} - -// NewSettings returns a new settings value prefilled with the current default -// settings. 
-func NewSettings() Settings { - def := DefaultSettings.(*settings) - return &settings{ - loggingEnabled: def.loggingEnabled, - preparedStatementCacheEnabled: def.preparedStatementCacheEnabled, - connMaxLifetime: def.connMaxLifetime, - maxIdleConns: def.maxIdleConns, - maxOpenConns: def.maxOpenConns, - } -} - -// DefaultSettings provides default global configuration settings for database -// sessions. -var DefaultSettings Settings = &settings{ - preparedStatementCacheEnabled: 0, - connMaxLifetime: time.Duration(0), - maxIdleConns: 10, - maxOpenConns: 0, -} diff --git a/vendor/upper.io/db.v3/tx.go b/vendor/upper.io/db.v3/tx.go deleted file mode 100644 index a179bad15f2..00000000000 --- a/vendor/upper.io/db.v3/tx.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Tx has methods for transactions that can be either committed or rolled back. -type Tx interface { - // Rollback discards all the instructions on the current transaction. - Rollback() error - - // Commit commits the current transaction. - Commit() error -} diff --git a/vendor/upper.io/db.v3/union.go b/vendor/upper.io/db.v3/union.go deleted file mode 100644 index 50b888aeb31..00000000000 --- a/vendor/upper.io/db.v3/union.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -// Union represents a compound joined by OR. -type Union struct { - *compound -} - -// Or adds more terms to the compound. -func (o *Union) Or(orConds ...Compound) *Union { - var fn func(*[]Compound) error - if len(orConds) > 0 { - fn = func(in *[]Compound) error { - *in = append(*in, orConds...) - return nil - } - } - return &Union{o.compound.frame(fn)} -} - -// Operator returns the OR operator. -func (o *Union) Operator() CompoundOperator { - return OperatorOr -} - -// Empty returns true if this struct holds no conditions. -func (o *Union) Empty() bool { - return o.compound.Empty() -} - -// Or joins conditions under logical disjunction. Conditions can be represented -// by db.Cond{}, db.Or() or db.And(). -// -// Example: -// -// // year = 2012 OR year = 1987 -// db.Or( -// db.Cond{"year": 2012}, -// db.Cond{"year": 1987}, -// ) -func Or(conds ...Compound) *Union { - return &Union{newCompound(defaultJoin(conds...)...)} -} diff --git a/vendor/upper.io/db.v3/wrapper.go b/vendor/upper.io/db.v3/wrapper.go deleted file mode 100644 index f5974ec7e14..00000000000 --- a/vendor/upper.io/db.v3/wrapper.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright (c) 2012-present The upper.io/db authors. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package db - -import ( - "fmt" - "sync" -) - -var ( - adapters map[string]*AdapterFuncMap - adaptersMu sync.RWMutex -) - -func init() { - adapters = make(map[string]*AdapterFuncMap) -} - -// AdapterFuncMap defines functions that need to be implemented by adapters. -type AdapterFuncMap struct { - Open func(settings ConnectionURL) (Database, error) -} - -// RegisterAdapter registers a generic Database adapter. This function must be -// called from adapter packages upon initialization. 
-func RegisterAdapter(name string, adapter *AdapterFuncMap) { - adaptersMu.Lock() - defer adaptersMu.Unlock() - - if name == "" { - panic(`Missing adapter name`) - } - if _, ok := adapters[name]; ok { - panic(`db.RegisterAdapter() called twice for adapter: ` + name) - } - adapters[name] = adapter -} - -func adapter(name string) AdapterFuncMap { - adaptersMu.RLock() - defer adaptersMu.RUnlock() - - if fn, ok := adapters[name]; ok { - return *fn - } - return missingAdapter(name) -} - -func missingAdapter(name string) AdapterFuncMap { - err := fmt.Errorf("upper: Missing adapter %q, forgot to import?", name) - return AdapterFuncMap{ - Open: func(ConnectionURL) (Database, error) { - return nil, err - }, - } -} - -// Open attempts to open a database. Returns a generic Database instance on -// success. -func Open(adapterName string, settings ConnectionURL) (Database, error) { - return adapter(adapterName).Open(settings) -} diff --git a/wire_gen.go b/wire_gen.go index d83a360fd09..0bc013a9cf0 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -137,6 +137,7 @@ import ( "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/clusterTerminalAccess" "github.com/devtron-labs/devtron/pkg/commonService" + "github.com/devtron-labs/devtron/pkg/configDiff" delete2 "github.com/devtron-labs/devtron/pkg/delete" "github.com/devtron-labs/devtron/pkg/deployment/common" "github.com/devtron-labs/devtron/pkg/deployment/deployedApp" @@ -942,6 +943,12 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } + deploymentConfigurationServiceImpl, err := configDiff.NewDeploymentConfigurationServiceImpl(sugaredLogger, configMapServiceImpl, appRepositoryImpl, environmentRepositoryImpl, chartServiceImpl, generateManifestDeploymentTemplateServiceImpl) + if err != nil { + return nil, err + } + deploymentConfigurationRestHandlerImpl := restHandler.NewDeploymentConfigurationRestHandlerImpl(sugaredLogger, userServiceImpl, enforcerUtilImpl, deploymentConfigurationServiceImpl, enforcerImpl) + deploymentConfigurationRouterImpl := router.NewDeploymentConfigurationRouter(deploymentConfigurationRestHandlerImpl) infraConfigRestHandlerImpl := infraConfig2.NewInfraConfigRestHandlerImpl(sugaredLogger, infraConfigServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl, validate) infraConfigRouterImpl := infraConfig2.NewInfraProfileRouterImpl(infraConfigRestHandlerImpl) argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, sugaredLogger, enforcerImpl) @@ -953,7 +960,7 @@ func InitializeApp() (*App, error) { devtronResourceRouterImpl := devtronResource2.NewDevtronResourceRouterImpl(historyRouterImpl) fluxApplicationRestHandlerImpl := fluxApplication2.NewFluxApplicationRestHandlerImpl(fluxApplicationServiceImpl, sugaredLogger, enforcerImpl) fluxApplicationRouterImpl := fluxApplication2.NewFluxApplicationRouterImpl(fluxApplicationRestHandlerImpl) - muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, 
grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl) + muxRouter := router.NewMuxRouter(sugaredLogger, environmentRouterImpl, clusterRouterImpl, webhookRouterImpl, userAuthRouterImpl, gitProviderRouterImpl, gitHostRouterImpl, dockerRegRouterImpl, notificationRouterImpl, teamRouterImpl, userRouterImpl, chartRefRouterImpl, configMapRouterImpl, appStoreRouterImpl, chartRepositoryRouterImpl, releaseMetricsRouterImpl, deploymentGroupRouterImpl, batchOperationRouterImpl, chartGroupRouterImpl, imageScanRouterImpl, policyRouterImpl, gitOpsConfigRouterImpl, dashboardRouterImpl, attributesRouterImpl, userAttributesRouterImpl, commonRouterImpl, grafanaRouterImpl, ssoLoginRouterImpl, telemetryRouterImpl, telemetryEventClientImplExtended, bulkUpdateRouterImpl, webhookListenerRouterImpl, appRouterImpl, coreAppRouterImpl, helmAppRouterImpl, k8sApplicationRouterImpl, pProfRouterImpl, deploymentConfigRouterImpl, dashboardTelemetryRouterImpl, commonDeploymentRouterImpl, externalLinkRouterImpl, globalPluginRouterImpl, moduleRouterImpl, serverRouterImpl, apiTokenRouterImpl, cdApplicationStatusUpdateHandlerImpl, k8sCapacityRouterImpl, webhookHelmRouterImpl, globalCMCSRouterImpl, userTerminalAccessRouterImpl, jobRouterImpl, ciStatusUpdateCronImpl, resourceGroupingRouterImpl, rbacRoleRouterImpl, scopedVariableRouterImpl, ciTriggerCronImpl, proxyRouterImpl, deploymentConfigurationRouterImpl, infraConfigRouterImpl, argoApplicationRouterImpl, devtronResourceRouterImpl, fluxApplicationRouterImpl) loggingMiddlewareImpl := util4.NewLoggingMiddlewareImpl(userServiceImpl) cdWorkflowServiceImpl := cd.NewCdWorkflowServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) cdWorkflowRunnerServiceImpl := cd.NewCdWorkflowRunnerServiceImpl(sugaredLogger, cdWorkflowRepositoryImpl) From 8a61bac1985e49af999f53652fbc6bbf3d78bd19 Mon Sep 17 00:00:00 2001 From: Bhushan Nemade Date: Wed, 28 Aug 2024 16:03:22 +0530 Subject: [PATCH 15/61] doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> --- docs/user-guide/plugins/code-scan.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/user-guide/plugins/code-scan.md b/docs/user-guide/plugins/code-scan.md index 18eafedf922..cbefde15af9 100644 --- a/docs/user-guide/plugins/code-scan.md +++ b/docs/user-guide/plugins/code-scan.md @@ -4,7 +4,7 @@ The Code Scan plugin of Devtron allows you to perform the code scanning using Trivy. By integrating the **Code Scan** plugin into your workflow you can detect common Vulnerabilities, Misconfigurations, License Risks, and Exposed Secrets in your code. 
### Prerequisites -No prerequisites are required for integrating **Code Scan** plugin. +Before integrating the **Code Scan** plugin, install the [Vulnerability Scanning (Trivy/Clair)](https://docs.devtron.ai/usage/integrations/clair) integration from Devtron Stack Manager. --- @@ -34,11 +34,11 @@ e.g., `Code Scanning` ### Description Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task. -e.g., `The Code Scan plugin is integrated for scanning the in-code vulnerablities.` +e.g., `The Code Scan plugin is integrated for scanning the in-code vulnerabilities.` ### Input Variables -No input variables are required for Code Scan plugin. +No input variables are required for the Code Scan plugin. ### Output Variables Code Scan will not be generating an output variable. From 6da544fd53791d8b97680d37f7aab752c1982082 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Wed, 28 Aug 2024 23:18:02 +0530 Subject: [PATCH 16/61] fix: ci patch rbac for branch update (#5759) --- .../configure/BuildPipelineRestHandler.go | 15 ++++----------- pkg/bean/app.go | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go index 47626841d9e..1ae6d4f33d1 100644 --- a/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/BuildPipelineRestHandler.go @@ -21,7 +21,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/devtron-labs/devtron/internal/sql/repository/appWorkflow" "golang.org/x/exp/maps" "io" "net/http" @@ -496,19 +495,13 @@ func (handler *PipelineConfigRestHandlerImpl) getCdPipelinesForCIPatchRbac(patch // find the workflow in which we are patching and use the workflow id to fetch all the workflow mappings using the workflow. // get cd pipeline ids from those workflows and fetch the cd pipelines. - // get the ciPipeline - switchFromPipelineId, switchFromType := patchRequest.SwitchSourceInfo() - - // in app workflow mapping all the build source types are 'CI_PIPELINE' type, except external -> WEBHOOK. 
- componentType := appWorkflow.CIPIPELINE - if switchFromType == CiPipeline.EXTERNAL { - componentType = appWorkflow.WEBHOOK - } + // get the ciPipeline patch source info + componentId, componentType := patchRequest.PatchSourceInfo() // the appWorkflowId can be taken from patchRequest.AppWorkflowId but doing this can make 2 sources of truth to find the workflow - sourceAppWorkflowMapping, err := handler.appWorkflowService.FindWFMappingByComponent(componentType, switchFromPipelineId) + sourceAppWorkflowMapping, err := handler.appWorkflowService.FindWFMappingByComponent(componentType, componentId) if err != nil { - handler.Logger.Errorw("error in finding the appWorkflowMapping using componentId and componentType", "componentType", componentType, "componentId", switchFromPipelineId, "err", err) + handler.Logger.Errorw("error in finding the appWorkflowMapping using componentId and componentType", "componentType", componentType, "componentId", componentId, "err", err) return nil, err } diff --git a/pkg/bean/app.go b/pkg/bean/app.go index bbab08513a4..625bf8eef9d 100644 --- a/pkg/bean/app.go +++ b/pkg/bean/app.go @@ -315,6 +315,24 @@ func (ciPatchRequest CiPatchRequest) SwitchSourceInfo() (int, CiPipeline2.Pipeli return switchFromPipelineId, switchFromType } +// PatchSourceInfo returns the CI component ID and component Type, which is being patched +func (ciPatchRequest CiPatchRequest) PatchSourceInfo() (int, string) { + // in app workflow mapping all the build source types are 'CI_PIPELINE' type, except external -> WEBHOOK. + componentType := appWorkflow.CIPIPELINE + var componentId int + // initialize componentId with ciPipeline id + if ciPatchRequest.CiPipeline != nil { + componentId = ciPatchRequest.CiPipeline.Id + } + if ciPatchRequest.SwitchFromExternalCiPipelineId != 0 { + componentType = appWorkflow.WEBHOOK + componentId = ciPatchRequest.SwitchFromExternalCiPipelineId + } else if ciPatchRequest.SwitchFromCiPipelineId != 0 { + componentId = ciPatchRequest.SwitchFromCiPipelineId + } + return componentId, componentType +} + func (ciPatchRequest CiPatchRequest) IsSwitchCiPipelineRequest() bool { return (ciPatchRequest.SwitchFromCiPipelineId != 0 || ciPatchRequest.SwitchFromExternalCiPipelineId != 0) } From 09946c20375df29e5dacdfcd5266856277bea065 Mon Sep 17 00:00:00 2001 From: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Date: Thu, 29 Aug 2024 12:54:36 +0530 Subject: [PATCH 17/61] feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations --- .../templates/servicemonitor.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml index 9885733262e..7368288e0ca 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml @@ -38,6 +38,10 @@ spec: {{- if .servicemonitor.scrapeTimeout }} scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} {{- if .servicemonitor.metricRelabelings}} metricRelabelings: {{toYaml .servicemonitor.metricRelabelings | indent 8 }} @@ -46,7 +50,16 @@ spec: {{- end }} 
{{- end }} {{- end }} + {{- if .Values.servicemonitor.namespaceSelector }} + namespaceSelector: + matchNames: + {{- toYaml .Values.servicemonitor.namespaceSelector | nindent 6 }} + {{- end }} selector: matchLabels: + {{- if .Values.servicemonitor.matchLabels }} + {{- toYaml .Values.servicemonitor.matchLabels | nindent 6 }} + {{- else }} app: {{ template ".Chart.Name .name" $ }} + {{- end }} {{- end }} From 80f075854e67797d600f619fde22adf49f110b67 Mon Sep 17 00:00:00 2001 From: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Date: Thu, 29 Aug 2024 13:50:50 +0530 Subject: [PATCH 18/61] fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> --- scripts/sql/282_bitnami_chart_fix.down.sql | 1 + scripts/sql/282_bitnami_chart_fix.up.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 scripts/sql/282_bitnami_chart_fix.down.sql create mode 100644 scripts/sql/282_bitnami_chart_fix.up.sql diff --git a/scripts/sql/282_bitnami_chart_fix.down.sql b/scripts/sql/282_bitnami_chart_fix.down.sql new file mode 100644 index 00000000000..c1084856e71 --- /dev/null +++ b/scripts/sql/282_bitnami_chart_fix.down.sql @@ -0,0 +1 @@ +update chart_repo set allow_insecure_connection=true where name='bitnami'; diff --git a/scripts/sql/282_bitnami_chart_fix.up.sql b/scripts/sql/282_bitnami_chart_fix.up.sql new file mode 100644 index 00000000000..0729167a19c --- /dev/null +++ b/scripts/sql/282_bitnami_chart_fix.up.sql @@ -0,0 +1 @@ +update chart_repo set allow_insecure_connection=false where name='bitnami'; From 7ee4a32aa7f8d800bb1261e3b62aa61b7ba77faa Mon Sep 17 00:00:00 2001 From: Bhushan Nemade Date: Thu, 29 Aug 2024 18:13:53 +0530 Subject: [PATCH 19/61] doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. * Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> --- docs/SUMMARY.md | 1 + docs/user-guide/plugins/cosign.md | 59 +++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 docs/user-guide/plugins/cosign.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 2a55ffcad3a..439c026259a 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -136,6 +136,7 @@ * [Code-Scan](user-guide/plugins/code-scan.md) * [Copacetic](user-guide/plugins/copacetic.md) * [Copy Container Image](user-guide/plugins/copy-container-image.md) + * [Cosign](user-guide/plugins/cosign.md) * [Dependency track - Maven & Gradle](user-guide/plugins/dependency-track-maven-gradle.md) * [Dependency track - NodeJS](user-guide/plugins/dependency-track-nodejs.md) * [Dependency track - Python](user-guide/plugins/dependency-track-python.md) diff --git a/docs/user-guide/plugins/cosign.md b/docs/user-guide/plugins/cosign.md new file mode 100644 index 00000000000..d91fbb2d3ee --- /dev/null +++ b/docs/user-guide/plugins/cosign.md @@ -0,0 +1,59 @@ +# Cosign + +## Introduction +The **Cosign** plugin by Devtron enables secure signing of your container images, enhancing supply chain security. It authenticates your identity as the creator and ensures image integrity, allowing users to verify the source and detect any tampering. This provides greater assurance to developers incorporating your artifacts into their workflows. 
+
+### Prerequisites
+Before integrating the Cosign plugin, ensure that you have configured [Cosign](https://github.com/sigstore/cosign) and have a set of private and public keys to sign the container images.
+
+---
+
+## Steps
+1. Go to **Applications** → **Devtron Apps**.
+2. Click your application.
+3. Go to **App Configuration** → **Workflow Editor**.
+4. Click **New Workflow** and navigate to the **Build and Deploy from Source Code**.
+5. Fill the required fields in the **Create build pipeline** window and navigate to the **Post-build stage**.
+
+{% hint style="warning" %}
+If you have already configured a workflow, edit the build pipeline and navigate to the **Post-build stage**.
+{% endhint %}
+
+6. Under 'TASKS', click the **+ Add task** button.
+7. Click the **Cosign** plugin.
+8. Enter the following [user inputs](#user-inputs) with appropriate values.
+---
+
+## User Inputs
+
+### Task Name
+Enter the name of your task
+
+e.g., `Signing of container images`
+
+### Description
+Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task.
+
+e.g., `The Cosign plugin is integrated for ensuring the authenticity of container images.`
+
+### Input Variables
+
+| Variable | Format | Description | Sample Value |
+| ------------------------ | ------------ | ----------- | ------------ |
+| PrivateKeyFilePath | STRING | Path of the private key file in the Git repo | cosign/cosign.key |
+| PostCommand | STRING | Command to run after the image is signed by Cosign | cosign verify $DOCKER_IMAGE |
+| ExtraArguments | STRING | Arguments for the Cosign command | --certificate-identity=name@example.com |
+| CosignPassword | STRING | Password for the Cosign private key | S3cur3P@ssw0rd123! |
+| VariableAsPrivateKey | STRING | base64-encoded private key | @{{COSIGN_PRIVATE_KEY}} |
+| PreCommand | STRING | Command to set up the required conditions before executing the Cosign command | curl -sLJO https://raw.githubusercontent.com/devtron-labs/sampleRepo/branchName/private |
+
+### Trigger/Skip Condition
+Here you can set conditions to execute or skip the task. You can select `Set trigger conditions` for the execution of a task or `Set skip conditions` to skip the task.
+
+### Output Variables
+Cosign will not be generating an output variable.
+
+Click **Update Pipeline**.
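+
+### Example: preparing `VariableAsPrivateKey`
+The snippet below is a minimal, hypothetical sketch (not part of the plugin itself) of how you might base64-encode a Cosign private key so it can be stored in a scope variable such as `COSIGN_PRIVATE_KEY` and referenced through the `VariableAsPrivateKey` input. It assumes the key file `cosign.key` was generated beforehand, e.g., with `cosign generate-key-pair`.
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "fmt"
+    "os"
+)
+
+func main() {
+    // Read the Cosign private key from disk; the file name is an
+    // assumption and should match your own key location.
+    keyBytes, err := os.ReadFile("cosign.key")
+    if err != nil {
+        fmt.Fprintln(os.Stderr, "reading key:", err)
+        os.Exit(1)
+    }
+    // Print the base64-encoded key, ready to be stored in a scope
+    // variable referenced as @{{COSIGN_PRIVATE_KEY}}.
+    fmt.Println(base64.StdEncoding.EncodeToString(keyBytes))
+}
+```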
+ + + From 99d10f54e73fbc799deed20af5f80e004490905d Mon Sep 17 00:00:00 2001 From: Prakash Date: Thu, 29 Aug 2024 18:57:07 +0530 Subject: [PATCH 20/61] fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix --- api/restHandler/DeploymentConfigurationRestHandler.go | 8 +++++--- pkg/configDiff/DeploymentConfigurationService.go | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/api/restHandler/DeploymentConfigurationRestHandler.go b/api/restHandler/DeploymentConfigurationRestHandler.go index a29776a6b65..144838da010 100644 --- a/api/restHandler/DeploymentConfigurationRestHandler.go +++ b/api/restHandler/DeploymentConfigurationRestHandler.go @@ -115,9 +115,11 @@ func (handler *DeploymentConfigurationRestHandlerImpl) enforceForAppAndEnv(appNa return false } - object = handler.enforcerUtil.GetEnvRBACNameByAppAndEnvName(appName, envName) - if ok := handler.enforcer.Enforce(token, casbin.ResourceEnvironment, action, object); !ok { - return false + if len(envName) > 0 { + object = handler.enforcerUtil.GetEnvRBACNameByAppAndEnvName(appName, envName) + if ok := handler.enforcer.Enforce(token, casbin.ResourceEnvironment, action, object); !ok { + return false + } } return true } diff --git a/pkg/configDiff/DeploymentConfigurationService.go b/pkg/configDiff/DeploymentConfigurationService.go index 360de7f8b35..f64de5cd2f7 100644 --- a/pkg/configDiff/DeploymentConfigurationService.go +++ b/pkg/configDiff/DeploymentConfigurationService.go @@ -63,6 +63,7 @@ func (impl *DeploymentConfigurationServiceImpl) ConfigAutoComplete(appId int, en if _, ok := cmcsKeyPropertyEnvLevelMap[key]; !ok { if envId > 0 { configProperty.ConfigStage = bean2.Inheriting + configProperty.Id = 0 } } From f1a50b17abd444f8a4aafa06b62285f8ed2b9470 Mon Sep 17 00:00:00 2001 From: Bhushan Nemade Date: Fri, 30 Aug 2024 10:14:09 +0530 Subject: [PATCH 21/61] doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> --- docs/SUMMARY.md | 1 + docs/user-guide/plugins/crane-copy.md | 58 +++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 docs/user-guide/plugins/crane-copy.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 439c026259a..aa964658864 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -137,6 +137,7 @@ * [Copacetic](user-guide/plugins/copacetic.md) * [Copy Container Image](user-guide/plugins/copy-container-image.md) * [Cosign](user-guide/plugins/cosign.md) + * [CraneCopy](user-guide/plugins/crane-copy.md) * [Dependency track - Maven & Gradle](user-guide/plugins/dependency-track-maven-gradle.md) * [Dependency track - NodeJS](user-guide/plugins/dependency-track-nodejs.md) * [Dependency track - Python](user-guide/plugins/dependency-track-python.md) diff --git a/docs/user-guide/plugins/crane-copy.md b/docs/user-guide/plugins/crane-copy.md new file mode 100644 index 00000000000..7b7de1e0f29 --- /dev/null +++ b/docs/user-guide/plugins/crane-copy.md @@ -0,0 +1,58 @@ +# CraneCopy + +## Introduction +The **CraneCopy** plugin by Devtron facilitates the transfer of multi-architecture container images between registries. When integrated into Devtron's Post-build stage, this plugin allows you to efficiently copy and store your container images to a specified target repository. + +### Prerequisites +No prerequisites are required for integrating the **CraneCopy** plugin. + +--- + +## Steps +1. 
Go to **Applications** → **Devtron Apps**. +2. Click your application. +3. Go to **App Configuration** → **Workflow Editor**. +4. Click **New Workflow** and navigate to the **Build and Deploy from Source Code**. +5. Fill the required fields in the **Create build pipeline** window and navigate to the **Post-build stage**. + +{% hint style="warning" %} +If you have already configured workflow, edit the build pipeline, and navigate to **Post-build stage**. +{% endhint %} + +6. Under 'TASKS', click the **+ Add task** button. +7. Click the **CraneCopy** plugin. +8. Enter the following [user inputs](#user-inputs) with appropriate values. + +--- + +## User Inputs + +### Task Name +Enter the name of your task + +e.g., `Copy and store container images` + +### Description +Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task. + +e.g., `The CraneCopy plugin is integrated to copy the container images from one registry to another.` + +### Input Variables + +| Variable | Format | Description | Sample Value | +| ------------------------ | ------------ | ----------- | ------------ | +| RegistryUsername | STRING | Username of target registry for authentication | admin | +| RegistryPassword | STRING | Password for the target registry for authentication | Tr5$mH7p | +| TargetRegistry | STRING | The target registry to push to image | docker.io/dockertest | + + +### Trigger/Skip Condition +Here you can set conditions to execute or skip the task. You can select `Set trigger conditions` for the execution of a task or `Set skip conditions` to skip the task. + +### Output Variables +CraneCopy will not be generating an output variable. + +Click **Update Pipeline**. + + + From 3ef2b96cb8454135654b05276f948c8e17564651 Mon Sep 17 00:00:00 2001 From: Bhushan Nemade Date: Fri, 30 Aug 2024 10:16:12 +0530 Subject: [PATCH 22/61] doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> --- docs/SUMMARY.md | 1 + docs/user-guide/plugins/devtron-cd-trigger.md | 61 +++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 docs/user-guide/plugins/devtron-cd-trigger.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index aa964658864..5e859908c4d 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -141,6 +141,7 @@ * [Dependency track - Maven & Gradle](user-guide/plugins/dependency-track-maven-gradle.md) * [Dependency track - NodeJS](user-guide/plugins/dependency-track-nodejs.md) * [Dependency track - Python](user-guide/plugins/dependency-track-python.md) + * [Devtron CD Trigger](user-guide/plugins/devtron-cd-trigger.md) * [GoLang-migrate](user-guide/plugins/golang-migrate.md) * [Jenkins](user-guide/plugins/jenkins.md) * [K6 Load Testing](user-guide/plugins/k6-load-testing.md) diff --git a/docs/user-guide/plugins/devtron-cd-trigger.md b/docs/user-guide/plugins/devtron-cd-trigger.md new file mode 100644 index 00000000000..8f7cbfcd7d3 --- /dev/null +++ b/docs/user-guide/plugins/devtron-cd-trigger.md @@ -0,0 +1,61 @@ +# Devtron-CD-Trigger + +## Introduction +The **Devtron CD Trigger** plugin allows you to trigger the PRE-CD, CD, or POST-CD stages of target Devtron App from within your current application workflow. 
This plugin offers flexibility in managing application dependencies and deployment sequences. For example, by incorporating this plugin at the pre-deployment stage of your application workflow, you can deploy another application that contains dependencies required by your current application, ensuring a coordinated deployment process.
+
+### Prerequisites
+Before integrating the Devtron CD Trigger plugin, you need to properly configure the target Devtron App to ensure smooth execution.
+
+---
+
+## Steps
+1. Go to **Applications** → **Devtron Apps**.
+2. Click your application.
+3. Go to **App Configuration** → **Workflow Editor**.
+4. Click **New Workflow** and navigate to the **Build and Deploy from Source Code**.
+5. Fill the required fields in the **Create build pipeline** window and navigate to the **Create deployment pipeline**.
+6. Fill the required fields in the **Deployment Stage** window and navigate to the **Post-Deployment stage**.
+
+{% hint style="warning" %}
+If you have already configured a workflow, edit the deployment pipeline and navigate to the **Post-Deployment stage**.
+{% endhint %}
+
+7. Under 'TASKS', click the **+ Add task** button.
+8. Select the **Devtron CD Trigger** plugin.
+9. Enter the following [user inputs](#user-inputs) with appropriate values.
+---
+
+## User Inputs
+
+### Task Name
+Enter the name of your task
+
+e.g., `Triggers CD Pipeline`
+
+### Description
+Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task.
+
+e.g., `The Devtron CD Trigger plugin is integrated for triggering the CD stage of another application.`
+
+### Input Variables
+
+| Variable | Format | Description | Sample Value |
+| ------------------------ | ------------ | ----------- | ------------ |
+| DevtronApiToken | STRING | Enter the target Devtron API token. | abc123DEFxyz456token789 |
+| DevtronEndpoint | STRING | Enter the target URL of Devtron. | https://devtron.example.com |
+| DevtronApp | STRING | Enter the target Devtron Application name/ID | plugin-demo |
+| DevtronEnv | STRING | Enter the target Environment name/ID. Required if JobPipeline is not given | preview |
+| StatusTimeoutSeconds | STRING | Enter the maximum time (in seconds) to wait for the application to deploy. Enter a positive integer value | 120 |
+| GitCommitHash | STRING | Enter the git hash from which you want to deploy the application. By default, the latest artifact ID is used to deploy the application | cf19e4fd348589kjhsdjn092nfse01d2234235sdsg |
+| TargetTriggerStage | STRING | Enter the trigger stage PRE/DEPLOY/POST. Default value is `Deploy`. | PRE |
+
+### Trigger/Skip Condition
+Here you can set conditions to execute or skip the task. You can select `Set trigger conditions` for the execution of a task or `Set skip conditions` to skip the task.
+
+### Output Variables
+Devtron CD Trigger will not be generating an output variable.
+
+Click **Update Pipeline**.
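+
+### Example: what `StatusTimeoutSeconds` controls
+The sketch below is illustrative only and makes assumptions: `fetchStatus` is a hypothetical stand-in for the call the plugin makes against the target Devtron installation (using `DevtronEndpoint` and `DevtronApiToken`); it is not the plugin's actual implementation. It shows the polling behaviour implied by `StatusTimeoutSeconds`: the deployment status is checked repeatedly until it succeeds or the timeout elapses.
+
+```go
+package main
+
+import (
+    "errors"
+    "fmt"
+    "time"
+)
+
+// fetchStatus is hypothetical; imagine it querying the target app's
+// deployment status over HTTP.
+func fetchStatus() (string, error) {
+    return "Succeeded", nil
+}
+
+// waitForDeployment polls until the status is "Succeeded" or the
+// timeout (in seconds, as in StatusTimeoutSeconds) is reached.
+func waitForDeployment(timeoutSeconds int) error {
+    deadline := time.Now().Add(time.Duration(timeoutSeconds) * time.Second)
+    for time.Now().Before(deadline) {
+        status, err := fetchStatus()
+        if err != nil {
+            return err
+        }
+        if status == "Succeeded" {
+            return nil
+        }
+        time.Sleep(5 * time.Second)
+    }
+    return errors.New("timed out waiting for the deployment")
+}
+
+func main() {
+    fmt.Println(waitForDeployment(120)) // 120 matches the sample value above
+}
+```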
+
+
+
From 3888a41290463d5d321aa1cf3179c91e1f4a6423 Mon Sep 17 00:00:00 2001
From: Bhushan Nemade
Date: Fri, 30 Aug 2024 13:35:58 +0530
Subject: [PATCH 23/61] doc: DockerSlim plugin doc (#5660)

* doc for DockerSlim plugin

* Updated Docker-Slim to DockerSlim

* Minor fixes

* url update

* Fixes in url

---------

Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com>
---
 docs/SUMMARY.md                        |  3 +-
 docs/user-guide/plugins/docker-slim.md | 63 ++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 1 deletion(-)
 create mode 100644 docs/user-guide/plugins/docker-slim.md

diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 5e859908c4d..a34c39f78be 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -142,6 +142,7 @@
 * [Dependency track - NodeJS](user-guide/plugins/dependency-track-nodejs.md)
 * [Dependency track - Python](user-guide/plugins/dependency-track-python.md)
 * [Devtron CD Trigger](user-guide/plugins/devtron-cd-trigger.md)
+ * [DockerSlim](user-guide/plugins/docker-slim.md)
 * [GoLang-migrate](user-guide/plugins/golang-migrate.md)
 * [Jenkins](user-guide/plugins/jenkins.md)
 * [K6 Load Testing](user-guide/plugins/k6-load-testing.md)
@@ -163,4 +164,4 @@
 * [Pull Helm Charts from OCI Registry](user-guide/use-cases/oci-pull.md)
 * [Telemetry Overview](user-guide/telemetry.md)
 * [Devtron on Graviton](reference/graviton.md)
-* [Release Notes](https://github.com/devtron-labs/devtron/releases)
\ No newline at end of file
+* [Release Notes](https://github.com/devtron-labs/devtron/releases)
diff --git a/docs/user-guide/plugins/docker-slim.md b/docs/user-guide/plugins/docker-slim.md
new file mode 100644
index 00000000000..897cae9d14e
--- /dev/null
+++ b/docs/user-guide/plugins/docker-slim.md
@@ -0,0 +1,63 @@
+# DockerSlim
+
+## Introduction
+The **DockerSlim** plugin by Devtron helps you optimize your container deployments by reducing Docker image size. With these lighter Docker images, you can perform faster deployments and enhance overall system efficiency.
+
+{% hint style="warning" %}
+Support for Docker buildx images will be added soon.
+{% endhint %}
+
+### Prerequisites
+No prerequisites are required for integrating the **DockerSlim** plugin.
+
+---
+
+## Steps
+1. Go to **Applications** → **Devtron Apps**.
+2. Click your application.
+3. Go to **App Configuration** → **Workflow Editor**.
+4. Click **New Workflow** and navigate to the **Build and Deploy from Source Code**.
+5. Fill the required fields in the **Create build pipeline** window and navigate to the **Post-build stage**.
+
+{% hint style="warning" %}
+If you have already configured a workflow, edit the build pipeline and navigate to the **Post-build stage**.
+{% endhint %}
+
+6. Under 'TASKS', click the **+ Add task** button.
+7. Click the **DockerSlim** plugin.
+8. Enter the following [user inputs](#user-inputs) with appropriate values.
+---
+
+## User Inputs
+
+### Task Name
+Enter the name of your task
+
+e.g., `Reduce Docker image size`
+
+### Description
+Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task.
+
+e.g., `The DockerSlim plugin is integrated for reducing the size of the Docker image.`
+
+### Input Variables
+
+{% hint style="warning" %}
+In the `IncludePathFile` input variable, list the paths of the essential files from your Dockerfile. Files whose paths are not listed in `IncludePathFile` may be excluded from the Docker image to reduce its size.
+{% endhint %}
+
+| Variable | Format | Description | Sample Value |
+| ------------------------ | ------------ | ----------- | ------------ |
+| HTTPProbe | BOOL | Indicates whether the port is exposed in the Dockerfile or not | false |
+| IncludePathFile | STRING | File paths of the required files | /etc/nginx/include.conf |
+
+### Trigger/Skip Condition
+Here you can set conditions to execute or skip the task. You can select `Set trigger conditions` for the execution of a task or `Set skip conditions` to skip the task.
+
+### Output Variables
+DockerSlim will not be generating an output variable.
+
+Click **Update Pipeline**.
+
+
+
From a625e7e05ca4a73e36f067e36d0e47c2872e7b20 Mon Sep 17 00:00:00 2001
From: Bhushan Nemade
Date: Fri, 30 Aug 2024 17:48:54 +0530
Subject: [PATCH 24/61] doc: Devtron Job Trigger Plugin doc (#5742)

* devtron-job-trigger plugin doc

* summary updated

* Updated input variable description

* token value updated

---------

Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com>
---
 docs/SUMMARY.md                               |  1 +
 .../user-guide/plugins/devtron-job-trigger.md | 61 +++++++++++++++++++
 2 files changed, 62 insertions(+)
 create mode 100644 docs/user-guide/plugins/devtron-job-trigger.md

diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index a34c39f78be..e4ad4067a05 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -142,6 +142,7 @@
 * [Dependency track - NodeJS](user-guide/plugins/dependency-track-nodejs.md)
 * [Dependency track - Python](user-guide/plugins/dependency-track-python.md)
 * [Devtron CD Trigger](user-guide/plugins/devtron-cd-trigger.md)
+ * [Devtron Job Trigger](user-guide/plugins/devtron-job-trigger.md)
 * [DockerSlim](user-guide/plugins/docker-slim.md)
 * [GoLang-migrate](user-guide/plugins/golang-migrate.md)
 * [Jenkins](user-guide/plugins/jenkins.md)
diff --git a/docs/user-guide/plugins/devtron-job-trigger.md b/docs/user-guide/plugins/devtron-job-trigger.md
new file mode 100644
index 00000000000..19b38b77035
--- /dev/null
+++ b/docs/user-guide/plugins/devtron-job-trigger.md
@@ -0,0 +1,61 @@
+# Devtron-Job-Trigger
+
+## Introduction
+The **Devtron Job Trigger** plugin enables you to trigger Devtron Jobs from your current application workflow. For example, by integrating this plugin at the pre-deployment stage of your application workflow, you can trigger jobs designed to run migration scripts in your database. This ensures that necessary migrations are executed before your application is deployed.
+
+### Prerequisites
+Before integrating the Devtron Job Trigger plugin, you need to properly configure the target Devtron Job to ensure smooth execution.
+
+---
+
+## Steps
+1. Go to **Applications** → **Devtron Apps**.
+2. Click your application.
+3. Go to **App Configuration** → **Workflow Editor**.
+4. Click **New Workflow** and navigate to the **Build and Deploy from Source Code**.
+5. Fill the required fields in the **Create build pipeline** window and navigate to the **Create deployment pipeline**.
+6. Fill the required fields in the **Deployment Stage** window and navigate to the **Pre-Deployment stage**.
+
+{% hint style="warning" %}
+If you have already configured a workflow, edit the deployment pipeline and navigate to the **Pre-Deployment stage**.
+{% endhint %}
+
+7. Under 'TASKS', click the **+ Add task** button.
+8. Select the **Devtron Job Trigger** plugin.
+9. Enter the following [user inputs](#user-inputs) with appropriate values.
+---
+
+## User Inputs
+
+### Task Name
+Enter the name of your task
+
+e.g., `Triggers Devtron Job`
+
+### Description
+Add a brief explanation of the task and the reason for choosing the plugin. Include information for someone else to understand the purpose of the task.
+
+e.g., `The Devtron Job Trigger plugin is integrated for triggering the Devtron Job.`
+
+### Input Variables
+
+| Variable | Format | Description | Sample Value |
+| ------------------------ | ------------ | ----------- | ------------ |
+| DevtronApiToken | STRING | Enter a Devtron API token with the required permissions. | abc123def456token789 |
+| DevtronEndpoint | STRING | Enter the URL of the Devtron dashboard. | https://devtron.example.com |
+| DevtronJob | STRING | Enter the name or ID of the Devtron Job to be triggered | plugin-test-job |
+| DevtronEnv | STRING | Enter the name or ID of the Environment where the job is to be triggered. If JobPipeline is given, ignore this field and do not assign any value | prod |
+| JobPipeline | STRING | Enter the name or ID of the Job pipeline to be triggered. If DevtronEnv is given, ignore this field and do not assign any value | hello-world |
+| GitCommitHash | STRING | Enter the commit hash from which the job is to be triggered. If not given, the latest commit is picked | cf19e4fd348589kjhsdjn092nfse01d2234235sdsg |
+| StatusTimeoutSeconds | NUMBER | Enter the maximum time (in seconds) to wait for the job status | 120 |
+
+### Trigger/Skip Condition
+Here you can set conditions to execute or skip the task. You can select `Set trigger conditions` for the execution of a task or `Set skip conditions` to skip the task.
+
+### Output Variables
+Devtron Job Trigger will not be generating an output variable.
+
+Click **Update Pipeline**.
+
+
+
From ff89a26f9368a0e1059c7b3ce1edaa75d11645c9 Mon Sep 17 00:00:00 2001
From: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com>
Date: Fri, 30 Aug 2024 18:52:23 +0530
Subject: [PATCH 25/61] fix: scan tool active check removed (#5771)

* scan tool active check removed

* query fix
---
 scripts/sql/281_update_scan_tool_metadata.down.sql | 4 +---
 scripts/sql/281_update_scan_tool_metadata.up.sql   | 4 +---
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/scripts/sql/281_update_scan_tool_metadata.down.sql b/scripts/sql/281_update_scan_tool_metadata.down.sql
index e3afba4ef14..26095248829 100644
--- a/scripts/sql/281_update_scan_tool_metadata.down.sql
+++ b/scripts/sql/281_update_scan_tool_metadata.down.sql
@@ -11,9 +11,7 @@ SET image_scan_descriptor_template = '[
 ]',
 updated_on = 'now()'
 WHERE name = 'TRIVY'
 AND version = 'V1'
- AND scan_target = 'IMAGE'
- AND active = true
- AND deleted = false;
+ AND scan_target = 'IMAGE';
 
 ALTER TABLE image_scan_execution_result
 DROP COLUMN class,
diff --git a/scripts/sql/281_update_scan_tool_metadata.up.sql b/scripts/sql/281_update_scan_tool_metadata.up.sql
index 4d771950995..b9480274400 100644
--- a/scripts/sql/281_update_scan_tool_metadata.up.sql
+++ b/scripts/sql/281_update_scan_tool_metadata.up.sql
@@ -19,9 +19,7 @@ UPDATE scan_tool_metadata SET result_descriptor_template = '[
 WHERE
 name = 'TRIVY'
 AND version = 'V1'
- AND scan_target = 'IMAGE'
- AND active = true
- AND deleted = false;
+ AND scan_target = 'IMAGE';
 
 ALTER TABLE image_scan_execution_result
 ADD COLUMN class TEXT,

From 5170040eafc6826272ccbf5111d73553287b8bd8 Mon Sep 17 00:00:00 2001
From: Prakash
Date: Fri, 30 Aug 2024 19:10:20 +0530
Subject: [PATCH 26/61] feat: Docker pull env driven (#5767)

* useDockerApiToGetDigest env driven flag to control
pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert --- env_gen.md | 1 + pkg/pipeline/CiService.go | 1 + pkg/pipeline/types/CiCdConfig.go | 1 + pkg/pipeline/types/Workflow.go | 1 + 4 files changed, 4 insertions(+) diff --git a/env_gen.md b/env_gen.md index 187544e5ce1..d6c935a3031 100644 --- a/env_gen.md +++ b/env_gen.md @@ -259,6 +259,7 @@ | USE_CASBIN_V2 | false | | | USE_CUSTOM_HTTP_TRANSPORT | false | | | USE_DEPLOYMENT_CONFIG_DATA | false | | + | USE_DOCKER_API_TO_GET_DIGEST | false | | | USE_EXTERNAL_NODE | false | | | USE_GIT_CLI | false | | | USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD | false | | diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index d16dd55199d..c9607825858 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -743,6 +743,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. PluginArtifactStage: pluginArtifactStage, ImageScanMaxRetries: impl.config.ImageScanMaxRetries, ImageScanRetryDelay: impl.config.ImageScanRetryDelay, + UseDockerApiToGetDigest: impl.config.UseDockerApiToGetDigest, } if pipeline.App.AppType == helper.Job { workflowRequest.AppName = pipeline.App.DisplayName diff --git a/pkg/pipeline/types/CiCdConfig.go b/pkg/pipeline/types/CiCdConfig.go index 50e7d272712..df0f6ebaa50 100644 --- a/pkg/pipeline/types/CiCdConfig.go +++ b/pkg/pipeline/types/CiCdConfig.go @@ -141,6 +141,7 @@ type CiCdConfig struct { ExtBlobStorageSecretName string `env:"EXTERNAL_BLOB_STORAGE_SECRET_NAME" envDefault:"blob-storage-secret"` UseArtifactListingQueryV2 bool `env:"USE_ARTIFACT_LISTING_QUERY_V2" envDefault:"true"` UseImageTagFromGitProviderForTagBasedBuild bool `env:"USE_IMAGE_TAG_FROM_GIT_PROVIDER_FOR_TAG_BASED_BUILD" envDefault:"false"` // this is being done for https://github.com/devtron-labs/devtron/issues/4263 + UseDockerApiToGetDigest bool `env:"USE_DOCKER_API_TO_GET_DIGEST" envDefault:"false"` } type CiConfig struct { diff --git a/pkg/pipeline/types/Workflow.go b/pkg/pipeline/types/Workflow.go index af15733e1c9..cbecb3cd62e 100644 --- a/pkg/pipeline/types/Workflow.go +++ b/pkg/pipeline/types/Workflow.go @@ -144,6 +144,7 @@ type WorkflowRequest struct { Scope resourceQualifiers.Scope BuildxCacheModeMin bool `json:"buildxCacheModeMin"` AsyncBuildxCacheExport bool `json:"asyncBuildxCacheExport"` + UseDockerApiToGetDigest bool `json:"useDockerApiToGetDigest"` } func (workflowRequest *WorkflowRequest) updateExternalRunMetadata() { From c66ccf5750b61ed4c7cb7ae4acedf3cebf7fb40f Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 30 Aug 2024 19:28:16 +0530 Subject: [PATCH 27/61] fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row --- .../repository/InstalledAppRepository.go | 2 +- .../service/AppStoreDeploymentService.go | 2 +- pkg/deployment/gitOps/git/GitServiceGithub.go | 31 +++++++++++++++---- pkg/pipeline/CdHandler.go | 10 ++++-- pkg/pipeline/CiHandler.go | 10 ++++-- pkg/pipeline/CiLogService.go | 7 +++-- util/helper.go | 8 +++++ 7 files changed, 54 insertions(+), 16 deletions(-) diff --git a/pkg/appStore/installedApp/repository/InstalledAppRepository.go b/pkg/appStore/installedApp/repository/InstalledAppRepository.go index bdf7d047516..da873dd50bc 100644 --- a/pkg/appStore/installedApp/repository/InstalledAppRepository.go +++ 
b/pkg/appStore/installedApp/repository/InstalledAppRepository.go @@ -714,7 +714,7 @@ func (impl InstalledAppRepositoryImpl) GetDeploymentSuccessfulStatusCountForTele func (impl InstalledAppRepositoryImpl) GetGitOpsInstalledAppsWhereArgoAppDeletedIsTrue(installedAppId int, envId int) (InstalledApps, error) { var installedApps InstalledApps err := impl.dbConnection.Model(&installedApps). - Column("installed_apps.*", "App.app_name", "Environment.namespace", "Environment.cluster_id", "Environment.environment_name"). + Column("installed_apps.*", "App.id", "App.app_name", "Environment.namespace", "Environment.cluster_id", "Environment.environment_name"). Where("deployment_app_delete_request = ?", true). Where("installed_apps.active = ?", true). Where("installed_apps.id = ?", installedAppId). diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go index 62764056075..72bf9555dba 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentService.go @@ -877,7 +877,7 @@ func (impl *AppStoreDeploymentServiceImpl) MarkGitOpsInstalledAppsDeletedIfArgoA apiError.InternalMessage = "error in fetching partially deleted argoCd apps from installed app repo" return apiError } - deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(installedAppId, envId) + deploymentConfig, err := impl.deploymentConfigService.GetConfigForHelmApps(installedApp.App.Id, envId) if err != nil { impl.logger.Errorw("error in getting deployment config by appId and envId", "appId", installedAppId, "envId", envId, "err", err) apiError.HttpStatusCode = http.StatusInternalServerError diff --git a/pkg/deployment/gitOps/git/GitServiceGithub.go b/pkg/deployment/gitOps/git/GitServiceGithub.go index b48d8b5ab43..06e78162f36 100644 --- a/pkg/deployment/gitOps/git/GitServiceGithub.go +++ b/pkg/deployment/gitOps/git/GitServiceGithub.go @@ -19,7 +19,9 @@ package git import ( "context" "crypto/tls" + "errors" "fmt" + "github.com/devtron-labs/common-lib/utils/runTime" bean2 "github.com/devtron-labs/devtron/api/bean/gitOps" globalUtil "github.com/devtron-labs/devtron/util" "github.com/devtron-labs/devtron/util/retryFunc" @@ -91,6 +93,15 @@ func (impl GitHubClient) DeleteRepository(config *bean2.GitOpsConfigDto) error { return nil } +func IsRepoNotFound(err error) bool { + if err == nil { + return false + } + var responseErr *github.ErrorResponse + ok := errors.As(err, &responseErr) + return ok && responseErr.Response.StatusCode == 404 +} + func (impl GitHubClient) CreateRepository(ctx context.Context, config *bean2.GitOpsConfigDto) (url string, isNew bool, detailedErrorGitOpsConfigActions DetailedErrorGitOpsConfigActions) { var err error start := time.Now() @@ -100,15 +111,14 @@ func (impl GitHubClient) CreateRepository(ctx context.Context, config *bean2.Git detailedErrorGitOpsConfigActions.StageErrorMap = make(map[string]error) repoExists := true - url, err = impl.GetRepoUrl(config) + url, err = impl.getRepoUrl(ctx, config, IsRepoNotFound) if err != nil { - responseErr, ok := err.(*github.ErrorResponse) - if !ok || responseErr.Response.StatusCode != 404 { + if IsRepoNotFound(err) { + repoExists = false + } else { impl.logger.Errorw("error in creating github repo", "err", err) detailedErrorGitOpsConfigActions.StageErrorMap[GetRepoUrlStage] = err return "", false, detailedErrorGitOpsConfigActions - } else { - repoExists = false } } if repoExists { @@ -251,12 +261,21 @@ func 
(impl GitHubClient) CommitValues(ctx context.Context, config *ChartConfig, } func (impl GitHubClient) GetRepoUrl(config *bean2.GitOpsConfigDto) (repoUrl string, err error) { + ctx := context.Background() + return impl.getRepoUrl(ctx, config, globalUtil.AllPublishableError()) +} + +func (impl GitHubClient) getRepoUrl(ctx context.Context, config *bean2.GitOpsConfigDto, + isNonPublishableError globalUtil.EvalIsNonPublishableErr) (repoUrl string, err error) { start := time.Now() defer func() { + if isNonPublishableError(err) { + impl.logger.Debugw("found non publishable error. skipping metrics publish!", "caller method", runTime.GetCallerFunctionName(), "err", err) + return + } globalUtil.TriggerGitOpsMetrics("GetRepoUrl", "GitHubClient", start, err) }() - ctx := context.Background() repo, _, err := impl.client.Repositories.Get(ctx, impl.org, config.GitRepoName) if err != nil { impl.logger.Errorw("error in getting repo url by repo name", "org", impl.org, "gitRepoName", config.GitRepoName, "err", err) diff --git a/pkg/pipeline/CdHandler.go b/pkg/pipeline/CdHandler.go index 791c32da739..72da885a3dd 100644 --- a/pkg/pipeline/CdHandler.go +++ b/pkg/pipeline/CdHandler.go @@ -475,11 +475,15 @@ func (impl *CdHandlerImpl) getWorkflowLogs(pipelineId int, cdWorkflow *pipelineC if !cdWorkflow.BlobStorageEnabled { return nil, nil, errors.New("logs-not-stored-in-repository") } else if string(v1alpha1.NodeSucceeded) == cdWorkflow.Status || string(v1alpha1.NodeError) == cdWorkflow.Status || string(v1alpha1.NodeFailed) == cdWorkflow.Status || cdWorkflow.Status == executors.WorkflowCancel { - impl.Logger.Debugw("pod is not live ", "err", err) + impl.Logger.Debugw("pod is not live", "podName", cdWorkflow.PodName, "err", err) return impl.getLogsFromRepository(pipelineId, cdWorkflow, clusterConfig, runStageInEnv) } - impl.Logger.Errorw("err on fetch workflow logs", "err", err) - return nil, nil, err + if err != nil { + impl.Logger.Errorw("err on fetch workflow logs", "err", err) + return nil, nil, err + } else if logStream == nil { + return nil, cleanUp, fmt.Errorf("no logs found for pod %s", cdWorkflow.PodName) + } } logReader := bufio.NewReader(logStream) return logReader, cleanUp, err diff --git a/pkg/pipeline/CiHandler.go b/pkg/pipeline/CiHandler.go index c9be42897e0..722de6abb1e 100644 --- a/pkg/pipeline/CiHandler.go +++ b/pkg/pipeline/CiHandler.go @@ -798,11 +798,15 @@ func (impl *CiHandlerImpl) getWorkflowLogs(pipelineId int, ciWorkflow *pipelineC if !ciWorkflow.BlobStorageEnabled { return nil, nil, &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: "logs-not-stored-in-repository"} } else if string(v1alpha1.NodeSucceeded) == ciWorkflow.Status || string(v1alpha1.NodeError) == ciWorkflow.Status || string(v1alpha1.NodeFailed) == ciWorkflow.Status || ciWorkflow.Status == executors.WorkflowCancel { - impl.Logger.Errorw("err", "err", err) + impl.Logger.Debugw("pod is not live", "podName", ciWorkflow.PodName, "err", err) return impl.getLogsFromRepository(pipelineId, ciWorkflow, clusterConfig, isExt) } - impl.Logger.Errorw("err", "err", err) - return nil, nil, &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: err.Error()} + if err != nil { + impl.Logger.Errorw("err on fetch workflow logs", "err", err) + return nil, nil, &util.ApiError{Code: "200", HttpStatusCode: 400, UserMessage: err.Error()} + } else if logStream == nil { + return nil, cleanUp, fmt.Errorf("no logs found for pod %s", ciWorkflow.PodName) + } } logReader := bufio.NewReader(logStream) return logReader, cleanUp, err diff --git 
a/pkg/pipeline/CiLogService.go b/pkg/pipeline/CiLogService.go index cb0a500bda3..7d0d7302703 100644 --- a/pkg/pipeline/CiLogService.go +++ b/pkg/pipeline/CiLogService.go @@ -68,9 +68,12 @@ func (impl *CiLogServiceImpl) FetchRunningWorkflowLogs(ciLogRequest types.BuildL } req := impl.k8sUtil.GetLogsForAPod(kubeClient, ciLogRequest.Namespace, ciLogRequest.PodName, CiPipeline.Main, true) podLogs, err := req.Stream(context.Background()) - if podLogs == nil || err != nil { - impl.logger.Errorw("error in opening stream", "name", ciLogRequest.PodName) + if err != nil { + impl.logger.Errorw("error in opening stream", "name", ciLogRequest.PodName, "err", err) return nil, nil, err + } else if podLogs == nil { + impl.logger.Warnw("no stream reader found", "name", ciLogRequest.PodName) + return nil, func() error { return nil }, err } cleanUpFunc := func() error { impl.logger.Info("closing running pod log stream") diff --git a/util/helper.go b/util/helper.go index 1e979a2d42c..bd158ae7884 100644 --- a/util/helper.go +++ b/util/helper.go @@ -278,6 +278,14 @@ func TriggerGitOpsMetrics(operation string, method string, startTime time.Time, middleware.GitOpsDuration.WithLabelValues(operation, method, status).Observe(time.Since(startTime).Seconds()) } +type EvalIsNonPublishableErr func(err error) bool + +func AllPublishableError() EvalIsNonPublishableErr { + return func(err error) bool { + return false + } +} + func InterfaceToString(resp interface{}) string { var dat string b, err := json.Marshal(resp) From 4296366ae288f3a67f87e547d2b946acbcd2dd65 Mon Sep 17 00:00:00 2001 From: Prakash Date: Mon, 2 Sep 2024 13:20:21 +0530 Subject: [PATCH 28/61] feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 --- api/restHandler/GlobalPluginRestHandler.go | 67 +++ api/router/GlobalPluginRouter.go | 5 +- internal/util/ErrorUtil.go | 9 + pkg/plugin/GlobalPluginService.go | 412 +++++++++++++++--- pkg/plugin/adaptor/adaptor.go | 80 ++++ pkg/plugin/bean/bean.go | 84 ++-- .../repository/GlobalPluginRepository.go | 178 +++++++- pkg/plugin/utils/utils.go | 25 ++ 8 files changed, 757 insertions(+), 103 deletions(-) create mode 100644 pkg/plugin/adaptor/adaptor.go diff --git a/api/restHandler/GlobalPluginRestHandler.go b/api/restHandler/GlobalPluginRestHandler.go index 7fc25ec6b16..44a2305c0ab 100644 --- a/api/restHandler/GlobalPluginRestHandler.go +++ b/api/restHandler/GlobalPluginRestHandler.go @@ -35,6 +35,7 @@ import ( type GlobalPluginRestHandler interface { PatchPlugin(w http.ResponseWriter, r *http.Request) + CreatePlugin(w http.ResponseWriter, r *http.Request) GetAllGlobalVariables(w http.ResponseWriter, r 
*http.Request) ListAllPlugins(w http.ResponseWriter, r *http.Request) @@ -46,6 +47,7 @@ type GlobalPluginRestHandler interface { GetPluginDetailByIds(w http.ResponseWriter, r *http.Request) GetAllUniqueTags(w http.ResponseWriter, r *http.Request) MigratePluginData(w http.ResponseWriter, r *http.Request) + GetAllPluginMinData(w http.ResponseWriter, r *http.Request) } func NewGlobalPluginRestHandler(logger *zap.SugaredLogger, globalPluginService plugin.GlobalPluginService, @@ -420,3 +422,68 @@ func (handler *GlobalPluginRestHandlerImpl) MigratePluginData(w http.ResponseWri } common.WriteJsonResp(w, nil, nil, http.StatusOK) } + +func (handler *GlobalPluginRestHandlerImpl) CreatePlugin(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + token := r.Header.Get("token") + appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) + if err != nil { + return + } + ok, err := handler.IsUserAuthorized(token, appId) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) + return + } + decoder := json.NewDecoder(r.Body) + var pluginDataDto bean.PluginParentMetadataDto + err = decoder.Decode(&pluginDataDto) + if err != nil { + handler.logger.Errorw("request err, CreatePlugin", "error", err, "payload", pluginDataDto) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + handler.logger.Infow("request payload received for creating plugins", pluginDataDto, "userId", userId) + + pluginVersionId, err := handler.globalPluginService.CreatePluginOrVersions(&pluginDataDto, userId) + if err != nil { + handler.logger.Errorw("service error, error in creating plugin", "pluginCreateRequestDto", pluginDataDto, "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, bean.NewPluginMinDto().WithPluginVersionId(pluginVersionId), http.StatusOK) +} + +func (handler *GlobalPluginRestHandlerImpl) GetAllPluginMinData(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("token") + appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) + if err != nil { + return + } + ok, err := handler.IsUserAuthorized(token, appId) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) + return + } + + pluginDetail, err := handler.globalPluginService.GetAllPluginMinData() + if err != nil { + handler.logger.Errorw("error in getting all unique tags", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, pluginDetail, http.StatusOK) +} diff --git a/api/router/GlobalPluginRouter.go b/api/router/GlobalPluginRouter.go index 06950c33421..0d8154bf822 100644 --- a/api/router/GlobalPluginRouter.go +++ b/api/router/GlobalPluginRouter.go @@ -41,7 +41,8 @@ type GlobalPluginRouterImpl struct { func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *mux.Router) { globalPluginRouter.Path("/migrate"). HandlerFunc(impl.globalPluginRestHandler.MigratePluginData).Methods("PUT") - + globalPluginRouter.Path("/create"). 
+ HandlerFunc(impl.globalPluginRestHandler.CreatePlugin).Methods("POST") // versioning impact handling to be done for below apis, globalPluginRouter.Path(""). HandlerFunc(impl.globalPluginRestHandler.PatchPlugin).Methods("POST") @@ -68,5 +69,7 @@ func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *m globalPluginRouter.Path("/list/tags"). HandlerFunc(impl.globalPluginRestHandler.GetAllUniqueTags).Methods("GET") + globalPluginRouter.Path("/list/v2/min"). + HandlerFunc(impl.globalPluginRestHandler.GetAllPluginMinData).Methods("GET") } diff --git a/internal/util/ErrorUtil.go b/internal/util/ErrorUtil.go index 59a8d234415..43f3c9b942e 100644 --- a/internal/util/ErrorUtil.go +++ b/internal/util/ErrorUtil.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "net/http" + "strconv" ) type ApiError struct { @@ -36,6 +37,14 @@ type ApiError struct { UserDetailMessage string `json:"userDetailMessage,omitempty"` } +func GetApiError(code int, userMessage, internalMessage string) *ApiError { + return &ApiError{ + HttpStatusCode: code, + Code: strconv.Itoa(code), + InternalMessage: internalMessage, + UserMessage: userMessage, + } +} func NewApiError() *ApiError { return &ApiError{} } diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 55ff942789b..5423ab22b2d 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -24,6 +24,7 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/auth/user/bean" repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/plugin/adaptor" bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" helper2 "github.com/devtron-labs/devtron/pkg/plugin/helper" "github.com/devtron-labs/devtron/pkg/plugin/repository" @@ -31,8 +32,8 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.uber.org/zap" + "golang.org/x/mod/semver" "net/http" - "strconv" "strings" "time" ) @@ -73,9 +74,11 @@ type GlobalPluginService interface { GetDetailedPluginInfoByPluginId(pluginId int) (*bean2.PluginMetadataDto, error) GetAllDetailedPluginInfo() ([]*bean2.PluginMetadataDto, error) + CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) ListAllPluginsV2(filter *bean2.PluginsListFilter) (*bean2.PluginsDto, error) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) GetAllUniqueTags() (*bean2.PluginTagsDto, error) + GetAllPluginMinData() ([]*bean2.PluginMinDto, error) MigratePluginData() error } @@ -429,7 +432,7 @@ func (impl *GlobalPluginServiceImpl) validatePluginRequest(pluginReq *bean2.Plug return errors.New("invalid plugin type, should be of the type PRESET or SHARED") } - plugins, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() + plugins, err := impl.globalPluginRepository.GetAllPluginMinData() if err != nil { impl.logger.Errorw("error in getting all plugins", "err", err) return err @@ -670,33 +673,10 @@ func (impl *GlobalPluginServiceImpl) UpdatePluginPipelineScript(dbPluginPipeline func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int, pluginStepsReq []*bean2.PluginStepsDto, userId int32, tx *pg.Tx) error { for _, pluginStep := range pluginStepsReq { - pluginStepData := &repository.PluginStep{ - PluginId: pluginMetadataId, - Name: pluginStep.Name, - Description: pluginStep.Description, - Index: 
pluginStep.Index, - StepType: pluginStep.StepType, - RefPluginId: pluginStep.RefPluginId, - OutputDirectoryPath: pluginStep.OutputDirectoryPath, - DependentOnStep: pluginStep.DependentOnStep, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepData := adaptor.GetPluginStepDbObject(pluginStep, pluginMetadataId, userId) //get the script saved for this plugin step if pluginStep.PluginPipelineScript != nil { - pluginPipelineScript := &repository.PluginPipelineScript{ - Script: pluginStep.PluginPipelineScript.Script, - StoreScriptAt: pluginStep.PluginPipelineScript.StoreScriptAt, - Type: pluginStep.PluginPipelineScript.Type, - DockerfileExists: pluginStep.PluginPipelineScript.DockerfileExists, - MountPath: pluginStep.PluginPipelineScript.MountPath, - MountCodeToContainer: pluginStep.PluginPipelineScript.MountCodeToContainer, - MountCodeToContainerPath: pluginStep.PluginPipelineScript.MountCodeToContainerPath, - MountDirectoryFromHost: pluginStep.PluginPipelineScript.MountDirectoryFromHost, - ContainerImagePath: pluginStep.PluginPipelineScript.ContainerImagePath, - ImagePullSecretType: pluginStep.PluginPipelineScript.ImagePullSecretType, - ImagePullSecret: pluginStep.PluginPipelineScript.ImagePullSecret, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginPipelineScript := adaptor.GetPluginPipelineScriptDbObject(pluginStep.PluginPipelineScript, userId) pluginPipelineScript, err := impl.globalPluginRepository.SavePluginPipelineScript(pluginPipelineScript, tx) if err != nil { impl.logger.Errorw("error in saving plugin pipeline script", "pluginPipelineScript", pluginPipelineScript, "err", err) @@ -719,23 +699,7 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStep.Id = pluginStepData.Id //create entry in plugin_step_variable for _, pluginStepVariable := range pluginStep.PluginStepVariable { - pluginStepVariableData := &repository.PluginStepVariable{ - PluginStepId: pluginStepData.Id, - Name: pluginStepVariable.Name, - Format: pluginStepVariable.Format, - Description: pluginStepVariable.Description, - IsExposed: pluginStepVariable.IsExposed, - AllowEmptyValue: pluginStepVariable.AllowEmptyValue, - DefaultValue: pluginStepVariable.DefaultValue, - Value: pluginStepVariable.Value, - VariableType: pluginStepVariable.VariableType, - ValueType: pluginStepVariable.ValueType, - PreviousStepIndex: pluginStepVariable.PreviousStepIndex, - VariableStepIndex: pluginStepVariable.VariableStepIndex, - VariableStepIndexInPlugin: pluginStepVariable.VariableStepIndexInPlugin, - ReferenceVariableName: pluginStepVariable.ReferenceVariableName, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepVariableData := adaptor.GetPluginStepVariableDbObject(pluginStepData.Id, pluginStepVariable, userId) pluginStepVariableData, err = impl.globalPluginRepository.SavePluginStepVariables(pluginStepVariableData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step variable", "pluginStepVariableData", pluginStepVariableData, "err", err) @@ -744,14 +708,7 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStepVariable.Id = pluginStepVariableData.Id //create entry in plugin_step_condition for _, pluginStepCondition := range pluginStepVariable.PluginStepCondition { - pluginStepConditionData := &repository.PluginStepCondition{ - PluginStepId: pluginStepData.Id, - ConditionVariableId: pluginStepVariableData.Id, - ConditionType: pluginStepCondition.ConditionType, - ConditionalOperator: pluginStepCondition.ConditionalOperator, - 
ConditionalValue: pluginStepCondition.ConditionalValue, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepConditionData := adaptor.GetPluginStepConditionDbObject(pluginStepData.Id, pluginStepVariableData.Id, pluginStepCondition, userId) pluginStepConditionData, err = impl.globalPluginRepository.SavePluginStepConditions(pluginStepConditionData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step condition", "pluginStepConditionData", pluginStepConditionData, "err", err) @@ -768,7 +725,6 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM if len(pluginUpdateReq.Type) == 0 { return nil, errors.New("invalid plugin type, should be of the type PRESET or SHARED") } - dbConnection := impl.globalPluginRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { @@ -856,6 +812,7 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM return nil, err } } + if len(pluginStepsToUpdate) > 0 { err = impl.updateDeepPluginStepData(pluginStepsToUpdate, pluginStepVariables, pluginStepConditions, pluginSteps, userId, tx) if err != nil { @@ -1386,7 +1343,6 @@ func filterPluginStepData(existingPluginStepsInDb []*repository.PluginStep, plug } else { return nil, nil, pluginStepUpdateReq } - return newPluginStepsToCreate, pluginStepsToRemove, pluginStepsToUpdate } @@ -1805,28 +1761,59 @@ func (impl *GlobalPluginServiceImpl) ListAllPluginsV2(filter *bean2.PluginsListF return pluginDetails, nil } +func (impl *GlobalPluginServiceImpl) validateDetailRequest(pluginVersions []*repository.PluginMetadata, pluginVersionIds, parentPluginIds []int) error { + pluginVersionsIdMap, pluginParentIdMap := make(map[int]bool, len(pluginVersionIds)), make(map[int]bool, len(parentPluginIds)) + allPlugins, err := impl.globalPluginRepository.GetAllPluginMinData() + if err != nil { + impl.logger.Errorw("validateDetailRequest, error in getting all plugins parent metadata", "err", err) + return err + } + for _, pluginVersion := range pluginVersions { + pluginVersionsIdMap[pluginVersion.Id] = true + } + for _, plugin := range allPlugins { + pluginParentIdMap[plugin.Id] = true + } + for _, versionId := range pluginVersionIds { + if _, ok := pluginVersionsIdMap[versionId]; !ok { + errorMsg := fmt.Sprintf("there are some plugin version ids in request that do not exist:- %d", versionId) + return util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) + } + } + for _, pluginId := range parentPluginIds { + if _, ok := pluginParentIdMap[pluginId]; !ok { + errorMsg := fmt.Sprintf("there are some plugin parent ids in request that do not exist %d", pluginId) + return util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) + } + } + return nil +} // GetPluginDetailV2 returns all details of the of a plugin version according to the pluginVersionIds and parentPluginIds // provided by user, and minimal data for all versions of that plugin. 
func (impl *GlobalPluginServiceImpl) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) { + var err error + pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() + if err != nil { + impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err) + return nil, err + } + err = impl.validateDetailRequest(pluginVersionsMetadata, pluginVersionIds, parentPluginIds) + if err != nil { + return nil, err + } pluginParentMetadataDtos := make([]*bean2.PluginParentMetadataDto, 0, len(pluginVersionIds)+len(parentPluginIds)) if len(pluginVersionIds) == 0 && len(parentPluginIds) == 0 { - return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: strconv.Itoa(http.StatusBadRequest), InternalMessage: bean2.NoPluginOrParentIdProvidedErr, UserMessage: bean2.NoPluginOrParentIdProvidedErr} + return nil, util.GetApiError(http.StatusBadRequest, bean2.NoPluginOrParentIdProvidedErr, bean2.NoPluginOrParentIdProvidedErr) } pluginVersionIdsMap, parentPluginIdsMap := helper2.GetPluginVersionAndParentPluginIdsMap(pluginVersionIds, parentPluginIds) - var err error pluginParentMetadataIds := make([]int, 0, len(pluginVersionIds)+len(parentPluginIds)) pluginVersionsIdToInclude := make(map[int]bool, len(pluginVersionIds)+len(parentPluginIds)) - pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() - if err != nil { - impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err) - return nil, err - } filteredPluginVersionMetadata := helper2.GetPluginVersionsMetadataByVersionAndParentPluginIds(pluginVersionsMetadata, pluginVersionIdsMap, parentPluginIdsMap) if len(filteredPluginVersionMetadata) == 0 { - return nil, &util.ApiError{HttpStatusCode: http.StatusNotFound, Code: strconv.Itoa(http.StatusNotFound), InternalMessage: bean2.NoPluginFoundForThisSearchQueryErr, UserMessage: bean2.NoPluginFoundForThisSearchQueryErr} + return nil, util.GetApiError(http.StatusNotFound, bean2.NoPluginFoundForThisSearchQueryErr, bean2.NoPluginFoundForThisSearchQueryErr) } for _, version := range filteredPluginVersionMetadata { _, found := pluginVersionIdsMap[version.Id] @@ -1884,7 +1871,6 @@ func (impl *GlobalPluginServiceImpl) MigratePluginData() error { // MigratePluginDataToParentPluginMetadata migrates pre-existing plugin metadata from plugin_metadata table into plugin_parent_metadata table, // and also populate plugin_parent_metadata_id in plugin_metadata. 
-// this operation will happen only once when the get all plugin list v2 api is being called, returns error if any func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(pluginsMetadata []*repository.PluginMetadata) error { dbConnection := impl.globalPluginRepository.GetConnection() tx, err := dbConnection.Begin() @@ -1948,3 +1934,303 @@ func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(plu } return nil } + +func (impl *GlobalPluginServiceImpl) GetAllPluginMinData() ([]*bean2.PluginMinDto, error) { + pluginsParentMinData, err := impl.globalPluginRepository.GetAllPluginMinData() + if err != nil { + impl.logger.Errorw("GetAllPluginMinData, error in getting all plugin parent metadata min data", "err", err) + return nil, err + } + pluginMinList := make([]*bean2.PluginMinDto, 0, len(pluginsParentMinData)) + for _, item := range pluginsParentMinData { + //since creating new version of preset plugin is disabled for end user, hence ignoring PRESET plugin in min list + if item.Type == repository.PLUGIN_TYPE_PRESET { + continue + } + pluginMinList = append(pluginMinList, bean2.NewPluginMinDto().WithParentPluginId(item.Id).WithPluginName(item.Name).WithIcon(item.Icon)) + } + return pluginMinList, nil +} + +func (impl *GlobalPluginServiceImpl) checkValidationOnPluginNameAndIdentifier(pluginReq *bean2.PluginParentMetadataDto) error { + plugins, err := impl.globalPluginRepository.GetAllPluginMinData() + if err != nil { + impl.logger.Errorw("error in getting all plugins", "err", err) + return err + } + for _, plugin := range plugins { + if plugin.Identifier == pluginReq.PluginIdentifier { + return util.GetApiError(http.StatusConflict, bean2.PluginWithSameIdentifierExistsError, bean2.PluginWithSameIdentifierExistsError) + } + if plugin.Name == pluginReq.Name { + return util.GetApiError(http.StatusConflict, bean2.PluginWithSameNameExistError, bean2.PluginWithSameNameExistError) + } + } + return nil +} + +func (impl *GlobalPluginServiceImpl) checkValidationOnVersion(pluginReq *bean2.PluginParentMetadataDto) error { + pluginVersions, err := impl.globalPluginRepository.GetPluginVersionsByParentId(pluginReq.Id) + if err != nil { + impl.logger.Errorw("checkValidationOnVersion, error in getting all plugins versions by parentPluginId", "parentPluginId", pluginReq.Id, "err", err) + return err + } + for _, pluginVersion := range pluginVersions { + if pluginReq.Versions != nil && len(pluginReq.Versions.DetailedPluginVersionData) > 0 && pluginReq.Versions.DetailedPluginVersionData[0] != nil { + // if plugin version from req is already created then return error + if pluginVersion.PluginVersion == pluginReq.Versions.DetailedPluginVersionData[0].Version { + return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionAlreadyExistError, bean2.PluginVersionAlreadyExistError) + } + } + + } + return nil +} + +func (impl *GlobalPluginServiceImpl) validateV2PluginRequest(pluginReq *bean2.PluginParentMetadataDto) error { + if pluginReq.Versions == nil || len(pluginReq.Versions.DetailedPluginVersionData) == 0 || pluginReq.Versions.DetailedPluginVersionData[0] == nil { + return util.GetApiError(http.StatusBadRequest, bean2.NoStepDataToProceedError, bean2.NoStepDataToProceedError) + } + if pluginReq.Id == 0 { + //create plugin req. 
+ err := impl.checkValidationOnPluginNameAndIdentifier(pluginReq)
+ if err != nil {
+ impl.logger.Errorw("error in checkValidationOnPluginNameAndIdentifier", "err", err)
+ return err
+ }
+ } else {
+ err := impl.checkValidationOnVersion(pluginReq)
+ if err != nil {
+ impl.logger.Errorw("error in checkValidationOnVersion", "err", err)
+ return err
+ }
+ }
+ version := pluginReq.Versions.DetailedPluginVersionData[0].Version
+ if !strings.HasPrefix(version, "v") {
+ version = fmt.Sprintf("v%s", version)
+ }
+ // semantic versioning validation on plugin's version
+ if !semver.IsValid(version) {
+ return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionNotSemanticallyCorrectError, bean2.PluginVersionNotSemanticallyCorrectError)
+ }
+ //validate icon url and size
+ if len(pluginReq.Icon) > 0 {
+ err := utils.FetchIconAndCheckSize(pluginReq.Icon, bean2.PluginIconMaxSizeInBytes)
+ if err != nil {
+ errMsg := fmt.Sprintf("%s, err: %s", bean2.PluginIconNotCorrectOrReachableError, err.Error())
+ return util.GetApiError(http.StatusBadRequest, errMsg, errMsg)
+ }
+ }
+ return nil
+}
+
+func (impl *GlobalPluginServiceImpl) createPluginTagAndRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+ if pluginReq.AreNewTagsPresent {
+ err := impl.CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq, userId, tx)
+ if err != nil {
+ impl.logger.Errorw("createPluginTagAndRelations, error in CreateNewPluginTagsAndRelationsIfRequiredV2", "tags", pluginReq.Tags, "err", err)
+ return err
+ }
+ } else if len(pluginReq.Tags) > 0 {
+ err := impl.CreatePluginTagRelations(pluginReq, userId, tx)
+ if err != nil {
+ impl.logger.Errorw("createPluginTagAndRelations, error in CreatePluginTagRelations", "tags", pluginReq.Tags, "err", err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (impl *GlobalPluginServiceImpl) CreatePluginTagRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+ tags, err := impl.globalPluginRepository.GetPluginTagByNames(pluginReq.Tags)
+ if err != nil {
+ impl.logger.Errorw("CreatePluginTagRelations, error in GetPluginTagByNames", "tags", pluginReq.Tags, "err", err)
+ return err
+ }
+ newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags))
+ for _, tag := range tags {
+ newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(tag.Id, pluginReq.Id))
+ }
+
+ if len(newPluginTagRelationsToCreate) > 0 {
+ err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx)
+ if err != nil {
+ impl.logger.Errorw("CreatePluginTagRelations, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err)
+ return err
+ }
+ }
+ return nil
+}
+
+func (impl *GlobalPluginServiceImpl) createPluginStepDataAndTagRelations(pluginVersionId int, pluginVersionDetail *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+ if len(pluginVersionDetail.PluginSteps) > 0 {
+ err := impl.saveDeepPluginStepData(pluginVersionId, pluginVersionDetail.PluginSteps, userId, tx)
+ if err != nil {
+ impl.logger.Errorw("createPluginStepDataAndTagRelations, error in saving plugin step data", "err", err)
+ return err
+ }
+ } else {
+ return util.GetApiError(http.StatusBadRequest, bean2.PluginStepsNotProvidedError, bean2.PluginStepsNotProvidedError)
+ }
+
+ err := impl.createPluginTagAndRelations(pluginVersionDetail, userId, tx)
+ if err != 
nil { + impl.logger.Errorw("createNewPlugin, error in createPluginTagAndRelations", "tags", pluginVersionDetail.Tags, "err", err) + return err + } + return nil +} + +func (impl *GlobalPluginServiceImpl) createNewPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { + pluginParentMetadata, err := impl.globalPluginRepository.SavePluginParentMetadata(tx, adaptor.GetPluginParentMetadataDbObject(pluginDto, userId)) + if err != nil { + impl.logger.Errorw("createNewPlugin, error in saving plugin parent metadata", "pluginDto", pluginDto, "err", err) + return 0, err + } + pluginDto.Id = pluginParentMetadata.Id + pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId). + WithPluginParentMetadataId(pluginParentMetadata.Id). + WithIsLatestFlag(true) + + pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx) + if err != nil { + impl.logger.Errorw("createNewPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err) + return 0, err + } + pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id + + pluginStageMapping := &repository.PluginStageMapping{ + PluginId: pluginParentMetadata.Id, + StageType: repository.CI_CD, + AuditLog: sql.NewDefaultAuditLog(userId), + } + _, err = impl.globalPluginRepository.SavePluginStageMapping(pluginStageMapping, tx) + if err != nil { + impl.logger.Errorw("createNewPlugin, error in saving plugin stage mapping", "pluginDto", pluginDto, "err", err) + return 0, err + } + + err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx) + if err != nil { + impl.logger.Errorw("createNewPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err) + return 0, err + } + return pluginVersionMetadata.Id, nil +} + +func (impl *GlobalPluginServiceImpl) createNewPluginVersionOfExistingPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { + var pluginParentMinData *repository.PluginParentMetadata + var err error + pluginParentMinData, err = impl.globalPluginRepository.GetPluginParentMinDataById(pluginDto.Id) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in getting plugin parent metadata", "pluginDto", pluginDto, "err", err) + return 0, err + } + // before saving new plugin version marking previous version's isLatest as false. + err = impl.globalPluginRepository.MarkPreviousPluginVersionLatestFalse(pluginParentMinData.Id) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in MarkPreviousPluginVersionLatestFalse", "pluginParentId", pluginDto.Id, "err", err) + return 0, err + } + pluginDto.Name = pluginParentMinData.Name + pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId). + WithPluginParentMetadataId(pluginParentMinData.Id). 
+ WithIsLatestFlag(true) + + pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err) + return 0, err + } + pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id + + err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err) + return 0, err + } + return pluginVersionMetadata.Id, nil +} + +func (impl *GlobalPluginServiceImpl) CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { + err := impl.validateV2PluginRequest(pluginDto) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in validating create plugin request", "pluginReqDto", pluginDto, "err", err) + return 0, err + } + + dbConnection := impl.globalPluginRepository.GetConnection() + tx, err := dbConnection.Begin() + if err != nil { + return 0, err + } + // Rollback tx on error. + defer tx.Rollback() + var versionMetadataId int + if pluginDto.Id > 0 { + // create new version of existing plugin req. + versionMetadataId, err = impl.createNewPluginVersionOfExistingPlugin(tx, pluginDto, userId) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in creating new version of an existing plugin", "existingPluginName", pluginDto.Name, "err", err) + return 0, err + } + } else { + // create new plugin req. + versionMetadataId, err = impl.createNewPlugin(tx, pluginDto, userId) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in creating new plugin", "pluginDto", pluginDto, "err", err) + return 0, err + } + } + err = tx.Commit() + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in committing db transaction", "err", err) + return 0, err + } + return versionMetadataId, nil +} + +func (impl *GlobalPluginServiceImpl) CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error { + allPluginTags, err := impl.globalPluginRepository.GetAllPluginTags() + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in getting all plugin tags", "err", err) + return err + } + existingTagMap := make(map[string]*repository.PluginTag, len(allPluginTags)) + for _, tag := range allPluginTags { + existingTagMap[tag.Name] = tag + } + //check for new tags, then create new plugin_tag and plugin_tag_relation entry in db when new tags are present in request + newPluginTagsToCreate := make([]*repository.PluginTag, 0, len(pluginReq.Tags)) + newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags)) + + for _, tagReq := range pluginReq.Tags { + if _, ok := existingTagMap[tagReq]; !ok { + newPluginTagsToCreate = append(newPluginTagsToCreate, repository.NewPluginTag().CreateAuditLog(userId).WithName(tagReq)) + } + } + + if len(newPluginTagsToCreate) > 0 { + err = impl.globalPluginRepository.SavePluginTagInBulk(newPluginTagsToCreate, tx) + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag", "newPluginTags", newPluginTagsToCreate, "err", err) + return err + } + for _, newTag := range newPluginTagsToCreate { + existingTagMap[newTag.Name] 
= newTag + } + } + + for _, tag := range pluginReq.Tags { + newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(existingTagMap[tag].Id, pluginReq.Id)) + } + + if len(newPluginTagRelationsToCreate) > 0 { + err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx) + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err) + return err + } + } + return nil +} diff --git a/pkg/plugin/adaptor/adaptor.go b/pkg/plugin/adaptor/adaptor.go new file mode 100644 index 00000000000..e5e0f50e9d3 --- /dev/null +++ b/pkg/plugin/adaptor/adaptor.go @@ -0,0 +1,80 @@ +package adaptor + +import ( + bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" + "github.com/devtron-labs/devtron/pkg/plugin/repository" + "github.com/devtron-labs/devtron/pkg/sql" +) + +func GetPluginParentMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginParentMetadata { + return repository.NewPluginParentMetadata().CreateAuditLog(userId). + WithBasicMetadata(pluginDto.Name, pluginDto.PluginIdentifier, pluginDto.Description, pluginDto.Icon, repository.PLUGIN_TYPE_SHARED) +} + +func GetPluginVersionMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginMetadata { + versionDto := pluginDto.Versions.DetailedPluginVersionData[0] + return repository.NewPluginVersionMetadata().CreateAuditLog(userId).WithBasicMetadata(pluginDto.Name, versionDto.Description, versionDto.Version, versionDto.DocLink) +} + +func GetPluginStepDbObject(pluginStepDto *bean2.PluginStepsDto, pluginVersionMetadataId int, userId int32) *repository.PluginStep { + return &repository.PluginStep{ + PluginId: pluginVersionMetadataId, + Name: pluginStepDto.Name, + Description: pluginStepDto.Description, + Index: 1, + StepType: repository.PLUGIN_STEP_TYPE_INLINE, + RefPluginId: pluginStepDto.RefPluginId, + OutputDirectoryPath: pluginStepDto.OutputDirectoryPath, + DependentOnStep: pluginStepDto.DependentOnStep, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} +func GetPluginPipelineScriptDbObject(pluginPipelineScript *bean2.PluginPipelineScript, userId int32) *repository.PluginPipelineScript { + return &repository.PluginPipelineScript{ + Script: pluginPipelineScript.Script, + StoreScriptAt: pluginPipelineScript.StoreScriptAt, + Type: pluginPipelineScript.Type, + DockerfileExists: pluginPipelineScript.DockerfileExists, + MountPath: pluginPipelineScript.MountPath, + MountCodeToContainer: pluginPipelineScript.MountCodeToContainer, + MountCodeToContainerPath: pluginPipelineScript.MountCodeToContainerPath, + MountDirectoryFromHost: pluginPipelineScript.MountDirectoryFromHost, + ContainerImagePath: pluginPipelineScript.ContainerImagePath, + ImagePullSecretType: pluginPipelineScript.ImagePullSecretType, + ImagePullSecret: pluginPipelineScript.ImagePullSecret, + AuditLog: sql.NewDefaultAuditLog(userId), + } + +} + +func GetPluginStepVariableDbObject(pluginStepId int, pluginVariableDto *bean2.PluginVariableDto, userId int32) *repository.PluginStepVariable { + return &repository.PluginStepVariable{ + PluginStepId: pluginStepId, + Name: pluginVariableDto.Name, + Format: pluginVariableDto.Format, + Description: pluginVariableDto.Description, + IsExposed: true, //currently hard coding this, later after plugin creation gets more mature will 
let user decide + AllowEmptyValue: pluginVariableDto.AllowEmptyValue, + DefaultValue: pluginVariableDto.DefaultValue, + Value: pluginVariableDto.Value, + VariableType: pluginVariableDto.VariableType, + ValueType: pluginVariableDto.ValueType, + PreviousStepIndex: pluginVariableDto.PreviousStepIndex, + VariableStepIndex: 1, //currently hard coding this, later after plugin creation gets more mature will let user decide + VariableStepIndexInPlugin: pluginVariableDto.VariableStepIndexInPlugin, + ReferenceVariableName: pluginVariableDto.ReferenceVariableName, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} + +func GetPluginStepConditionDbObject(stepDataId, pluginStepVariableId int, pluginStepCondition *bean2.PluginStepCondition, + userId int32) *repository.PluginStepCondition { + return &repository.PluginStepCondition{ + PluginStepId: stepDataId, + ConditionVariableId: pluginStepVariableId, + ConditionType: pluginStepCondition.ConditionType, + ConditionalOperator: pluginStepCondition.ConditionalOperator, + ConditionalValue: pluginStepCondition.ConditionalValue, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} diff --git a/pkg/plugin/bean/bean.go b/pkg/plugin/bean/bean.go index 55424f3caac..c31d3463327 100644 --- a/pkg/plugin/bean/bean.go +++ b/pkg/plugin/bean/bean.go @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package bean import ( @@ -44,15 +43,47 @@ type PluginListComponentDto struct { //created new struct for backward compatibi } type PluginMetadataDto struct { - Id int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc - Icon string `json:"icon,omitempty"` - Tags []string `json:"tags"` - Action int `json:"action,omitempty"` - PluginStage string `json:"pluginStage,omitempty"` - PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` + Id int `json:"id"` + Name string `json:"name" validate:"required,min=3,max=100,global-entity-name"` + Description string `json:"description" validate:"max=300"` + Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc + Icon string `json:"icon,omitempty"` + Tags []string `json:"tags"` + Action int `json:"action,omitempty"` + PluginStage string `json:"pluginStage,omitempty"` + PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` + AreNewTagsPresent bool `json:"areNewTagsPresent,omitempty"` +} + +type PluginMinDto struct { + ParentPluginId int `json:"id,omitempty"` + PluginName string `json:"name,omitempty"` + Icon string `json:"icon,omitempty"` + PluginVersionId int `json:"pluginVersionId,omitempty"` +} + +func NewPluginMinDto() *PluginMinDto { + return &PluginMinDto{} +} + +func (r *PluginMinDto) WithParentPluginId(id int) *PluginMinDto { + r.ParentPluginId = id + return r +} + +func (r *PluginMinDto) WithPluginName(name string) *PluginMinDto { + r.PluginName = name + return r +} + +func (r *PluginMinDto) WithIcon(icon string) *PluginMinDto { + r.Icon = icon + return r +} + +func (r *PluginMinDto) WithPluginVersionId(versionId int) *PluginMinDto { + r.PluginVersionId = versionId + return r } type PluginsDto struct { @@ -76,9 +107,9 @@ func (r *PluginsDto) WithTotalCount(count int) *PluginsDto { type PluginParentMetadataDto struct { Id int `json:"id"` - Name string `json:"name"` - PluginIdentifier string `json:"pluginIdentifier"` - Description string `json:"description"` + Name string `json:"name" 
validate:"required,min=3,max=100,global-entity-name"` + PluginIdentifier string `json:"pluginIdentifier" validate:"required,min=3,max=100,global-entity-name"` + Description string `json:"description" validate:"max=300"` Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` Icon string `json:"icon,omitempty"` Versions *PluginVersions `json:"pluginVersions"` @@ -124,17 +155,6 @@ type PluginVersions struct { MinimalPluginVersionData []*PluginsVersionDetail `json:"minimalPluginVersionData"` // contains only few metadata } -type PluginMinDto struct { - PluginName string `json:"pluginName"` - PluginVersions []*PluginVersionsMinDto `json:"pluginVersions"` - Icon string `json:"icon"` -} - -type PluginVersionsMinDto struct { - Id int `json:"id"` - Version string `json:"version"` -} - func NewPluginVersions() *PluginVersions { return &PluginVersions{} } @@ -154,7 +174,7 @@ type PluginsVersionDetail struct { InputVariables []*PluginVariableDto `json:"inputVariables"` OutputVariables []*PluginVariableDto `json:"outputVariables"` DocLink string `json:"docLink"` - Version string `json:"pluginVersion"` + Version string `json:"pluginVersion" validate:"max=50,min=3"` IsLatest bool `json:"isLatest"` UpdatedBy string `json:"updatedBy"` CreatedOn time.Time `json:"-"` @@ -336,10 +356,18 @@ type RegistryCredentials struct { } const ( - NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" - NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" + NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" + NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" + PluginStepsNotProvidedError = "plugin steps not provided" + PluginWithSameNameExistError = "plugin with the same name exists, please choose another name" + PluginWithSameIdentifierExistsError = "plugin with the same identifier exists, please choose another identifier name" + PluginVersionNotSemanticallyCorrectError = "please provide a plugin version that adheres to Semantic Versioning 2.0.0 to ensure compatibility and proper versioning" + PluginIconNotCorrectOrReachableError = "cannot validate icon, make sure that provided url link is reachable" + PluginVersionAlreadyExistError = "this plugin version already exists, please provide another plugin version" + NoStepDataToProceedError = "no step data provided to save, please provide a plugin step to proceed further" ) const ( - SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" + SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" + PluginIconMaxSizeInBytes = 2 * 1024 * 1024 ) diff --git a/pkg/plugin/repository/GlobalPluginRepository.go b/pkg/plugin/repository/GlobalPluginRepository.go index 8b650935231..9cc50748f29 100644 --- a/pkg/plugin/repository/GlobalPluginRepository.go +++ b/pkg/plugin/repository/GlobalPluginRepository.go @@ -99,6 +99,16 @@ func (r *PluginParentMetadata) CreateAuditLog(userId int32) *PluginParentMetadat return r } +func (r *PluginParentMetadata) WithBasicMetadata(name, identifier, description, icon string, pluginType PluginType) *PluginParentMetadata { + r.Name = name + r.Identifier = identifier + r.Description = description + r.Icon = icon + r.Type = pluginType + r.Deleted = false + return r +} + // SetParentPluginMetadata method signature used only for migration purposes, sets pluginVersionsMetadata into plugin_parent_metadata func (r *PluginParentMetadata) 
SetParentPluginMetadata(pluginMetadata *PluginMetadata) *PluginParentMetadata { r.Name = pluginMetadata.Name @@ -135,6 +145,38 @@ type PluginMetadata struct { sql.AuditLog } +func NewPluginVersionMetadata() *PluginMetadata { + return &PluginMetadata{} +} + +func (r *PluginMetadata) CreateAuditLog(userId int32) *PluginMetadata { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + +func (r *PluginMetadata) WithBasicMetadata(name, description, pluginVersion, docLink string) *PluginMetadata { + r.Name = name + r.PluginVersion = pluginVersion + r.Description = description + r.DocLink = docLink + r.Deleted = false + r.IsDeprecated = false + return r +} + +func (r *PluginMetadata) WithPluginParentMetadataId(parentId int) *PluginMetadata { + r.PluginParentMetadataId = parentId + return r +} + +func (r *PluginMetadata) WithIsLatestFlag(isLatest bool) *PluginMetadata { + r.IsLatest = isLatest + return r +} + type PluginTag struct { tableName struct{} `sql:"plugin_tag" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -143,6 +185,23 @@ type PluginTag struct { sql.AuditLog } +func NewPluginTag() *PluginTag { + return &PluginTag{} +} + +func (r *PluginTag) WithName(name string) *PluginTag { + r.Name = name + return r +} + +func (r *PluginTag) CreateAuditLog(userId int32) *PluginTag { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + type PluginTagRelation struct { tableName struct{} `sql:"plugin_tag_relation" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -151,6 +210,24 @@ type PluginTagRelation struct { sql.AuditLog } +func NewPluginTagRelation() *PluginTagRelation { + return &PluginTagRelation{} +} + +func (r *PluginTagRelation) WithTagAndPluginId(tagId, pluginId int) *PluginTagRelation { + r.TagId = tagId + r.PluginId = pluginId + return r +} + +func (r *PluginTagRelation) CreateAuditLog(userId int32) *PluginTagRelation { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + // Below two tables are used at pipeline-steps level too type PluginPipelineScript struct { @@ -247,7 +324,9 @@ type GlobalPluginRepository interface { GetMetaDataForAllPlugins() ([]*PluginMetadata, error) GetMetaDataForPluginWithStageType(stageType int) ([]*PluginMetadata, error) GetMetaDataByPluginId(pluginId int) (*PluginMetadata, error) + GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) GetAllPluginTags() ([]*PluginTag, error) + GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) GetAllPluginTagRelations() ([]*PluginTagRelation, error) GetTagsByPluginId(pluginId int) ([]string, error) GetScriptDetailById(id int) (*PluginPipelineScript, error) @@ -264,10 +343,14 @@ type GlobalPluginRepository interface { GetConditionsByPluginId(pluginId int) ([]*PluginStepCondition, error) GetPluginStageMappingByPluginId(pluginId int) (*PluginStageMapping, error) GetConnection() (dbConnection *pg.DB) + GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) GetPluginParentMetadataByIdentifier(pluginIdentifier string) (*PluginParentMetadata, error) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) GetPluginParentMetadataByIds(ids []int) ([]*PluginParentMetadata, error) + GetAllPluginMinData() ([]*PluginParentMetadata, error) + GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) + MarkPreviousPluginVersionLatestFalse(pluginParentId 
int) error SavePluginMetadata(pluginMetadata *PluginMetadata, tx *pg.Tx) (*PluginMetadata, error) SavePluginStageMapping(pluginStageMapping *PluginStageMapping, tx *pg.Tx) (*PluginStageMapping, error) @@ -351,6 +434,19 @@ func (impl *GlobalPluginRepositoryImpl) GetAllPluginTags() ([]*PluginTag, error) return tags, nil } +func (impl *GlobalPluginRepositoryImpl) GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) { + var tags []*PluginTag + err := impl.dbConnection.Model(&tags). + Where("deleted = ?", false). + Where("name in (?)", pg.In(tagNames)). + Select() + if err != nil { + impl.logger.Errorw("err in getting all tags by names", "tagNames", tagNames, "err", err) + return nil, err + } + return tags, nil +} + func (impl *GlobalPluginRepositoryImpl) GetAllPluginTagRelations() ([]*PluginTagRelation, error) { var rel []*PluginTagRelation err := impl.dbConnection.Model(&rel). @@ -385,6 +481,18 @@ func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginId(pluginId int) (*Pl return &plugin, nil } +func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) { + var plugins []*PluginMetadata + err := impl.dbConnection.Model(&plugins). + Where("deleted = ?", false). + Where("id in (?)", pg.In(pluginIds)).Select() + if err != nil { + impl.logger.Errorw("err in getting plugins by pluginIds", "pluginIds", pluginIds, "err", err) + return nil, err + } + return plugins, nil +} + func (impl *GlobalPluginRepositoryImpl) GetStepsByPluginIds(pluginIds []int) ([]*PluginStep, error) { var pluginSteps []*PluginStep err := impl.dbConnection.Model(&pluginSteps). @@ -511,6 +619,20 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginByName(pluginName string) ([]*P } +func (impl *GlobalPluginRepositoryImpl) GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) { + var plugin []*PluginMetadata + err := impl.dbConnection.Model(&plugin). + Where("plugin_parent_metadata_id = ?", parentPluginId). + Where("deleted = ?", false). + Where("is_deprecated = ?", false). + Select() + if err != nil { + impl.logger.Errorw("err in getting pluginVersionMetadata by parentPluginId", "parentPluginId", parentPluginId, "err", err) + return nil, err + } + return plugin, nil +} + func (impl *GlobalPluginRepositoryImpl) GetAllPluginMetaData() ([]*PluginMetadata, error) { var plugins []*PluginMetadata err := impl.dbConnection.Model(&plugins).Where("deleted = ?", false).Select() @@ -700,6 +822,18 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIdentifier(plug return &pluginParentMetadata, nil } +func (impl *GlobalPluginRepositoryImpl) GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) { + var pluginParentMetadata PluginParentMetadata + err := impl.dbConnection.Model(&pluginParentMetadata). + Column("plugin_parent_metadata.id", "plugin_parent_metadata.name"). + Where("id = ?", id). 
+ Where("deleted = ?", false).Select() + if err != nil { + return nil, err + } + return &pluginParentMetadata, nil +} + func (impl *GlobalPluginRepositoryImpl) SavePluginParentMetadata(tx *pg.Tx, pluginParentMetadata *PluginParentMetadata) (*PluginParentMetadata, error) { err := tx.Insert(pluginParentMetadata) return pluginParentMetadata, err @@ -712,24 +846,20 @@ func (impl *GlobalPluginRepositoryImpl) UpdatePluginMetadataInBulk(pluginsMetada func (impl *GlobalPluginRepositoryImpl) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) { var plugins []*PluginParentMetadata - subQuery := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + + query := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" - whereCondition := fmt.Sprintf(" where ppm.deleted=false") - orderCondition := fmt.Sprintf(" ORDER BY ppm.id asc") + whereCondition := fmt.Sprintf(" where ppm.deleted=false AND pm.deleted=false AND pm.is_latest=true") if len(tags) > 0 { - subQuery = "select DISTINCT ON(ppm.id) ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + - " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" + - " left join plugin_tag_relation ptr on ptr.plugin_id=pm.id" + - " left join plugin_tag pt on ptr.tag_id=pt.id" - whereCondition += fmt.Sprintf(" AND pm.deleted=false AND pt.deleted=false AND pt.name in (%s)", helper.GetCommaSepratedStringWithComma(tags)) + tagFilterSubQuery := fmt.Sprintf("select ptr.plugin_id from plugin_tag_relation ptr inner join plugin_tag pt on ptr.tag_id =pt.id where pt.deleted =false and pt.name in (%s) group by ptr.plugin_id having count(ptr.plugin_id )=%d", helper.GetCommaSepratedStringWithComma(tags), len(tags)) + whereCondition += fmt.Sprintf(" AND pm.id in (%s)", tagFilterSubQuery) } if len(searchKey) > 0 { searchKeyLike := "%" + searchKey + "%" whereCondition += fmt.Sprintf(" AND (pm.description ilike '%s' or pm.name ilike '%s')", searchKeyLike, searchKeyLike) } - whereCondition += fmt.Sprintf(" AND pm.is_latest=true") - subQuery += whereCondition + orderCondition - query := fmt.Sprintf(" select * from (%s) x ORDER BY name asc;", subQuery) + orderCondition := " ORDER BY ppm.name asc;" + + query += whereCondition + orderCondition _, err := impl.dbConnection.Query(&plugins, query) if err != nil { return nil, err @@ -749,3 +879,29 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIds(ids []int) } return plugins, nil } + +func (impl *GlobalPluginRepositoryImpl) GetAllPluginMinData() ([]*PluginParentMetadata, error) { + var plugins []*PluginParentMetadata + err := impl.dbConnection.Model(&plugins). + Column("plugin_parent_metadata.id", "plugin_parent_metadata.name", "plugin_parent_metadata.type", "plugin_parent_metadata.icon", "plugin_parent_metadata.identifier"). + Where("deleted = ?", false). 
+ Select()
+ if err != nil {
+ impl.logger.Errorw("err in getting all plugin parent metadata min data", "err", err)
+ return nil, err
+ }
+ return plugins, nil
+}
+
+func (impl *GlobalPluginRepositoryImpl) MarkPreviousPluginVersionLatestFalse(pluginParentId int) error {
+ var model PluginMetadata
+ _, err := impl.dbConnection.Model(&model).
+ Set("is_latest = ?", false).
+ Where("id = (select id from plugin_metadata where plugin_parent_metadata_id = ? and is_latest =true order by created_on desc limit ?)", pluginParentId, 1).
+ Update()
+ if err != nil {
+ impl.logger.Errorw("error in updating last version isLatest as false for a plugin parent id", "pluginParentId", pluginParentId, "err", err)
+ return err
+ }
+ return nil
+}
diff --git a/pkg/plugin/utils/utils.go b/pkg/plugin/utils/utils.go
index 6d78a291439..168e694d89b 100644
--- a/pkg/plugin/utils/utils.go
+++ b/pkg/plugin/utils/utils.go
@@ -21,9 +21,11 @@ import (
 "fmt"
 bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean"
 "github.com/devtron-labs/devtron/pkg/plugin/repository"
+ "net/http"
 "regexp"
 "sort"
 "strings"
+ "time"
 )

 func GetStageType(stageTypeReq string) (int, error) {
@@ -72,3 +74,26 @@ func SortPluginsVersionDetailSliceByCreatedOn(pluginsVersionDetail []*bean2.Plug
 return false
 })
 }
+
+func FetchIconAndCheckSize(url string, maxSize int64) error {
+ client := http.Client{
+ Timeout: 5 * time.Second,
+ }
+ iconResp, err := client.Get(url)
+ if err != nil {
+ return fmt.Errorf("error in fetching icon : %s", err.Error())
+ }
+ if iconResp != nil {
+ defer iconResp.Body.Close()
+ if iconResp.StatusCode >= 200 && iconResp.StatusCode < 300 {
+ if iconResp.ContentLength > maxSize {
+ return fmt.Errorf("icon size too large")
+ }
+ } else {
+ return fmt.Errorf("error in fetching icon : %s", iconResp.Status)
+ }
+ } else {
+ return fmt.Errorf("error in fetching icon : empty response")
+ }
+ return nil
+}

From 47843d92d60a7ecab37eed685bf4d3b2d6c87e6d Mon Sep 17 00:00:00 2001
From: Prakash
Date: Mon, 2 Sep 2024 15:32:11 +0530
Subject: [PATCH 29/61] Revert "feat: plugin creation support (#5630)" (#5778)

This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65.
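Among the code this revert removes is CreatePluginOrVersions, which ran plugin and version creation inside a single go-pg transaction with a deferred rollback. A minimal sketch of that transaction pattern, using a hypothetical runInTx helper rather than the service's actual wiring:

package example

import "github.com/go-pg/pg"

// runInTx begins a transaction, runs fn against it, and commits only if fn
// succeeds; the deferred Rollback does nothing harmful after a successful Commit.
func runInTx(db *pg.DB, fn func(tx *pg.Tx) error) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	if err := fn(tx); err != nil {
		return err
	}
	return tx.Commit()
}

The reverted CreatePluginOrVersions follows exactly this shape, branching inside the transaction on pluginDto.Id: zero creates a new parent plugin, non-zero adds a new version to an existing one.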
--- api/restHandler/GlobalPluginRestHandler.go | 67 --- api/router/GlobalPluginRouter.go | 5 +- internal/util/ErrorUtil.go | 9 - pkg/plugin/GlobalPluginService.go | 412 +++--------------- pkg/plugin/adaptor/adaptor.go | 80 ---- pkg/plugin/bean/bean.go | 84 ++-- .../repository/GlobalPluginRepository.go | 178 +------- pkg/plugin/utils/utils.go | 25 -- 8 files changed, 103 insertions(+), 757 deletions(-) delete mode 100644 pkg/plugin/adaptor/adaptor.go diff --git a/api/restHandler/GlobalPluginRestHandler.go b/api/restHandler/GlobalPluginRestHandler.go index 44a2305c0ab..7fc25ec6b16 100644 --- a/api/restHandler/GlobalPluginRestHandler.go +++ b/api/restHandler/GlobalPluginRestHandler.go @@ -35,7 +35,6 @@ import ( type GlobalPluginRestHandler interface { PatchPlugin(w http.ResponseWriter, r *http.Request) - CreatePlugin(w http.ResponseWriter, r *http.Request) GetAllGlobalVariables(w http.ResponseWriter, r *http.Request) ListAllPlugins(w http.ResponseWriter, r *http.Request) @@ -47,7 +46,6 @@ type GlobalPluginRestHandler interface { GetPluginDetailByIds(w http.ResponseWriter, r *http.Request) GetAllUniqueTags(w http.ResponseWriter, r *http.Request) MigratePluginData(w http.ResponseWriter, r *http.Request) - GetAllPluginMinData(w http.ResponseWriter, r *http.Request) } func NewGlobalPluginRestHandler(logger *zap.SugaredLogger, globalPluginService plugin.GlobalPluginService, @@ -422,68 +420,3 @@ func (handler *GlobalPluginRestHandlerImpl) MigratePluginData(w http.ResponseWri } common.WriteJsonResp(w, nil, nil, http.StatusOK) } - -func (handler *GlobalPluginRestHandlerImpl) CreatePlugin(w http.ResponseWriter, r *http.Request) { - userId, err := handler.userService.GetLoggedInUser(r) - if userId == 0 || err != nil { - common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) - return - } - token := r.Header.Get("token") - appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) - if err != nil { - return - } - ok, err := handler.IsUserAuthorized(token, appId) - if err != nil { - common.WriteJsonResp(w, err, nil, http.StatusBadRequest) - return - } - if !ok { - common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) - return - } - decoder := json.NewDecoder(r.Body) - var pluginDataDto bean.PluginParentMetadataDto - err = decoder.Decode(&pluginDataDto) - if err != nil { - handler.logger.Errorw("request err, CreatePlugin", "error", err, "payload", pluginDataDto) - common.WriteJsonResp(w, err, nil, http.StatusBadRequest) - return - } - handler.logger.Infow("request payload received for creating plugins", pluginDataDto, "userId", userId) - - pluginVersionId, err := handler.globalPluginService.CreatePluginOrVersions(&pluginDataDto, userId) - if err != nil { - handler.logger.Errorw("service error, error in creating plugin", "pluginCreateRequestDto", pluginDataDto, "err", err) - common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) - return - } - - common.WriteJsonResp(w, nil, bean.NewPluginMinDto().WithPluginVersionId(pluginVersionId), http.StatusOK) -} - -func (handler *GlobalPluginRestHandlerImpl) GetAllPluginMinData(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("token") - appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) - if err != nil { - return - } - ok, err := handler.IsUserAuthorized(token, appId) - if err != nil { - common.WriteJsonResp(w, err, nil, http.StatusBadRequest) - return - } - if !ok { - common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", 
http.StatusForbidden) - return - } - - pluginDetail, err := handler.globalPluginService.GetAllPluginMinData() - if err != nil { - handler.logger.Errorw("error in getting all unique tags", "err", err) - common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) - return - } - common.WriteJsonResp(w, nil, pluginDetail, http.StatusOK) -} diff --git a/api/router/GlobalPluginRouter.go b/api/router/GlobalPluginRouter.go index 0d8154bf822..06950c33421 100644 --- a/api/router/GlobalPluginRouter.go +++ b/api/router/GlobalPluginRouter.go @@ -41,8 +41,7 @@ type GlobalPluginRouterImpl struct { func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *mux.Router) { globalPluginRouter.Path("/migrate"). HandlerFunc(impl.globalPluginRestHandler.MigratePluginData).Methods("PUT") - globalPluginRouter.Path("/create"). - HandlerFunc(impl.globalPluginRestHandler.CreatePlugin).Methods("POST") + // versioning impact handling to be done for below apis, globalPluginRouter.Path(""). HandlerFunc(impl.globalPluginRestHandler.PatchPlugin).Methods("POST") @@ -69,7 +68,5 @@ func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *m globalPluginRouter.Path("/list/tags"). HandlerFunc(impl.globalPluginRestHandler.GetAllUniqueTags).Methods("GET") - globalPluginRouter.Path("/list/v2/min"). - HandlerFunc(impl.globalPluginRestHandler.GetAllPluginMinData).Methods("GET") } diff --git a/internal/util/ErrorUtil.go b/internal/util/ErrorUtil.go index 43f3c9b942e..59a8d234415 100644 --- a/internal/util/ErrorUtil.go +++ b/internal/util/ErrorUtil.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "net/http" - "strconv" ) type ApiError struct { @@ -37,14 +36,6 @@ type ApiError struct { UserDetailMessage string `json:"userDetailMessage,omitempty"` } -func GetApiError(code int, userMessage, internalMessage string) *ApiError { - return &ApiError{ - HttpStatusCode: code, - Code: strconv.Itoa(code), - InternalMessage: internalMessage, - UserMessage: userMessage, - } -} func NewApiError() *ApiError { return &ApiError{} } diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 5423ab22b2d..55ff942789b 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -24,7 +24,6 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/auth/user/bean" repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" - "github.com/devtron-labs/devtron/pkg/plugin/adaptor" bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" helper2 "github.com/devtron-labs/devtron/pkg/plugin/helper" "github.com/devtron-labs/devtron/pkg/plugin/repository" @@ -32,8 +31,8 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.uber.org/zap" - "golang.org/x/mod/semver" "net/http" + "strconv" "strings" "time" ) @@ -74,11 +73,9 @@ type GlobalPluginService interface { GetDetailedPluginInfoByPluginId(pluginId int) (*bean2.PluginMetadataDto, error) GetAllDetailedPluginInfo() ([]*bean2.PluginMetadataDto, error) - CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) ListAllPluginsV2(filter *bean2.PluginsListFilter) (*bean2.PluginsDto, error) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) GetAllUniqueTags() (*bean2.PluginTagsDto, error) - GetAllPluginMinData() ([]*bean2.PluginMinDto, error) MigratePluginData() error } @@ -432,7 +429,7 @@ func (impl 
*GlobalPluginServiceImpl) validatePluginRequest(pluginReq *bean2.Plug return errors.New("invalid plugin type, should be of the type PRESET or SHARED") } - plugins, err := impl.globalPluginRepository.GetAllPluginMinData() + plugins, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() if err != nil { impl.logger.Errorw("error in getting all plugins", "err", err) return err @@ -673,10 +670,33 @@ func (impl *GlobalPluginServiceImpl) UpdatePluginPipelineScript(dbPluginPipeline func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int, pluginStepsReq []*bean2.PluginStepsDto, userId int32, tx *pg.Tx) error { for _, pluginStep := range pluginStepsReq { - pluginStepData := adaptor.GetPluginStepDbObject(pluginStep, pluginMetadataId, userId) + pluginStepData := &repository.PluginStep{ + PluginId: pluginMetadataId, + Name: pluginStep.Name, + Description: pluginStep.Description, + Index: pluginStep.Index, + StepType: pluginStep.StepType, + RefPluginId: pluginStep.RefPluginId, + OutputDirectoryPath: pluginStep.OutputDirectoryPath, + DependentOnStep: pluginStep.DependentOnStep, + AuditLog: sql.NewDefaultAuditLog(userId), + } //get the script saved for this plugin step if pluginStep.PluginPipelineScript != nil { - pluginPipelineScript := adaptor.GetPluginPipelineScriptDbObject(pluginStep.PluginPipelineScript, userId) + pluginPipelineScript := &repository.PluginPipelineScript{ + Script: pluginStep.PluginPipelineScript.Script, + StoreScriptAt: pluginStep.PluginPipelineScript.StoreScriptAt, + Type: pluginStep.PluginPipelineScript.Type, + DockerfileExists: pluginStep.PluginPipelineScript.DockerfileExists, + MountPath: pluginStep.PluginPipelineScript.MountPath, + MountCodeToContainer: pluginStep.PluginPipelineScript.MountCodeToContainer, + MountCodeToContainerPath: pluginStep.PluginPipelineScript.MountCodeToContainerPath, + MountDirectoryFromHost: pluginStep.PluginPipelineScript.MountDirectoryFromHost, + ContainerImagePath: pluginStep.PluginPipelineScript.ContainerImagePath, + ImagePullSecretType: pluginStep.PluginPipelineScript.ImagePullSecretType, + ImagePullSecret: pluginStep.PluginPipelineScript.ImagePullSecret, + AuditLog: sql.NewDefaultAuditLog(userId), + } pluginPipelineScript, err := impl.globalPluginRepository.SavePluginPipelineScript(pluginPipelineScript, tx) if err != nil { impl.logger.Errorw("error in saving plugin pipeline script", "pluginPipelineScript", pluginPipelineScript, "err", err) @@ -699,7 +719,23 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStep.Id = pluginStepData.Id //create entry in plugin_step_variable for _, pluginStepVariable := range pluginStep.PluginStepVariable { - pluginStepVariableData := adaptor.GetPluginStepVariableDbObject(pluginStepData.Id, pluginStepVariable, userId) + pluginStepVariableData := &repository.PluginStepVariable{ + PluginStepId: pluginStepData.Id, + Name: pluginStepVariable.Name, + Format: pluginStepVariable.Format, + Description: pluginStepVariable.Description, + IsExposed: pluginStepVariable.IsExposed, + AllowEmptyValue: pluginStepVariable.AllowEmptyValue, + DefaultValue: pluginStepVariable.DefaultValue, + Value: pluginStepVariable.Value, + VariableType: pluginStepVariable.VariableType, + ValueType: pluginStepVariable.ValueType, + PreviousStepIndex: pluginStepVariable.PreviousStepIndex, + VariableStepIndex: pluginStepVariable.VariableStepIndex, + VariableStepIndexInPlugin: pluginStepVariable.VariableStepIndexInPlugin, + ReferenceVariableName: 
pluginStepVariable.ReferenceVariableName, + AuditLog: sql.NewDefaultAuditLog(userId), + } pluginStepVariableData, err = impl.globalPluginRepository.SavePluginStepVariables(pluginStepVariableData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step variable", "pluginStepVariableData", pluginStepVariableData, "err", err) @@ -708,7 +744,14 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStepVariable.Id = pluginStepVariableData.Id //create entry in plugin_step_condition for _, pluginStepCondition := range pluginStepVariable.PluginStepCondition { - pluginStepConditionData := adaptor.GetPluginStepConditionDbObject(pluginStepData.Id, pluginStepVariableData.Id, pluginStepCondition, userId) + pluginStepConditionData := &repository.PluginStepCondition{ + PluginStepId: pluginStepData.Id, + ConditionVariableId: pluginStepVariableData.Id, + ConditionType: pluginStepCondition.ConditionType, + ConditionalOperator: pluginStepCondition.ConditionalOperator, + ConditionalValue: pluginStepCondition.ConditionalValue, + AuditLog: sql.NewDefaultAuditLog(userId), + } pluginStepConditionData, err = impl.globalPluginRepository.SavePluginStepConditions(pluginStepConditionData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step condition", "pluginStepConditionData", pluginStepConditionData, "err", err) @@ -725,6 +768,7 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM if len(pluginUpdateReq.Type) == 0 { return nil, errors.New("invalid plugin type, should be of the type PRESET or SHARED") } + dbConnection := impl.globalPluginRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { @@ -812,7 +856,6 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM return nil, err } } - if len(pluginStepsToUpdate) > 0 { err = impl.updateDeepPluginStepData(pluginStepsToUpdate, pluginStepVariables, pluginStepConditions, pluginSteps, userId, tx) if err != nil { @@ -1343,6 +1386,7 @@ func filterPluginStepData(existingPluginStepsInDb []*repository.PluginStep, plug } else { return nil, nil, pluginStepUpdateReq } + return newPluginStepsToCreate, pluginStepsToRemove, pluginStepsToUpdate } @@ -1761,59 +1805,28 @@ func (impl *GlobalPluginServiceImpl) ListAllPluginsV2(filter *bean2.PluginsListF return pluginDetails, nil } -func (impl *GlobalPluginServiceImpl) validateDetailRequest(pluginVersions []*repository.PluginMetadata, pluginVersionIds, parentPluginIds []int) error { - pluginVersionsIdMap, pluginParentIdMap := make(map[int]bool, len(pluginVersionIds)), make(map[int]bool, len(parentPluginIds)) - allPlugins, err := impl.globalPluginRepository.GetAllPluginMinData() - if err != nil { - impl.logger.Errorw("validateDetailRequest, error in getting all plugins parent metadata", "err", err) - return err - } - for _, pluginVersion := range pluginVersions { - pluginVersionsIdMap[pluginVersion.Id] = true - } - for _, plugin := range allPlugins { - pluginParentIdMap[plugin.Id] = true - } - for _, versionId := range pluginVersionIds { - if _, ok := pluginVersionsIdMap[versionId]; !ok { - errorMsg := fmt.Sprintf("there are some plugin version ids in request that do not exist:- %d", versionId) - return util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) - } - } - for _, pluginId := range parentPluginIds { - if _, ok := pluginParentIdMap[pluginId]; !ok { - errorMsg := fmt.Sprintf("there are some plugin parent ids in request that do not exist %d", pluginId) - return 
util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) - } - } - return nil -} // GetPluginDetailV2 returns all details of the of a plugin version according to the pluginVersionIds and parentPluginIds // provided by user, and minimal data for all versions of that plugin. func (impl *GlobalPluginServiceImpl) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) { - var err error - pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() - if err != nil { - impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err) - return nil, err - } - err = impl.validateDetailRequest(pluginVersionsMetadata, pluginVersionIds, parentPluginIds) - if err != nil { - return nil, err - } pluginParentMetadataDtos := make([]*bean2.PluginParentMetadataDto, 0, len(pluginVersionIds)+len(parentPluginIds)) if len(pluginVersionIds) == 0 && len(parentPluginIds) == 0 { - return nil, util.GetApiError(http.StatusBadRequest, bean2.NoPluginOrParentIdProvidedErr, bean2.NoPluginOrParentIdProvidedErr) + return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: strconv.Itoa(http.StatusBadRequest), InternalMessage: bean2.NoPluginOrParentIdProvidedErr, UserMessage: bean2.NoPluginOrParentIdProvidedErr} } pluginVersionIdsMap, parentPluginIdsMap := helper2.GetPluginVersionAndParentPluginIdsMap(pluginVersionIds, parentPluginIds) + var err error pluginParentMetadataIds := make([]int, 0, len(pluginVersionIds)+len(parentPluginIds)) pluginVersionsIdToInclude := make(map[int]bool, len(pluginVersionIds)+len(parentPluginIds)) + pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() + if err != nil { + impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err) + return nil, err + } filteredPluginVersionMetadata := helper2.GetPluginVersionsMetadataByVersionAndParentPluginIds(pluginVersionsMetadata, pluginVersionIdsMap, parentPluginIdsMap) if len(filteredPluginVersionMetadata) == 0 { - return nil, util.GetApiError(http.StatusNotFound, bean2.NoPluginFoundForThisSearchQueryErr, bean2.NoPluginFoundForThisSearchQueryErr) + return nil, &util.ApiError{HttpStatusCode: http.StatusNotFound, Code: strconv.Itoa(http.StatusNotFound), InternalMessage: bean2.NoPluginFoundForThisSearchQueryErr, UserMessage: bean2.NoPluginFoundForThisSearchQueryErr} } for _, version := range filteredPluginVersionMetadata { _, found := pluginVersionIdsMap[version.Id] @@ -1871,6 +1884,7 @@ func (impl *GlobalPluginServiceImpl) MigratePluginData() error { // MigratePluginDataToParentPluginMetadata migrates pre-existing plugin metadata from plugin_metadata table into plugin_parent_metadata table, // and also populate plugin_parent_metadata_id in plugin_metadata. 
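validateV2PluginRequest, removed a little further below, prefixes a bare version with "v" before calling semver.IsValid, because golang.org/x/mod/semver only recognizes version strings that carry the leading "v". A standalone sketch of that normalization, with a hypothetical normalizeAndValidate wrapper:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/mod/semver"
)

// normalizeAndValidate mirrors the removed check: add the "v" prefix that
// golang.org/x/mod/semver requires, then validate.
func normalizeAndValidate(version string) bool {
	if !strings.HasPrefix(version, "v") {
		version = "v" + version
	}
	return semver.IsValid(version)
}

func main() {
	fmt.Println(normalizeAndValidate("1.0.0")) // true, checked as v1.0.0
	fmt.Println(normalizeAndValidate("v1.0"))  // true, major.minor is valid
	fmt.Println(normalizeAndValidate("1.0.x")) // false
}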
+// this operation will happen only once when the get all plugin list v2 api is being called, returns error if any func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(pluginsMetadata []*repository.PluginMetadata) error { dbConnection := impl.globalPluginRepository.GetConnection() tx, err := dbConnection.Begin() @@ -1934,303 +1948,3 @@ func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(plu } return nil } - -func (impl *GlobalPluginServiceImpl) GetAllPluginMinData() ([]*bean2.PluginMinDto, error) { - pluginsParentMinData, err := impl.globalPluginRepository.GetAllPluginMinData() - if err != nil { - impl.logger.Errorw("GetAllPluginMinData, error in getting all plugin parent metadata min data", "err", err) - return nil, err - } - pluginMinList := make([]*bean2.PluginMinDto, 0, len(pluginsParentMinData)) - for _, item := range pluginsParentMinData { - //since creating new version of preset plugin is disabled for end user, hence ignoring PRESET plugin in min list - if item.Type == repository.PLUGIN_TYPE_PRESET { - continue - } - pluginMinList = append(pluginMinList, bean2.NewPluginMinDto().WithParentPluginId(item.Id).WithPluginName(item.Name).WithIcon(item.Icon)) - } - return pluginMinList, nil -} - -func (impl *GlobalPluginServiceImpl) checkValidationOnPluginNameAndIdentifier(pluginReq *bean2.PluginParentMetadataDto) error { - plugins, err := impl.globalPluginRepository.GetAllPluginMinData() - if err != nil { - impl.logger.Errorw("error in getting all plugins", "err", err) - return err - } - for _, plugin := range plugins { - if plugin.Identifier == pluginReq.PluginIdentifier { - return util.GetApiError(http.StatusConflict, bean2.PluginWithSameIdentifierExistsError, bean2.PluginWithSameIdentifierExistsError) - } - if plugin.Name == pluginReq.Name { - return util.GetApiError(http.StatusConflict, bean2.PluginWithSameNameExistError, bean2.PluginWithSameNameExistError) - } - } - return nil -} - -func (impl *GlobalPluginServiceImpl) checkValidationOnVersion(pluginReq *bean2.PluginParentMetadataDto) error { - pluginVersions, err := impl.globalPluginRepository.GetPluginVersionsByParentId(pluginReq.Id) - if err != nil { - impl.logger.Errorw("checkValidationOnVersion, error in getting all plugins versions by parentPluginId", "parentPluginId", pluginReq.Id, "err", err) - return err - } - for _, pluginVersion := range pluginVersions { - if pluginReq.Versions != nil && len(pluginReq.Versions.DetailedPluginVersionData) > 0 && pluginReq.Versions.DetailedPluginVersionData[0] != nil { - // if plugin version from req is already created then return error - if pluginVersion.PluginVersion == pluginReq.Versions.DetailedPluginVersionData[0].Version { - return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionAlreadyExistError, bean2.PluginVersionAlreadyExistError) - } - } - - } - return nil -} - -func (impl *GlobalPluginServiceImpl) validateV2PluginRequest(pluginReq *bean2.PluginParentMetadataDto) error { - if pluginReq.Versions == nil || len(pluginReq.Versions.DetailedPluginVersionData) == 0 || pluginReq.Versions.DetailedPluginVersionData[0] == nil { - return util.GetApiError(http.StatusBadRequest, bean2.NoStepDataToProceedError, bean2.NoStepDataToProceedError) - } - if pluginReq.Id == 0 { - //create plugin req. 
- err := impl.checkValidationOnPluginNameAndIdentifier(pluginReq)
- if err != nil {
- impl.logger.Errorw("error in checkValidationOnPluginNameAndIdentifier", "err", err)
- return err
- }
- } else {
- err := impl.checkValidationOnVersion(pluginReq)
- if err != nil {
- impl.logger.Errorw("error in checkValidationOnVersion", "err", err)
- return err
- }
- }
- version := pluginReq.Versions.DetailedPluginVersionData[0].Version
- if !strings.HasPrefix(version, "v") {
- version = fmt.Sprintf("v%s", version)
- }
- // semantic versioning validation on plugin's version
- if !semver.IsValid(version) {
- return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionNotSemanticallyCorrectError, bean2.PluginVersionNotSemanticallyCorrectError)
- }
- //validate icon url and size
- if len(pluginReq.Icon) > 0 {
- err := utils.FetchIconAndCheckSize(pluginReq.Icon, bean2.PluginIconMaxSizeInBytes)
- if err != nil {
- errMsg := fmt.Sprintf("%s, err: %s", bean2.PluginIconNotCorrectOrReachableError, err.Error())
- return util.GetApiError(http.StatusBadRequest, errMsg, errMsg)
- }
- }
- return nil
-}
-
-func (impl *GlobalPluginServiceImpl) createPluginTagAndRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
- if pluginReq.AreNewTagsPresent {
- err := impl.CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq, userId, tx)
- if err != nil {
- impl.logger.Errorw("createPluginTagAndRelations, error in CreateNewPluginTagsAndRelationsIfRequiredV2", "tags", pluginReq.Tags, "err", err)
- return err
- }
- } else if len(pluginReq.Tags) > 0 {
- err := impl.CreatePluginTagRelations(pluginReq, userId, tx)
- if err != nil {
- impl.logger.Errorw("createPluginTagAndRelations, error in CreatePluginTagRelations", "tags", pluginReq.Tags, "err", err)
- return err
- }
- }
- return nil
-}
-
-func (impl *GlobalPluginServiceImpl) CreatePluginTagRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
- tags, err := impl.globalPluginRepository.GetPluginTagByNames(pluginReq.Tags)
- if err != nil {
- impl.logger.Errorw("CreatePluginTagRelations, error in GetPluginTagByNames", "tags", pluginReq.Tags, "err", err)
- return err
- }
- newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags))
- for _, tag := range tags {
- newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(tag.Id, pluginReq.Id))
- }
-
- if len(newPluginTagRelationsToCreate) > 0 {
- err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx)
- if err != nil {
- impl.logger.Errorw("CreatePluginTagRelations, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err)
- return err
- }
- }
- return nil
-}
-
-func (impl *GlobalPluginServiceImpl) createPluginStepDataAndTagRelations(pluginVersionId int, pluginVersionDetail *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
- if len(pluginVersionDetail.PluginSteps) > 0 {
- err := impl.saveDeepPluginStepData(pluginVersionId, pluginVersionDetail.PluginSteps, userId, tx)
- if err != nil {
- impl.logger.Errorw("createPluginStepDataAndTagRelations, error in saving plugin step data", "err", err)
- return err
- }
- } else {
- return util.GetApiError(http.StatusBadRequest, bean2.PluginStepsNotProvidedError, bean2.PluginStepsNotProvidedError)
- }
-
- err := impl.createPluginTagAndRelations(pluginVersionDetail, userId, tx)
- if err != 
nil { - impl.logger.Errorw("createNewPlugin, error in createPluginTagAndRelations", "tags", pluginVersionDetail.Tags, "err", err) - return err - } - return nil -} - -func (impl *GlobalPluginServiceImpl) createNewPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { - pluginParentMetadata, err := impl.globalPluginRepository.SavePluginParentMetadata(tx, adaptor.GetPluginParentMetadataDbObject(pluginDto, userId)) - if err != nil { - impl.logger.Errorw("createNewPlugin, error in saving plugin parent metadata", "pluginDto", pluginDto, "err", err) - return 0, err - } - pluginDto.Id = pluginParentMetadata.Id - pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId). - WithPluginParentMetadataId(pluginParentMetadata.Id). - WithIsLatestFlag(true) - - pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx) - if err != nil { - impl.logger.Errorw("createNewPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err) - return 0, err - } - pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id - - pluginStageMapping := &repository.PluginStageMapping{ - PluginId: pluginParentMetadata.Id, - StageType: repository.CI_CD, - AuditLog: sql.NewDefaultAuditLog(userId), - } - _, err = impl.globalPluginRepository.SavePluginStageMapping(pluginStageMapping, tx) - if err != nil { - impl.logger.Errorw("createNewPlugin, error in saving plugin stage mapping", "pluginDto", pluginDto, "err", err) - return 0, err - } - - err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx) - if err != nil { - impl.logger.Errorw("createNewPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err) - return 0, err - } - return pluginVersionMetadata.Id, nil -} - -func (impl *GlobalPluginServiceImpl) createNewPluginVersionOfExistingPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { - var pluginParentMinData *repository.PluginParentMetadata - var err error - pluginParentMinData, err = impl.globalPluginRepository.GetPluginParentMinDataById(pluginDto.Id) - if err != nil { - impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in getting plugin parent metadata", "pluginDto", pluginDto, "err", err) - return 0, err - } - // before saving new plugin version marking previous version's isLatest as false. - err = impl.globalPluginRepository.MarkPreviousPluginVersionLatestFalse(pluginParentMinData.Id) - if err != nil { - impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in MarkPreviousPluginVersionLatestFalse", "pluginParentId", pluginDto.Id, "err", err) - return 0, err - } - pluginDto.Name = pluginParentMinData.Name - pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId). - WithPluginParentMetadataId(pluginParentMinData.Id). 
- WithIsLatestFlag(true) - - pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx) - if err != nil { - impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err) - return 0, err - } - pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id - - err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx) - if err != nil { - impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err) - return 0, err - } - return pluginVersionMetadata.Id, nil -} - -func (impl *GlobalPluginServiceImpl) CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { - err := impl.validateV2PluginRequest(pluginDto) - if err != nil { - impl.logger.Errorw("CreatePluginOrVersions, error in validating create plugin request", "pluginReqDto", pluginDto, "err", err) - return 0, err - } - - dbConnection := impl.globalPluginRepository.GetConnection() - tx, err := dbConnection.Begin() - if err != nil { - return 0, err - } - // Rollback tx on error. - defer tx.Rollback() - var versionMetadataId int - if pluginDto.Id > 0 { - // create new version of existing plugin req. - versionMetadataId, err = impl.createNewPluginVersionOfExistingPlugin(tx, pluginDto, userId) - if err != nil { - impl.logger.Errorw("CreatePluginOrVersions, error in creating new version of an existing plugin", "existingPluginName", pluginDto.Name, "err", err) - return 0, err - } - } else { - // create new plugin req. - versionMetadataId, err = impl.createNewPlugin(tx, pluginDto, userId) - if err != nil { - impl.logger.Errorw("CreatePluginOrVersions, error in creating new plugin", "pluginDto", pluginDto, "err", err) - return 0, err - } - } - err = tx.Commit() - if err != nil { - impl.logger.Errorw("CreatePluginOrVersions, error in committing db transaction", "err", err) - return 0, err - } - return versionMetadataId, nil -} - -func (impl *GlobalPluginServiceImpl) CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error { - allPluginTags, err := impl.globalPluginRepository.GetAllPluginTags() - if err != nil { - impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in getting all plugin tags", "err", err) - return err - } - existingTagMap := make(map[string]*repository.PluginTag, len(allPluginTags)) - for _, tag := range allPluginTags { - existingTagMap[tag.Name] = tag - } - //check for new tags, then create new plugin_tag and plugin_tag_relation entry in db when new tags are present in request - newPluginTagsToCreate := make([]*repository.PluginTag, 0, len(pluginReq.Tags)) - newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags)) - - for _, tagReq := range pluginReq.Tags { - if _, ok := existingTagMap[tagReq]; !ok { - newPluginTagsToCreate = append(newPluginTagsToCreate, repository.NewPluginTag().CreateAuditLog(userId).WithName(tagReq)) - } - } - - if len(newPluginTagsToCreate) > 0 { - err = impl.globalPluginRepository.SavePluginTagInBulk(newPluginTagsToCreate, tx) - if err != nil { - impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag", "newPluginTags", newPluginTagsToCreate, "err", err) - return err - } - for _, newTag := range newPluginTagsToCreate { - existingTagMap[newTag.Name] 
= newTag - } - } - - for _, tag := range pluginReq.Tags { - newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(existingTagMap[tag].Id, pluginReq.Id)) - } - - if len(newPluginTagRelationsToCreate) > 0 { - err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx) - if err != nil { - impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err) - return err - } - } - return nil -} diff --git a/pkg/plugin/adaptor/adaptor.go b/pkg/plugin/adaptor/adaptor.go deleted file mode 100644 index e5e0f50e9d3..00000000000 --- a/pkg/plugin/adaptor/adaptor.go +++ /dev/null @@ -1,80 +0,0 @@ -package adaptor - -import ( - bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" - "github.com/devtron-labs/devtron/pkg/plugin/repository" - "github.com/devtron-labs/devtron/pkg/sql" -) - -func GetPluginParentMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginParentMetadata { - return repository.NewPluginParentMetadata().CreateAuditLog(userId). - WithBasicMetadata(pluginDto.Name, pluginDto.PluginIdentifier, pluginDto.Description, pluginDto.Icon, repository.PLUGIN_TYPE_SHARED) -} - -func GetPluginVersionMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginMetadata { - versionDto := pluginDto.Versions.DetailedPluginVersionData[0] - return repository.NewPluginVersionMetadata().CreateAuditLog(userId).WithBasicMetadata(pluginDto.Name, versionDto.Description, versionDto.Version, versionDto.DocLink) -} - -func GetPluginStepDbObject(pluginStepDto *bean2.PluginStepsDto, pluginVersionMetadataId int, userId int32) *repository.PluginStep { - return &repository.PluginStep{ - PluginId: pluginVersionMetadataId, - Name: pluginStepDto.Name, - Description: pluginStepDto.Description, - Index: 1, - StepType: repository.PLUGIN_STEP_TYPE_INLINE, - RefPluginId: pluginStepDto.RefPluginId, - OutputDirectoryPath: pluginStepDto.OutputDirectoryPath, - DependentOnStep: pluginStepDto.DependentOnStep, - AuditLog: sql.NewDefaultAuditLog(userId), - } -} -func GetPluginPipelineScriptDbObject(pluginPipelineScript *bean2.PluginPipelineScript, userId int32) *repository.PluginPipelineScript { - return &repository.PluginPipelineScript{ - Script: pluginPipelineScript.Script, - StoreScriptAt: pluginPipelineScript.StoreScriptAt, - Type: pluginPipelineScript.Type, - DockerfileExists: pluginPipelineScript.DockerfileExists, - MountPath: pluginPipelineScript.MountPath, - MountCodeToContainer: pluginPipelineScript.MountCodeToContainer, - MountCodeToContainerPath: pluginPipelineScript.MountCodeToContainerPath, - MountDirectoryFromHost: pluginPipelineScript.MountDirectoryFromHost, - ContainerImagePath: pluginPipelineScript.ContainerImagePath, - ImagePullSecretType: pluginPipelineScript.ImagePullSecretType, - ImagePullSecret: pluginPipelineScript.ImagePullSecret, - AuditLog: sql.NewDefaultAuditLog(userId), - } - -} - -func GetPluginStepVariableDbObject(pluginStepId int, pluginVariableDto *bean2.PluginVariableDto, userId int32) *repository.PluginStepVariable { - return &repository.PluginStepVariable{ - PluginStepId: pluginStepId, - Name: pluginVariableDto.Name, - Format: pluginVariableDto.Format, - Description: pluginVariableDto.Description, - IsExposed: true, //currently hard coding this, later after plugin creation gets more mature 
will let user decide - AllowEmptyValue: pluginVariableDto.AllowEmptyValue, - DefaultValue: pluginVariableDto.DefaultValue, - Value: pluginVariableDto.Value, - VariableType: pluginVariableDto.VariableType, - ValueType: pluginVariableDto.ValueType, - PreviousStepIndex: pluginVariableDto.PreviousStepIndex, - VariableStepIndex: 1, //currently hard coding this, later after plugin creation gets more mature will let user decide - VariableStepIndexInPlugin: pluginVariableDto.VariableStepIndexInPlugin, - ReferenceVariableName: pluginVariableDto.ReferenceVariableName, - AuditLog: sql.NewDefaultAuditLog(userId), - } -} - -func GetPluginStepConditionDbObject(stepDataId, pluginStepVariableId int, pluginStepCondition *bean2.PluginStepCondition, - userId int32) *repository.PluginStepCondition { - return &repository.PluginStepCondition{ - PluginStepId: stepDataId, - ConditionVariableId: pluginStepVariableId, - ConditionType: pluginStepCondition.ConditionType, - ConditionalOperator: pluginStepCondition.ConditionalOperator, - ConditionalValue: pluginStepCondition.ConditionalValue, - AuditLog: sql.NewDefaultAuditLog(userId), - } -} diff --git a/pkg/plugin/bean/bean.go b/pkg/plugin/bean/bean.go index c31d3463327..55424f3caac 100644 --- a/pkg/plugin/bean/bean.go +++ b/pkg/plugin/bean/bean.go @@ -13,6 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package bean import ( @@ -43,47 +44,15 @@ type PluginListComponentDto struct { //created new struct for backward compatibi } type PluginMetadataDto struct { - Id int `json:"id"` - Name string `json:"name" validate:"required,min=3,max=100,global-entity-name"` - Description string `json:"description" validate:"max=300"` - Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc - Icon string `json:"icon,omitempty"` - Tags []string `json:"tags"` - Action int `json:"action,omitempty"` - PluginStage string `json:"pluginStage,omitempty"` - PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` - AreNewTagsPresent bool `json:"areNewTagsPresent,omitempty"` -} - -type PluginMinDto struct { - ParentPluginId int `json:"id,omitempty"` - PluginName string `json:"name,omitempty"` - Icon string `json:"icon,omitempty"` - PluginVersionId int `json:"pluginVersionId,omitempty"` -} - -func NewPluginMinDto() *PluginMinDto { - return &PluginMinDto{} -} - -func (r *PluginMinDto) WithParentPluginId(id int) *PluginMinDto { - r.ParentPluginId = id - return r -} - -func (r *PluginMinDto) WithPluginName(name string) *PluginMinDto { - r.PluginName = name - return r -} - -func (r *PluginMinDto) WithIcon(icon string) *PluginMinDto { - r.Icon = icon - return r -} - -func (r *PluginMinDto) WithPluginVersionId(versionId int) *PluginMinDto { - r.PluginVersionId = versionId - return r + Id int `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc + Icon string `json:"icon,omitempty"` + Tags []string `json:"tags"` + Action int `json:"action,omitempty"` + PluginStage string `json:"pluginStage,omitempty"` + PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` } type PluginsDto struct { @@ -107,9 +76,9 @@ func (r *PluginsDto) WithTotalCount(count int) *PluginsDto { type PluginParentMetadataDto struct { Id int `json:"id"` - Name string `json:"name" validate:"required,min=3,max=100,global-entity-name"` - PluginIdentifier string `json:"pluginIdentifier" 
validate:"required,min=3,max=100,global-entity-name"` - Description string `json:"description" validate:"max=300"` + Name string `json:"name"` + PluginIdentifier string `json:"pluginIdentifier"` + Description string `json:"description"` Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` Icon string `json:"icon,omitempty"` Versions *PluginVersions `json:"pluginVersions"` @@ -155,6 +124,17 @@ type PluginVersions struct { MinimalPluginVersionData []*PluginsVersionDetail `json:"minimalPluginVersionData"` // contains only few metadata } +type PluginMinDto struct { + PluginName string `json:"pluginName"` + PluginVersions []*PluginVersionsMinDto `json:"pluginVersions"` + Icon string `json:"icon"` +} + +type PluginVersionsMinDto struct { + Id int `json:"id"` + Version string `json:"version"` +} + func NewPluginVersions() *PluginVersions { return &PluginVersions{} } @@ -174,7 +154,7 @@ type PluginsVersionDetail struct { InputVariables []*PluginVariableDto `json:"inputVariables"` OutputVariables []*PluginVariableDto `json:"outputVariables"` DocLink string `json:"docLink"` - Version string `json:"pluginVersion" validate:"max=50,min=3"` + Version string `json:"pluginVersion"` IsLatest bool `json:"isLatest"` UpdatedBy string `json:"updatedBy"` CreatedOn time.Time `json:"-"` @@ -356,18 +336,10 @@ type RegistryCredentials struct { } const ( - NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" - NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" - PluginStepsNotProvidedError = "plugin steps not provided" - PluginWithSameNameExistError = "plugin with the same name exists, please choose another name" - PluginWithSameIdentifierExistsError = "plugin with the same identifier exists, please choose another identifier name" - PluginVersionNotSemanticallyCorrectError = "please provide a plugin version that adheres to Semantic Versioning 2.0.0 to ensure compatibility and proper versioning" - PluginIconNotCorrectOrReachableError = "cannot validate icon, make sure that provided url link is reachable" - PluginVersionAlreadyExistError = "this plugin version already exists, please provide another plugin version" - NoStepDataToProceedError = "no step data provided to save, please provide a plugin step to proceed further" + NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" + NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" ) const ( - SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" - PluginIconMaxSizeInBytes = 2 * 1024 * 1024 + SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" ) diff --git a/pkg/plugin/repository/GlobalPluginRepository.go b/pkg/plugin/repository/GlobalPluginRepository.go index 9cc50748f29..8b650935231 100644 --- a/pkg/plugin/repository/GlobalPluginRepository.go +++ b/pkg/plugin/repository/GlobalPluginRepository.go @@ -99,16 +99,6 @@ func (r *PluginParentMetadata) CreateAuditLog(userId int32) *PluginParentMetadat return r } -func (r *PluginParentMetadata) WithBasicMetadata(name, identifier, description, icon string, pluginType PluginType) *PluginParentMetadata { - r.Name = name - r.Identifier = identifier - r.Description = description - r.Icon = icon - r.Type = pluginType - r.Deleted = false - return r -} - // SetParentPluginMetadata method signature used only for migration purposes, sets pluginVersionsMetadata into plugin_parent_metadata func (r *PluginParentMetadata) 
SetParentPluginMetadata(pluginMetadata *PluginMetadata) *PluginParentMetadata { r.Name = pluginMetadata.Name @@ -145,38 +135,6 @@ type PluginMetadata struct { sql.AuditLog } -func NewPluginVersionMetadata() *PluginMetadata { - return &PluginMetadata{} -} - -func (r *PluginMetadata) CreateAuditLog(userId int32) *PluginMetadata { - r.CreatedBy = userId - r.CreatedOn = time.Now() - r.UpdatedBy = userId - r.UpdatedOn = time.Now() - return r -} - -func (r *PluginMetadata) WithBasicMetadata(name, description, pluginVersion, docLink string) *PluginMetadata { - r.Name = name - r.PluginVersion = pluginVersion - r.Description = description - r.DocLink = docLink - r.Deleted = false - r.IsDeprecated = false - return r -} - -func (r *PluginMetadata) WithPluginParentMetadataId(parentId int) *PluginMetadata { - r.PluginParentMetadataId = parentId - return r -} - -func (r *PluginMetadata) WithIsLatestFlag(isLatest bool) *PluginMetadata { - r.IsLatest = isLatest - return r -} - type PluginTag struct { tableName struct{} `sql:"plugin_tag" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -185,23 +143,6 @@ type PluginTag struct { sql.AuditLog } -func NewPluginTag() *PluginTag { - return &PluginTag{} -} - -func (r *PluginTag) WithName(name string) *PluginTag { - r.Name = name - return r -} - -func (r *PluginTag) CreateAuditLog(userId int32) *PluginTag { - r.CreatedBy = userId - r.CreatedOn = time.Now() - r.UpdatedBy = userId - r.UpdatedOn = time.Now() - return r -} - type PluginTagRelation struct { tableName struct{} `sql:"plugin_tag_relation" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -210,24 +151,6 @@ type PluginTagRelation struct { sql.AuditLog } -func NewPluginTagRelation() *PluginTagRelation { - return &PluginTagRelation{} -} - -func (r *PluginTagRelation) WithTagAndPluginId(tagId, pluginId int) *PluginTagRelation { - r.TagId = tagId - r.PluginId = pluginId - return r -} - -func (r *PluginTagRelation) CreateAuditLog(userId int32) *PluginTagRelation { - r.CreatedBy = userId - r.CreatedOn = time.Now() - r.UpdatedBy = userId - r.UpdatedOn = time.Now() - return r -} - // Below two tables are used at pipeline-steps level too type PluginPipelineScript struct { @@ -324,9 +247,7 @@ type GlobalPluginRepository interface { GetMetaDataForAllPlugins() ([]*PluginMetadata, error) GetMetaDataForPluginWithStageType(stageType int) ([]*PluginMetadata, error) GetMetaDataByPluginId(pluginId int) (*PluginMetadata, error) - GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) GetAllPluginTags() ([]*PluginTag, error) - GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) GetAllPluginTagRelations() ([]*PluginTagRelation, error) GetTagsByPluginId(pluginId int) ([]string, error) GetScriptDetailById(id int) (*PluginPipelineScript, error) @@ -343,14 +264,10 @@ type GlobalPluginRepository interface { GetConditionsByPluginId(pluginId int) ([]*PluginStepCondition, error) GetPluginStageMappingByPluginId(pluginId int) (*PluginStageMapping, error) GetConnection() (dbConnection *pg.DB) - GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) GetPluginParentMetadataByIdentifier(pluginIdentifier string) (*PluginParentMetadata, error) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) GetPluginParentMetadataByIds(ids []int) ([]*PluginParentMetadata, error) - GetAllPluginMinData() ([]*PluginParentMetadata, error) - GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) - MarkPreviousPluginVersionLatestFalse(pluginParentId 
int) error SavePluginMetadata(pluginMetadata *PluginMetadata, tx *pg.Tx) (*PluginMetadata, error) SavePluginStageMapping(pluginStageMapping *PluginStageMapping, tx *pg.Tx) (*PluginStageMapping, error) @@ -434,19 +351,6 @@ func (impl *GlobalPluginRepositoryImpl) GetAllPluginTags() ([]*PluginTag, error) return tags, nil } -func (impl *GlobalPluginRepositoryImpl) GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) { - var tags []*PluginTag - err := impl.dbConnection.Model(&tags). - Where("deleted = ?", false). - Where("name in (?)", pg.In(tagNames)). - Select() - if err != nil { - impl.logger.Errorw("err in getting all tags by names", "tagNames", tagNames, "err", err) - return nil, err - } - return tags, nil -} - func (impl *GlobalPluginRepositoryImpl) GetAllPluginTagRelations() ([]*PluginTagRelation, error) { var rel []*PluginTagRelation err := impl.dbConnection.Model(&rel). @@ -481,18 +385,6 @@ func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginId(pluginId int) (*Pl return &plugin, nil } -func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) { - var plugins []*PluginMetadata - err := impl.dbConnection.Model(&plugins). - Where("deleted = ?", false). - Where("id in (?)", pg.In(pluginIds)).Select() - if err != nil { - impl.logger.Errorw("err in getting plugins by pluginIds", "pluginIds", pluginIds, "err", err) - return nil, err - } - return plugins, nil -} - func (impl *GlobalPluginRepositoryImpl) GetStepsByPluginIds(pluginIds []int) ([]*PluginStep, error) { var pluginSteps []*PluginStep err := impl.dbConnection.Model(&pluginSteps). @@ -619,20 +511,6 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginByName(pluginName string) ([]*P } -func (impl *GlobalPluginRepositoryImpl) GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) { - var plugin []*PluginMetadata - err := impl.dbConnection.Model(&plugin). - Where("plugin_parent_metadata_id = ?", parentPluginId). - Where("deleted = ?", false). - Where("is_deprecated = ?", false). - Select() - if err != nil { - impl.logger.Errorw("err in getting pluginVersionMetadata by parentPluginId", "parentPluginId", parentPluginId, "err", err) - return nil, err - } - return plugin, nil -} - func (impl *GlobalPluginRepositoryImpl) GetAllPluginMetaData() ([]*PluginMetadata, error) { var plugins []*PluginMetadata err := impl.dbConnection.Model(&plugins).Where("deleted = ?", false).Select() @@ -822,18 +700,6 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIdentifier(plug return &pluginParentMetadata, nil } -func (impl *GlobalPluginRepositoryImpl) GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) { - var pluginParentMetadata PluginParentMetadata - err := impl.dbConnection.Model(&pluginParentMetadata). - Column("plugin_parent_metadata.id", "plugin_parent_metadata.name"). - Where("id = ?", id). 
- Where("deleted = ?", false).Select() - if err != nil { - return nil, err - } - return &pluginParentMetadata, nil -} - func (impl *GlobalPluginRepositoryImpl) SavePluginParentMetadata(tx *pg.Tx, pluginParentMetadata *PluginParentMetadata) (*PluginParentMetadata, error) { err := tx.Insert(pluginParentMetadata) return pluginParentMetadata, err @@ -846,20 +712,24 @@ func (impl *GlobalPluginRepositoryImpl) UpdatePluginMetadataInBulk(pluginsMetada func (impl *GlobalPluginRepositoryImpl) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) { var plugins []*PluginParentMetadata - query := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + + subQuery := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" - whereCondition := fmt.Sprintf(" where ppm.deleted=false AND pm.deleted=false AND pm.is_latest=true") + whereCondition := fmt.Sprintf(" where ppm.deleted=false") + orderCondition := fmt.Sprintf(" ORDER BY ppm.id asc") if len(tags) > 0 { - tagFilterSubQuery := fmt.Sprintf("select ptr.plugin_id from plugin_tag_relation ptr inner join plugin_tag pt on ptr.tag_id =pt.id where pt.deleted =false and pt.name in (%s) group by ptr.plugin_id having count(ptr.plugin_id )=%d", helper.GetCommaSepratedStringWithComma(tags), len(tags)) - whereCondition += fmt.Sprintf(" AND pm.id in (%s)", tagFilterSubQuery) + subQuery = "select DISTINCT ON(ppm.id) ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + + " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" + + " left join plugin_tag_relation ptr on ptr.plugin_id=pm.id" + + " left join plugin_tag pt on ptr.tag_id=pt.id" + whereCondition += fmt.Sprintf(" AND pm.deleted=false AND pt.deleted=false AND pt.name in (%s)", helper.GetCommaSepratedStringWithComma(tags)) } if len(searchKey) > 0 { searchKeyLike := "%" + searchKey + "%" whereCondition += fmt.Sprintf(" AND (pm.description ilike '%s' or pm.name ilike '%s')", searchKeyLike, searchKeyLike) } - orderCondition := " ORDER BY ppm.name asc;" - - query += whereCondition + orderCondition + whereCondition += fmt.Sprintf(" AND pm.is_latest=true") + subQuery += whereCondition + orderCondition + query := fmt.Sprintf(" select * from (%s) x ORDER BY name asc;", subQuery) _, err := impl.dbConnection.Query(&plugins, query) if err != nil { return nil, err @@ -879,29 +749,3 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIds(ids []int) } return plugins, nil } - -func (impl *GlobalPluginRepositoryImpl) GetAllPluginMinData() ([]*PluginParentMetadata, error) { - var plugins []*PluginParentMetadata - err := impl.dbConnection.Model(&plugins). - Column("plugin_parent_metadata.id", "plugin_parent_metadata.name", "plugin_parent_metadata.type", "plugin_parent_metadata.icon", "plugin_parent_metadata.identifier"). - Where("deleted = ?", false). 
- Select() - if err != nil { - impl.logger.Errorw("err in getting all plugin parent metadata min data", "err", err) - return nil, err - } - return plugins, nil -} - -func (impl *GlobalPluginRepositoryImpl) MarkPreviousPluginVersionLatestFalse(pluginParentId int) error { - var model PluginMetadata - _, err := impl.dbConnection.Model(&model). - Set("is_latest = ?", false). - Where("id = (select id from plugin_metadata where plugin_parent_metadata_id = ? and is_latest =true order by created_on desc limit ?)", pluginParentId, 1). - Update() - if err != nil { - impl.logger.Errorw("error in updating last version isLatest as false for a plugin parent id", "pluginParentId", pluginParentId, "err", err) - return err - } - return nil -} diff --git a/pkg/plugin/utils/utils.go b/pkg/plugin/utils/utils.go index 168e694d89b..6d78a291439 100644 --- a/pkg/plugin/utils/utils.go +++ b/pkg/plugin/utils/utils.go @@ -21,11 +21,9 @@ import ( "fmt" bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" "github.com/devtron-labs/devtron/pkg/plugin/repository" - "net/http" "regexp" "sort" "strings" - "time" ) func GetStageType(stageTypeReq string) (int, error) { @@ -74,26 +72,3 @@ func SortPluginsVersionDetailSliceByCreatedOn(pluginsVersionDetail []*bean2.Plug return false }) } - -func FetchIconAndCheckSize(url string, maxSize int64) error { - client := http.Client{ - Timeout: 5 * time.Second, - } - iconResp, err := client.Get(url) - if err != nil { - return fmt.Errorf("error in fetching icon : %s", err.Error()) - } - if iconResp != nil { - if iconResp.StatusCode >= 200 && iconResp.StatusCode < 300 { - if iconResp.ContentLength > maxSize { - return fmt.Errorf("icon size too large") - } - iconResp.Body.Close() - } else { - return fmt.Errorf("error in fetching icon : %s", iconResp.Status) - } - } else { - return fmt.Errorf("error in fetching icon : empty response") - } - return nil -} From fd90dfb64540e9136ecfd2f0683fad08c28aa356 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Mon, 2 Sep 2024 19:00:03 +0530 Subject: [PATCH 30/61] fix: unimplemented cluster cron service (#5781) --- api/cluster/wire_cluster.go | 1 + .../application/k8sApplicationRestHandler.go | 1 + api/k8s/wire_k8sApp.go | 3 - cmd/external-app/wire_gen.go | 7 +- pkg/cluster/ClusterCronService.go | 72 ------------------- pkg/cluster/ClusterService.go | 49 ++++++++++--- pkg/cluster/ClusterServiceExtended.go | 27 ++----- pkg/k8s/capacity/k8sCapacityService.go | 2 +- util/GlobalConfig.go | 6 ++ wire_gen.go | 32 +++++---- 10 files changed, 77 insertions(+), 123 deletions(-) delete mode 100644 pkg/cluster/ClusterCronService.go diff --git a/api/cluster/wire_cluster.go b/api/cluster/wire_cluster.go index 2a38444542e..bbfa050f87c 100644 --- a/api/cluster/wire_cluster.go +++ b/api/cluster/wire_cluster.go @@ -29,6 +29,7 @@ import ( var ClusterWireSet = wire.NewSet( repository.NewClusterRepositoryImpl, wire.Bind(new(repository.ClusterRepository), new(*repository.ClusterRepositoryImpl)), + cluster.NewClusterServiceImpl, cluster.NewClusterServiceImplExtended, wire.Bind(new(cluster.ClusterService), new(*cluster.ClusterServiceImplExtended)), diff --git a/api/k8s/application/k8sApplicationRestHandler.go b/api/k8s/application/k8sApplicationRestHandler.go index b6830b7d0d7..d153d84c980 100644 --- a/api/k8s/application/k8sApplicationRestHandler.go +++ b/api/k8s/application/k8sApplicationRestHandler.go @@ -559,6 +559,7 @@ func (handler *K8sApplicationRestHandlerImpl) GetPodLogs(w http.ResponseWriter, common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return } + 
handler.logger.Infow("get pod logs request", "request", request) handler.requestValidationAndRBAC(w, r, token, request) lastEventId := r.Header.Get(bean2.LastEventID) isReconnect := false diff --git a/api/k8s/wire_k8sApp.go b/api/k8s/wire_k8sApp.go index 0e940ca648a..c728d597ef5 100644 --- a/api/k8s/wire_k8sApp.go +++ b/api/k8s/wire_k8sApp.go @@ -53,7 +53,4 @@ var K8sApplicationWireSet = wire.NewSet( informer.NewGlobalMapClusterNamespace, informer.NewK8sInformerFactoryImpl, wire.Bind(new(informer.K8sInformerFactory), new(*informer.K8sInformerFactoryImpl)), - - cluster.NewClusterCronServiceImpl, - wire.Bind(new(cluster.ClusterCronService), new(*cluster.ClusterCronServiceImpl)), ) diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 23dbbb83014..5271cca4cdd 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -201,7 +201,11 @@ func InitializeApp() (*App, error) { clusterRepositoryImpl := repository2.NewClusterRepositoryImpl(db, sugaredLogger) syncMap := informer.NewGlobalMapClusterNamespace() k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, syncMap, k8sServiceImpl) - clusterServiceImpl := cluster.NewClusterServiceImpl(clusterRepositoryImpl, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl) + cronLoggerImpl := cron.NewCronLoggerImpl(sugaredLogger) + clusterServiceImpl, err := cluster.NewClusterServiceImpl(clusterRepositoryImpl, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl, environmentVariables, cronLoggerImpl) + if err != nil { + return nil, err + } appStatusRepositoryImpl := appStatus.NewAppStatusRepositoryImpl(db, sugaredLogger) environmentRepositoryImpl := repository2.NewEnvironmentRepositoryImpl(db, sugaredLogger, appStatusRepositoryImpl) attributesRepositoryImpl := repository3.NewAttributesRepositoryImpl(db) @@ -334,7 +338,6 @@ func InitializeApp() (*App, error) { } moduleRepositoryImpl := moduleRepo.NewModuleRepositoryImpl(db) providerIdentifierServiceImpl := providerIdentifier.NewProviderIdentifierServiceImpl(sugaredLogger) - cronLoggerImpl := cron.NewCronLoggerImpl(sugaredLogger) telemetryEventClientImpl, err := telemetry.NewTelemetryEventClientImpl(sugaredLogger, httpClient, clusterServiceImpl, k8sServiceImpl, acdAuthConfig, userServiceImpl, attributesRepositoryImpl, ssoLoginServiceImpl, posthogClient, moduleRepositoryImpl, serverDataStoreServerDataStore, userAuditServiceImpl, helmAppClientImpl, installedAppRepositoryImpl, providerIdentifierServiceImpl, cronLoggerImpl) if err != nil { return nil, err diff --git a/pkg/cluster/ClusterCronService.go b/pkg/cluster/ClusterCronService.go deleted file mode 100644 index 6b4a5567877..00000000000 --- a/pkg/cluster/ClusterCronService.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2024. Devtron Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cluster - -import ( - "fmt" - "github.com/caarlos0/env/v6" - cron2 "github.com/devtron-labs/devtron/util/cron" - "github.com/robfig/cron/v3" - "go.uber.org/zap" -) - -type ClusterCronService interface { -} - -type ClusterCronServiceImpl struct { - logger *zap.SugaredLogger - clusterService ClusterService -} - -type ClusterStatusConfig struct { - ClusterStatusCronTime int `env:"CLUSTER_STATUS_CRON_TIME" envDefault:"15"` -} - -func NewClusterCronServiceImpl(logger *zap.SugaredLogger, clusterService ClusterService, cronLogger *cron2.CronLoggerImpl) (*ClusterCronServiceImpl, error) { - clusterCronServiceImpl := &ClusterCronServiceImpl{ - logger: logger, - clusterService: clusterService, - } - // initialise cron - newCron := cron.New(cron.WithChain(cron.Recover(cronLogger))) - newCron.Start() - cfg := &ClusterStatusConfig{} - err := env.Parse(cfg) - if err != nil { - fmt.Println("failed to parse server cluster status config: " + err.Error()) - } - // add function into cron - _, err = newCron.AddFunc(fmt.Sprintf("@every %dm", cfg.ClusterStatusCronTime), clusterCronServiceImpl.GetAndUpdateClusterConnectionStatus) - if err != nil { - fmt.Println("error in adding cron function into cluster cron service") - return clusterCronServiceImpl, err - } - return clusterCronServiceImpl, nil -} - -func (impl *ClusterCronServiceImpl) GetAndUpdateClusterConnectionStatus() { - impl.logger.Debug("starting cluster connection status fetch thread") - defer impl.logger.Debug("stopped cluster connection status fetch thread") - - //getting all clusters - clusters, err := impl.clusterService.FindAllExceptVirtual() - if err != nil { - impl.logger.Errorw("error in getting all clusters", "err", err) - return - } - impl.clusterService.ConnectClustersInBatch(clusters, true) -} diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index 54d59508dbe..ad088f74cf2 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -20,6 +20,8 @@ import ( "context" "encoding/json" "fmt" + cronUtil "github.com/devtron-labs/devtron/util/cron" + "github.com/robfig/cron/v3" "log" "net/http" "net/url" @@ -43,7 +45,7 @@ import ( "github.com/devtron-labs/devtron/internal/constants" "github.com/devtron-labs/devtron/internal/util" "github.com/devtron-labs/devtron/pkg/cluster/repository" - util2 "github.com/devtron-labs/devtron/util" + globalUtil "github.com/devtron-labs/devtron/util" "github.com/go-pg/pg" "go.uber.org/zap" ) @@ -201,7 +203,9 @@ type ClusterServiceImpl struct { func NewClusterServiceImpl(repository repository.ClusterRepository, logger *zap.SugaredLogger, K8sUtil *k8s.K8sServiceImpl, K8sInformerFactory informer.K8sInformerFactory, userAuthRepository repository3.UserAuthRepository, userRepository repository3.UserRepository, - roleGroupRepository repository3.RoleGroupRepository) *ClusterServiceImpl { + roleGroupRepository repository3.RoleGroupRepository, + envVariables *globalUtil.EnvironmentVariables, + cronLogger *cronUtil.CronLoggerImpl) (*ClusterServiceImpl, error) { clusterService := &ClusterServiceImpl{ clusterRepository: repository, logger: logger, @@ -211,8 +215,19 @@ func NewClusterServiceImpl(repository repository.ClusterRepository, logger *zap. 
userRepository: userRepository, roleGroupRepository: roleGroupRepository, } + // initialise cron + newCron := cron.New(cron.WithChain(cron.Recover(cronLogger))) + newCron.Start() + cfg := envVariables.GlobalClusterConfig + // add function into cron + _, err := newCron.AddFunc(fmt.Sprintf("@every %dm", cfg.ClusterStatusCronTime), clusterService.getAndUpdateClusterConnectionStatus) + if err != nil { + fmt.Println("error in adding cron function into cluster cron service") + return clusterService, err + } + logger.Infow("cluster cron service started successfully!", "cronTime", cfg.ClusterStatusCronTime) go clusterService.buildInformer() - return clusterService + return clusterService, nil } func (impl *ClusterServiceImpl) ConvertClusterBeanToCluster(clusterBean *ClusterBean, userId int32) *repository.Cluster { @@ -242,6 +257,20 @@ func (impl *ClusterServiceImpl) ConvertClusterBeanToCluster(clusterBean *Cluster return model } +// getAndUpdateClusterConnectionStatus is a cron function to update the connection status of all clusters +func (impl *ClusterServiceImpl) getAndUpdateClusterConnectionStatus() { + impl.logger.Debug("starting cluster connection status fetch thread") + defer impl.logger.Debug("stopped cluster connection status fetch thread") + + //getting all clusters + clusters, err := impl.FindAllExceptVirtual() + if err != nil { + impl.logger.Errorw("error in getting all clusters", "err", err) + return + } + impl.ConnectClustersInBatch(clusters, true) +} + func (impl *ClusterServiceImpl) Save(parent context.Context, bean *ClusterBean, userId int32) (*ClusterBean, error) { //validating config @@ -289,7 +318,7 @@ func (impl *ClusterServiceImpl) Save(parent context.Context, bean *ClusterBean, //on successful creation of new cluster, update informer cache for namespace group by cluster //here sync for ea mode only - if util2.IsBaseStack() { + if globalUtil.IsBaseStack() { impl.SyncNsInformer(bean) } impl.logger.Info("saving secret for cluster informer") @@ -530,7 +559,7 @@ func (impl *ClusterServiceImpl) Update(ctx context.Context, bean *ClusterBean, u bean.Id = model.Id //here sync for ea mode only - if bean.HasConfigOrUrlChanged && util2.IsBaseStack() { + if bean.HasConfigOrUrlChanged && globalUtil.IsBaseStack() { impl.SyncNsInformer(bean) } impl.logger.Infow("saving secret for cluster informer") @@ -643,7 +672,7 @@ func (impl *ClusterServiceImpl) buildInformer() { impl.K8sInformerFactory.BuildInformer(clusterInfo) } -func (impl ClusterServiceImpl) DeleteFromDb(bean *ClusterBean, userId int32) error { +func (impl *ClusterServiceImpl) DeleteFromDb(bean *ClusterBean, userId int32) error { existingCluster, err := impl.clusterRepository.FindById(bean.Id) if err != nil { impl.logger.Errorw("No matching entry found for delete.", "id", bean.Id) @@ -668,7 +697,7 @@ func (impl ClusterServiceImpl) DeleteFromDb(bean *ClusterBean, userId int32) err return nil } -func (impl ClusterServiceImpl) CheckIfConfigIsValid(cluster *ClusterBean) error { +func (impl *ClusterServiceImpl) CheckIfConfigIsValid(cluster *ClusterBean) error { clusterConfig := cluster.GetClusterConfig() response, err := impl.K8sUtil.DiscoveryClientGetLiveZCall(clusterConfig) if err != nil { @@ -1068,7 +1097,7 @@ func (impl *ClusterServiceImpl) GetAndUpdateConnectionStatusForOneCluster(k8sCli mutex.Unlock() } -func (impl ClusterServiceImpl) ConvertClusterBeanObjectToCluster(bean *ClusterBean) *v1alpha1.Cluster { +func (impl *ClusterServiceImpl) ConvertClusterBeanObjectToCluster(bean *ClusterBean) *v1alpha1.Cluster { configMap := 
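The hunk above folds the deleted ClusterCronService into NewClusterServiceImpl: the constructor now builds a recover-wrapped robfig/cron scheduler and registers the connection-status refresh on an `@every <n>m` spec. A minimal standalone sketch of that same cron/v3 pattern, assuming only the public cron API; the 15-minute interval and the print statement are illustrative stand-ins for Devtron's config and service call, not its actual code:

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	// interval in minutes; stands in for GlobalClusterConfig.ClusterStatusCronTime
	statusCronMinutes := 15

	// recover-wrapped scheduler, mirroring cron.New(cron.WithChain(cron.Recover(cronLogger)))
	c := cron.New(cron.WithChain(cron.Recover(cron.DefaultLogger)))
	_, err := c.AddFunc(fmt.Sprintf("@every %dm", statusCronMinutes), func() {
		// in the service this fetches all non-virtual clusters and
		// refreshes their connection status in batch
		fmt.Println("refreshing cluster connection status")
	})
	if err != nil {
		panic(err)
	}
	c.Start()

	select {} // block; a real service ties shutdown to its lifecycle instead
}
```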
bean.Config serverUrl := bean.ServerUrl bearerToken := "" @@ -1097,7 +1126,7 @@ func (impl ClusterServiceImpl) ConvertClusterBeanObjectToCluster(bean *ClusterBe return cl } -func (impl ClusterServiceImpl) GetClusterConfigByClusterId(clusterId int) (*k8s.ClusterConfig, error) { +func (impl *ClusterServiceImpl) GetClusterConfigByClusterId(clusterId int) (*k8s.ClusterConfig, error) { clusterBean, err := impl.FindById(clusterId) if err != nil { impl.logger.Errorw("error in getting clusterBean by cluster id", "err", err, "clusterId", clusterId) @@ -1108,7 +1137,7 @@ func (impl ClusterServiceImpl) GetClusterConfigByClusterId(clusterId int) (*k8s. return clusterConfig, nil } -func (impl ClusterServiceImpl) IsClusterReachable(clusterId int) (bool, error) { +func (impl *ClusterServiceImpl) IsClusterReachable(clusterId int) (bool, error) { cluster, err := impl.clusterRepository.FindById(clusterId) if err != nil { impl.logger.Errorw("error in finding cluster from clusterId", "envId", clusterId) diff --git a/pkg/cluster/ClusterServiceExtended.go b/pkg/cluster/ClusterServiceExtended.go index 045d5ce5aee..c8047bc1625 100644 --- a/pkg/cluster/ClusterServiceExtended.go +++ b/pkg/cluster/ClusterServiceExtended.go @@ -27,9 +27,6 @@ import ( cluster3 "github.com/argoproj/argo-cd/v2/pkg/apiclient/cluster" "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" "github.com/devtron-labs/common-lib/utils/k8s" - repository5 "github.com/devtron-labs/devtron/pkg/auth/user/repository" - "github.com/devtron-labs/devtron/pkg/k8s/informer" - cluster2 "github.com/devtron-labs/devtron/client/argocdServer/cluster" "github.com/devtron-labs/devtron/client/grafana" "github.com/devtron-labs/devtron/internal/constants" @@ -37,7 +34,6 @@ import ( appStoreBean "github.com/devtron-labs/devtron/pkg/appStore/bean" repository2 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" "github.com/devtron-labs/devtron/pkg/cluster/repository" - "go.uber.org/zap" ) // extends ClusterServiceImpl and enhances method of ClusterService with full mode specific errors @@ -50,30 +46,19 @@ type ClusterServiceImplExtended struct { *ClusterServiceImpl } -func NewClusterServiceImplExtended(repository repository.ClusterRepository, environmentRepository repository.EnvironmentRepository, - grafanaClient grafana.GrafanaClient, logger *zap.SugaredLogger, installedAppRepository repository2.InstalledAppRepository, - K8sUtil *k8s.K8sServiceImpl, - clusterServiceCD cluster2.ServiceClient, K8sInformerFactory informer.K8sInformerFactory, - userAuthRepository repository5.UserAuthRepository, - userRepository repository5.UserRepository, roleGroupRepository repository5.RoleGroupRepository, - gitOpsConfigReadService config.GitOpsConfigReadService) *ClusterServiceImplExtended { +func NewClusterServiceImplExtended(environmentRepository repository.EnvironmentRepository, + grafanaClient grafana.GrafanaClient, installedAppRepository repository2.InstalledAppRepository, + clusterServiceCD cluster2.ServiceClient, + gitOpsConfigReadService config.GitOpsConfigReadService, + clusterServiceImpl *ClusterServiceImpl) *ClusterServiceImplExtended { clusterServiceExt := &ClusterServiceImplExtended{ environmentRepository: environmentRepository, grafanaClient: grafanaClient, installedAppRepository: installedAppRepository, clusterServiceCD: clusterServiceCD, gitOpsConfigReadService: gitOpsConfigReadService, - ClusterServiceImpl: &ClusterServiceImpl{ - clusterRepository: repository, - logger: logger, - K8sUtil: K8sUtil, - K8sInformerFactory: K8sInformerFactory, - 
userAuthRepository: userAuthRepository, - userRepository: userRepository, - roleGroupRepository: roleGroupRepository, - }, + ClusterServiceImpl: clusterServiceImpl, } - go clusterServiceExt.buildInformer() return clusterServiceExt } diff --git a/pkg/k8s/capacity/k8sCapacityService.go b/pkg/k8s/capacity/k8sCapacityService.go index 3ed56849ea7..7768f810f50 100644 --- a/pkg/k8s/capacity/k8sCapacityService.go +++ b/pkg/k8s/capacity/k8sCapacityService.go @@ -113,7 +113,7 @@ func (impl *K8sCapacityServiceImpl) GetClusterCapacityDetail(ctx context.Context if err != nil { if client.IsClusterUnReachableError(err) { impl.logger.Errorw("k8s cluster unreachable", "err", err) - return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, UserMessage: err.Error()} + return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, UserMessage: err.Error(), InternalMessage: err.Error()} } impl.logger.Errorw("error in getting node list", "err", err, "clusterId", cluster.Id) return nil, err diff --git a/util/GlobalConfig.go b/util/GlobalConfig.go index e980325d297..879c34094ec 100644 --- a/util/GlobalConfig.go +++ b/util/GlobalConfig.go @@ -25,6 +25,7 @@ type EnvironmentVariables struct { DevtronSecretConfig *DevtronSecretConfig DeploymentServiceTypeConfig *DeploymentServiceTypeConfig TerminalEnvVariables *TerminalEnvVariables + GlobalClusterConfig *GlobalClusterConfig } type DeploymentServiceTypeConfig struct { @@ -43,6 +44,10 @@ type GlobalEnvVariables struct { ExecuteWireNilChecker bool `env:"EXECUTE_WIRE_NIL_CHECKER" envDefault:"false"` } +type GlobalClusterConfig struct { + ClusterStatusCronTime int `env:"CLUSTER_STATUS_CRON_TIME" envDefault:"15"` +} + type DevtronSecretConfig struct { DevtronSecretName string `env:"DEVTRON_SECRET_NAME" envDefault:"devtron-secret"` DevtronDexSecretNamespace string `env:"DEVTRON_DEX_SECRET_NAMESPACE" envDefault:"devtroncd"` @@ -58,6 +63,7 @@ func GetEnvironmentVariables() (*EnvironmentVariables, error) { DevtronSecretConfig: &DevtronSecretConfig{}, DeploymentServiceTypeConfig: &DeploymentServiceTypeConfig{}, TerminalEnvVariables: &TerminalEnvVariables{}, + GlobalClusterConfig: &GlobalClusterConfig{}, } err := env.Parse(cfg) if err != nil { diff --git a/wire_gen.go b/wire_gen.go index 0bc013a9cf0..76510b4eab7 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -239,7 +239,6 @@ func InitializeApp() (*App, error) { } appStatusRepositoryImpl := appStatus.NewAppStatusRepositoryImpl(db, sugaredLogger) environmentRepositoryImpl := repository.NewEnvironmentRepositoryImpl(db, sugaredLogger, appStatusRepositoryImpl) - clusterRepositoryImpl := repository.NewClusterRepositoryImpl(db, sugaredLogger) httpClient := util.NewHttpClient() grafanaClientConfig, err := grafana.GetGrafanaClientConfig() if err != nil { @@ -249,11 +248,6 @@ func InitializeApp() (*App, error) { attributesServiceImpl := attributes.NewAttributesServiceImpl(sugaredLogger, attributesRepositoryImpl) grafanaClientImpl := grafana.NewGrafanaClientImpl(sugaredLogger, httpClient, grafanaClientConfig, attributesServiceImpl) installedAppRepositoryImpl := repository3.NewInstalledAppRepositoryImpl(sugaredLogger, db) - runtimeConfig, err := k8s.GetRuntimeConfig() - if err != nil { - return nil, err - } - k8sServiceImpl := k8s.NewK8sUtil(sugaredLogger, runtimeConfig) connectionConfig, err := connection.GetConfig() if err != nil { return nil, err @@ -268,19 +262,17 @@ func InitializeApp() (*App, error) { return nil, err } serviceClientImpl := cluster.NewServiceClientImpl(sugaredLogger, argoCDConnectionManagerImpl) - syncMap := 
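The GlobalConfig.go hunk above moves the cron interval into a `GlobalClusterConfig` struct populated by caarlos0/env, so the value is parsed once with the rest of the environment variables instead of inside the cron service. A short sketch of how such a struct is filled, reusing the exact tag from the patch; the standalone `main` wrapper is only for illustration:

```go
package main

import (
	"fmt"

	"github.com/caarlos0/env/v6"
)

// GlobalClusterConfig mirrors the struct added in this patch; env.Parse fills
// it from CLUSTER_STATUS_CRON_TIME, falling back to the envDefault value.
type GlobalClusterConfig struct {
	ClusterStatusCronTime int `env:"CLUSTER_STATUS_CRON_TIME" envDefault:"15"`
}

func main() {
	cfg := &GlobalClusterConfig{}
	if err := env.Parse(cfg); err != nil {
		panic(err)
	}
	fmt.Println("cluster status cron interval (minutes):", cfg.ClusterStatusCronTime)
}
```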
informer.NewGlobalMapClusterNamespace() - k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, syncMap, k8sServiceImpl) + gitOpsConfigRepositoryImpl := repository2.NewGitOpsConfigRepositoryImpl(sugaredLogger, db) defaultAuthPolicyRepositoryImpl := repository4.NewDefaultAuthPolicyRepositoryImpl(db, sugaredLogger) defaultAuthRoleRepositoryImpl := repository4.NewDefaultAuthRoleRepositoryImpl(db, sugaredLogger) userAuthRepositoryImpl := repository4.NewUserAuthRepositoryImpl(db, sugaredLogger, defaultAuthPolicyRepositoryImpl, defaultAuthRoleRepositoryImpl) userRepositoryImpl := repository4.NewUserRepositoryImpl(db, sugaredLogger) roleGroupRepositoryImpl := repository4.NewRoleGroupRepositoryImpl(db, sugaredLogger) - gitOpsConfigRepositoryImpl := repository2.NewGitOpsConfigRepositoryImpl(sugaredLogger, db) - clientRuntimeConfig, err := client.GetRuntimeConfig() + runtimeConfig, err := client.GetRuntimeConfig() if err != nil { return nil, err } - k8sClient, err := client.NewK8sClient(clientRuntimeConfig) + k8sClient, err := client.NewK8sClient(runtimeConfig) if err != nil { return nil, err } @@ -309,7 +301,20 @@ func InitializeApp() (*App, error) { return nil, err } gitOpsConfigReadServiceImpl := config.NewGitOpsConfigReadServiceImpl(sugaredLogger, gitOpsConfigRepositoryImpl, userServiceImpl, environmentVariables) - clusterServiceImplExtended := cluster2.NewClusterServiceImplExtended(clusterRepositoryImpl, environmentRepositoryImpl, grafanaClientImpl, sugaredLogger, installedAppRepositoryImpl, k8sServiceImpl, serviceClientImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl, gitOpsConfigReadServiceImpl) + clusterRepositoryImpl := repository.NewClusterRepositoryImpl(db, sugaredLogger) + k8sRuntimeConfig, err := k8s.GetRuntimeConfig() + if err != nil { + return nil, err + } + k8sServiceImpl := k8s.NewK8sUtil(sugaredLogger, k8sRuntimeConfig) + syncMap := informer.NewGlobalMapClusterNamespace() + k8sInformerFactoryImpl := informer.NewK8sInformerFactoryImpl(sugaredLogger, syncMap, k8sServiceImpl) + cronLoggerImpl := cron.NewCronLoggerImpl(sugaredLogger) + clusterServiceImpl, err := cluster2.NewClusterServiceImpl(clusterRepositoryImpl, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthRepositoryImpl, userRepositoryImpl, roleGroupRepositoryImpl, environmentVariables, cronLoggerImpl) + if err != nil { + return nil, err + } + clusterServiceImplExtended := cluster2.NewClusterServiceImplExtended(environmentRepositoryImpl, grafanaClientImpl, installedAppRepositoryImpl, serviceClientImpl, gitOpsConfigReadServiceImpl, clusterServiceImpl) loginService := middleware.NewUserLogin(sessionManager, k8sClient) userAuthServiceImpl := user.NewUserAuthServiceImpl(userAuthRepositoryImpl, sessionManager, loginService, sugaredLogger, userRepositoryImpl, roleGroupRepositoryImpl, userServiceImpl) environmentServiceImpl := cluster2.NewEnvironmentServiceImpl(environmentRepositoryImpl, clusterServiceImplExtended, sugaredLogger, k8sServiceImpl, k8sInformerFactoryImpl, userAuthServiceImpl, attributesRepositoryImpl) @@ -384,14 +389,13 @@ func InitializeApp() (*App, error) { moduleServiceHelperImpl := module.NewModuleServiceHelperImpl(serverEnvConfigServerEnvConfig) moduleResourceStatusRepositoryImpl := moduleRepo.NewModuleResourceStatusRepositoryImpl(db) moduleDataStoreModuleDataStore := moduleDataStore.InitModuleDataStore() - cronLoggerImpl := cron.NewCronLoggerImpl(sugaredLogger) moduleCronServiceImpl, err := 
module.NewModuleCronServiceImpl(sugaredLogger, moduleEnvConfig, moduleRepositoryImpl, serverEnvConfigServerEnvConfig, helmAppServiceImpl, moduleServiceHelperImpl, moduleResourceStatusRepositoryImpl, moduleDataStoreModuleDataStore, cronLoggerImpl) if err != nil { return nil, err } scanToolMetadataRepositoryImpl := security.NewScanToolMetadataRepositoryImpl(db, sugaredLogger) moduleServiceImpl := module.NewModuleServiceImpl(sugaredLogger, serverEnvConfigServerEnvConfig, moduleRepositoryImpl, moduleActionAuditLogRepositoryImpl, helmAppServiceImpl, serverDataStoreServerDataStore, serverCacheServiceImpl, moduleCacheServiceImpl, moduleCronServiceImpl, moduleServiceHelperImpl, moduleResourceStatusRepositoryImpl, scanToolMetadataRepositoryImpl) - argoUserServiceImpl, err := argo.NewArgoUserServiceImpl(sugaredLogger, clusterServiceImplExtended, environmentVariables, runtimeConfig, argoCDConnectionManagerImpl, versionServiceImpl, k8sServiceImpl, gitOpsConfigReadServiceImpl, moduleServiceImpl) + argoUserServiceImpl, err := argo.NewArgoUserServiceImpl(sugaredLogger, clusterServiceImplExtended, environmentVariables, k8sRuntimeConfig, argoCDConnectionManagerImpl, versionServiceImpl, k8sServiceImpl, gitOpsConfigReadServiceImpl, moduleServiceImpl) if err != nil { return nil, err } From 1540271bd777b6bccd288e513a9070d8f04b6056 Mon Sep 17 00:00:00 2001 From: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Date: Mon, 2 Sep 2024 19:29:19 +0530 Subject: [PATCH 31/61] fix: sql injection fixes (#5783) * sql injection fixes * query param init fix --- .../user/repository/UserAuthRepository.go | 86 +++++++++++++------ 1 file changed, 60 insertions(+), 26 deletions(-) diff --git a/pkg/auth/user/repository/UserAuthRepository.go b/pkg/auth/user/repository/UserAuthRepository.go index 15f0c3198ae..f11bfef4156 100644 --- a/pkg/auth/user/repository/UserAuthRepository.go +++ b/pkg/auth/user/repository/UserAuthRepository.go @@ -945,40 +945,48 @@ func (impl UserAuthRepositoryImpl) GetRolesForWorkflow(workflow, entityName stri func (impl UserAuthRepositoryImpl) GetRoleForClusterEntity(cluster, namespace, group, kind, resource, action string) (RoleModel, error) { var model RoleModel + var queryParams []string query := "SELECT * FROM roles WHERE entity = ? " + queryParams = append(queryParams, bean.CLUSTER_ENTITIY) var err error if len(cluster) > 0 { - query += " and cluster='" + cluster + "' " + query += " and cluster = ? " + queryParams = append(queryParams, cluster) } else { query += " and cluster IS NULL " } if len(namespace) > 0 { - query += " and namespace='" + namespace + "' " + query += " and namespace = ? " + queryParams = append(queryParams, namespace) } else { query += " and namespace IS NULL " } if len(group) > 0 { - query += " and \"group\"='" + group + "' " + query += " and \"group\"= ? " + queryParams = append(queryParams, group) } else { query += " and \"group\" IS NULL " } if len(kind) > 0 { - query += " and kind='" + kind + "' " + query += " and kind = ? " + queryParams = append(queryParams, kind) } else { query += " and kind IS NULL " } if len(resource) > 0 { - query += " and resource='" + resource + "' " + query += " and resource = ? " + queryParams = append(queryParams, resource) } else { query += " and resource IS NULL " } if len(action) > 0 { - query += " and action='" + action + "' ;" + query += " and action = ? 
;" + queryParams = append(queryParams, action) } else { query += " and action IS NULL ;" } - _, err = impl.dbConnection.Query(&model, query, bean.CLUSTER_ENTITIY) + _, err = impl.dbConnection.Query(&model, query, queryParams) if err != nil { impl.Logger.Errorw("error in getting roles for clusterEntity", "err", err, bean2.CLUSTER, cluster, "namespace", namespace, "kind", kind, "group", group, "resource", resource) @@ -990,24 +998,29 @@ func (impl UserAuthRepositoryImpl) GetRoleForClusterEntity(cluster, namespace, g func (impl UserAuthRepositoryImpl) GetRoleForJobsEntity(entity, team, app, env, act string, workflow string) (RoleModel, error) { var model RoleModel var err error + var queryParams []string if len(team) > 0 && len(act) > 0 { query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.action=? AND role.entity=? " + queryParams = append(queryParams, team, act, entity) if len(env) == 0 { query = query + " AND role.environment is NULL" } else { - query += "AND role.environment='" + env + "'" + query += "AND role.environment = ? " + queryParams = append(queryParams, env) } if len(app) == 0 { query = query + " AND role.entity_name is NULL" } else { - query += " AND role.entity_name='" + app + "'" + query += " AND role.entity_name = ? " + queryParams = append(queryParams, app) } if len(workflow) == 0 { query = query + " AND role.workflow is NULL;" } else { - query += " AND role.workflow='" + workflow + "';" + query += " AND role.workflow = ? ;" + queryParams = append(queryParams, workflow) } - _, err = impl.dbConnection.Query(&model, query, team, act, entity) + _, err = impl.dbConnection.Query(&model, query, queryParams) } else { return model, nil } @@ -1021,21 +1034,27 @@ func (impl UserAuthRepositoryImpl) GetRoleForChartGroupEntity(entity, app, act, var model RoleModel var err error if len(app) > 0 && act == "update" { + var queryParams []string query := "SELECT role.* FROM roles role WHERE role.entity = ? AND role.entity_name=? AND role.action=?" + queryParams = append(queryParams, entity, app, act) if len(accessType) == 0 { query = query + " and role.access_type is NULL" } else { - query += " and role.access_type='" + accessType + "'" + query += " and role.access_type = ? " + queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, entity, app, act) + _, err = impl.dbConnection.Query(&model, query, queryParams) } else if app == "" { + var queryParams []string query := "SELECT role.* FROM roles role WHERE role.entity = ? AND role.action=?" + queryParams = append(queryParams, entity, act) if len(accessType) == 0 { query = query + " and role.access_type is NULL" } else { - query += " and role.access_type='" + accessType + "'" + query += " and role.access_type = ? " + queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, entity, act) + _, err = impl.dbConnection.Query(&model, query, queryParams) } if err != nil { impl.Logger.Errorw("error in getting role for chart group entity", "err", err, "entity", entity, "app", app, "act", act, "accessType", accessType) @@ -1047,52 +1066,67 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac var model RoleModel var err error if len(team) > 0 && len(app) > 0 && len(env) > 0 && len(act) > 0 { + var queryParams []string query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.entity_name=? AND role.environment=? AND role.action=?" 
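The fix applied throughout this patch follows one pattern: every optional filter contributes a `?` placeholder to the SQL text and pushes its value onto a `queryParams` slice, so escaping is left to the database library rather than done by string concatenation. A self-contained sketch of that technique; `buildRoleQuery` is a hypothetical helper for illustration, not Devtron's code:

```go
package main

import "fmt"

// buildRoleQuery appends optional filters as "?" placeholders and collects
// their values in a parallel params slice, instead of splicing raw strings
// into the SQL text.
func buildRoleQuery(team, env string) (string, []interface{}) {
	params := []interface{}{team}
	query := "SELECT role.* FROM roles role WHERE role.team = ?"
	if len(env) > 0 {
		query += " AND role.environment = ?"
		params = append(params, env)
	} else {
		query += " AND role.environment IS NULL"
	}
	return query, params
}

func main() {
	query, params := buildRoleQuery("devtron-team", "prod' OR '1'='1")
	// the suspicious value stays inert inside params; it never alters the query text
	fmt.Println(query)
	fmt.Println(params...)
}
```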
+		queryParams = append(queryParams, team, app, env, act)
 		if oldValues {
 			query = query + " and role.access_type is NULL"
 		} else {
-			query += " and role.access_type='" + accessType + "'"
+			query += " and role.access_type = ? "
+			queryParams = append(queryParams, accessType)
 		}
-		_, err = impl.dbConnection.Query(&model, query, team, app, env, act)
+		_, err = impl.dbConnection.Query(&model, query, queryParams)
 	} else if len(team) > 0 && app == "" && len(env) > 0 && len(act) > 0 {
-
+		var queryParams []string
 		query := "SELECT role.* FROM roles role WHERE role.team=? AND coalesce(role.entity_name,'')=? AND role.environment=? AND role.action=?"
+		queryParams = append(queryParams, team, EMPTY_PLACEHOLDER_FOR_QUERY, env, act)
 		if oldValues {
 			query = query + " and role.access_type is NULL"
 		} else {
-			query += " and role.access_type='" + accessType + "'"
+			query += " and role.access_type = ? "
+			queryParams = append(queryParams, accessType)
 		}
-		_, err = impl.dbConnection.Query(&model, query, team, EMPTY_PLACEHOLDER_FOR_QUERY, env, act)
+		_, err = impl.dbConnection.Query(&model, query, queryParams)
 	} else if len(team) > 0 && len(app) > 0 && env == "" && len(act) > 0 {
+		var queryParams []string
 		//this is applicable for all environment of a team
 		query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.entity_name=? AND coalesce(role.environment,'')=? AND role.action=?"
+		queryParams = append(queryParams, team, app, EMPTY_PLACEHOLDER_FOR_QUERY, act)
 		if oldValues {
 			query = query + " and role.access_type is NULL"
 		} else {
-			query += " and role.access_type='" + accessType + "'"
+			query += " and role.access_type = ? "
+			queryParams = append(queryParams, accessType)
 		}
-		_, err = impl.dbConnection.Query(&model, query, team, app, EMPTY_PLACEHOLDER_FOR_QUERY, act)
+		_, err = impl.dbConnection.Query(&model, query, queryParams)
 	} else if len(team) > 0 && app == "" && env == "" && len(act) > 0 {
+		var queryParams []string
 		//this is applicable for all environment of a team
 		query := "SELECT role.* FROM roles role WHERE role.team = ? AND coalesce(role.entity_name,'')=? AND coalesce(role.environment,'')=? AND role.action=?"
+		queryParams = append(queryParams, team, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act)
 		if oldValues {
 			query = query + " and role.access_type is NULL"
 		} else {
-			query += " and role.access_type='" + accessType + "'"
+			query += " and role.access_type = ? "
+			queryParams = append(queryParams, accessType)
 		}
-		_, err = impl.dbConnection.Query(&model, query, team, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act)
+		_, err = impl.dbConnection.Query(&model, query, queryParams)
 	} else if team == "" && app == "" && env == "" && len(act) > 0 {
+		var queryParams []string
 		//this is applicable for super admin, all env, all team, all app
 		query := "SELECT role.* FROM roles role WHERE coalesce(role.team,'') = ? AND coalesce(role.entity_name,'')=? AND coalesce(role.environment,'')=? AND role.action=?"
+		queryParams = append(queryParams, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act)
 		if len(accessType) == 0 {
 			query = query + " and role.access_type is NULL"
 		} else {
-			query += " and role.access_type='" + accessType + "'"
+			query += " and role.access_type = ? "
+			queryParams = append(queryParams, accessType)
+		}
-		_, err = impl.dbConnection.Query(&model, query, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act)
+		_, err = impl.dbConnection.Query(&model, query, queryParams)
 	} else if team == "" && app == "" && env == "" && act == "" {
 		return model, nil
 	} else {
From ba0284545013d887fe0290b75d9654cc73339362 Mon Sep 17 00:00:00 2001
From: Bhushan Nemade
Date: Tue, 3 Sep 2024 10:48:22 +0530
Subject: [PATCH 32/61] doc: Vulnerability Scanning Plugin doc (#5722)

* vulnerability scanning plugin doc

* summary.md added
---
 docs/SUMMARY.md | 1 +
 .../plugins/vulnerability-scanning.md | 51 +++++++++++++++++++
 2 files changed, 52 insertions(+)
 create mode 100644 docs/user-guide/plugins/vulnerability-scanning.md

diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index e4ad4067a05..c8a025572c6 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -151,6 +151,7 @@
 * [Semgrep](user-guide/plugins/semgrep.md)
 * [SonarQube](user-guide/plugins/sonarqube.md)
 * [SonarQube v1.1.0](user-guide/plugins/sonarqube-v1.1.0.md)
+ * [Vulnerability Scanning](user-guide/plugins/vulnerability-scanning.md)

## Resources

diff --git a/docs/user-guide/plugins/vulnerability-scanning.md b/docs/user-guide/plugins/vulnerability-scanning.md
new file mode 100644
index 00000000000..178c2025bf8
--- /dev/null
+++ b/docs/user-guide/plugins/vulnerability-scanning.md
@@ -0,0 +1,51 @@
+# Vulnerability Scanning
+
+## Introduction
+The **Vulnerability Scanning** plugin by Devtron enables you to scan your applications and detect vulnerabilities using Trivy/Clair. The plugin is recommended for integration into the Job Pipeline, especially when you are using external CI pipelines like Jenkins, GitLab, or GitHub Actions. Based on the scan results, you can enforce security policies to either proceed with or abort a deployment, giving you more control over your deployment process.
+
+### Prerequisites
+Before integrating the Vulnerability Scanning plugin, ensure that you have installed the `Vulnerability Scanning (Trivy/Clair)` integration from Devtron Stack Manager.
+
+---
+
+## Steps
+1. Go to **Applications** → **Devtron Apps**.
+2. Click your application.
+3. Go to **App Configuration** → **Workflow Editor**.
+4. Click **New Workflow** and navigate to **CREATE JOB PIPELINE**.
+5. Enter the required fields in the **Basic configuration** window.
+6. Click **Task to be executed**.
+7. Under 'TASKS', click the **+ Add task** button.
+8. Click the **Vulnerability Scanning** plugin.
+9. Enter the following [user inputs](#user-inputs) with appropriate values.
+---
+
+## User Inputs
+
+### Task Name
+Enter the name of your task.
+
+e.g., `Vulnerability Scanning for External CI`
+
+### Description
+Add a brief explanation of the task and the reason for choosing the plugin. Include information that helps someone else understand the purpose of the task.
+
+e.g., `The Vulnerability Scanning plugin is integrated for detecting vulnerabilities in applications.`
+
+### Input Variables
+
+| Variable | Format | Description | Sample Value |
+| ------------------------ | ------------ | ----------- | ------------ |
+| IMAGE_SCAN_MAX_RETRIES | STRING | Maximum retries for image scanning. | 2 |
+| IMAGE_SCAN_RETRY_DELAY | STRING | Delay between image scanning retries (seconds). | 120 |
+
+### Trigger/Skip Condition
+Here you can set conditions to execute or skip the task. Select `Set trigger conditions` to control when the task executes, or `Set skip conditions` to skip it.
+
+### Output Variables
+Vulnerability Scanning does not generate any output variables.
+
+Click **Update Pipeline**.
+
+
+
From 02f4a1b47adb64bb0fe6e7b8ed01225740ecae70 Mon Sep 17 00:00:00 2001
From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com>
Date: Tue, 3 Sep 2024 11:54:22 +0530
Subject: [PATCH 33/61] docs: Jira plugins doc (Validator + Updater) (#5709)

* Docs of Jira Plugins - Validator and Updater

* Added Index Links

* Additional Proofreading
---
 docs/SUMMARY.md | 2 +
 docs/user-guide/plugins/jira-updater.md | 57 +++++++++++++++++++++++
 docs/user-guide/plugins/jira-validator.md | 54 +++++++++++++++++++++
 docs/user-guide/plugins/plugin-list.md | 2 +
 4 files changed, 115 insertions(+)
 create mode 100644 docs/user-guide/plugins/jira-updater.md
 create mode 100644 docs/user-guide/plugins/jira-validator.md

diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index c8a025572c6..136be17a3ce 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -146,6 +146,8 @@
 * [DockerSlim](user-guide/plugins/docker-slim.md)
 * [GoLang-migrate](user-guide/plugins/golang-migrate.md)
 * [Jenkins](user-guide/plugins/jenkins.md)
+ * [Jira Issue Validator](user-guide/plugins/jira-validator.md)
+ * [Jira Issue Updater](user-guide/plugins/jira-updater.md)
 * [K6 Load Testing](user-guide/plugins/k6-load-testing.md)
 * [Pull images from container repository](user-guide/plugins/pull-images-from-container-repository.md)
 * [Semgrep](user-guide/plugins/semgrep.md)
diff --git a/docs/user-guide/plugins/jira-updater.md b/docs/user-guide/plugins/jira-updater.md
new file mode 100644
index 00000000000..c137b5e4810
--- /dev/null
+++ b/docs/user-guide/plugins/jira-updater.md
@@ -0,0 +1,57 @@
+# Jira Issue Updater
+
+## Introduction
+The Jira Issue Updater plugin extends the capabilities of Devtron CI by allowing updates to Jira issues directly from the pipeline. It can add the build pipeline status and the Docker image ID as comments on Jira tickets, keeping issue tracking synchronized with your CI process.
+
+### Prerequisites
+
+- A Jira account with the necessary [API access](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/#Create-an-API-token).
+- The API credentials (username, password, and base URL) for your Jira instance. Obtain the API credentials from your Jira admin if required.
+- A pull request raised with your Git provider. The title of the pull request must contain the Jira ID.
+- A Jira issue (e.g., REDOC-12).
+- A webhook added to the git repository. [Click here](https://docs.devtron.ai/usage/applications/creating-application/workflow/ci-pipeline#configuring-webhook) to know more.
+
+---
+
+## Steps
+
+1. On the **Edit build pipeline** page, go to the **Post-Build Stage**.
+2. Click **+ Add task**.
+3. Select **Jira Issue Updater** from the list of plugins.
+ * Enter a task name (mandatory).
+ * Optionally, enter a description.
+ * Provide values for the input variables.
+
+ | Variable | Format | Description |
+ | ------------------------ | ------ | --------------------------------------------------------- |
+ | JiraUsername | String | Your Jira username (e.g., johndoe@devtron.ai) |
+ | JiraPassword | String | Your Jira API token provided by the Jira admin |
+ | JiraBaseUrl | String | The base URL of your Jira instance (e.g., https://yourdomain.atlassian.net/) |
+ | UpdateWithDockerImageId | Bool | Set to `true` to include the Docker Image ID in the update |
+ | UpdateWithBuildStatus | Bool | Set to `true` to include the build status in the update |
+
+ * `Trigger/Skip Condition` allows you to set conditions under which this task will execute or be skipped.
+ * `Pass/Failure Condition` allows you to define conditions that determine whether the build passes or fails based on the Jira update.
+
+4. Go to the **Build Stage**.
+
+5. Select **Pull Request** in the **Source Type** dropdown.
+
+6. Use filters to fetch only the PRs matching your regex (see the sketch after these steps). Here are a few examples:
+ * **Title** can be a regex pattern (e.g., `^(?P([a-zA-Z0-9-].*))`) to extract the Jira ID from the PR title. Only those PRs fulfilling the regex will be shown for the image build process.
+ * **State** can be `^open$`, where only PRs in the open state will be shown for the image build process.
+
+7. Click **Update Pipeline**.
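A note on the **Title** regex above: `(?P...)` is named-capture syntax, and the group name (an angle-bracketed token such as `<jira_Id>`) appears to have been lost in rendering; without a name between `?P` and the parentheses the expression will not compile. The sketch below assumes a group name of `jiraId` and deliberately tightens the doc's catch-all pattern so it captures only the ticket key. It illustrates the extraction; it is not the plugin's actual filter code:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Named capture group: the name sits between ?P and the pattern.
	// [A-Z][A-Z0-9]+-[0-9]+ matches keys like REDOC-12 at the title start.
	re := regexp.MustCompile(`^(?P<jiraId>[A-Z][A-Z0-9]+-[0-9]+)`)

	for _, title := range []string{"REDOC-12 update plugin docs", "misc cleanup"} {
		m := re.FindStringSubmatch(title)
		if m == nil {
			fmt.Printf("%q: no Jira ID, PR would be filtered out\n", title)
			continue
		}
		fmt.Printf("%q: Jira ID %s\n", title, m[re.SubexpIndex("jiraId")])
	}
}
```

Running it prints the extracted key for the first title and reports that the second would be filtered out, which mirrors how the Title filter narrows the PRs offered for the image build.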
+
+---
+
+## Results
+
+![Figure 1: Build Log](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/plugins/jira/jira-updater-log.jpg)
+
+![Figure 2: Comments added by the Plugin on the Jira Issue](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/plugins/jira/jira-updater.jpg)
+
+
+
+
+
diff --git a/docs/user-guide/plugins/jira-validator.md b/docs/user-guide/plugins/jira-validator.md
new file mode 100644
index 00000000000..3122d136563
--- /dev/null
+++ b/docs/user-guide/plugins/jira-validator.md
@@ -0,0 +1,54 @@
+# Jira Issue Validator
+
+## Introduction
+The Jira Issue Validator plugin extends the filtering capabilities of Devtron CI and lets users perform validation based on the status of a Jira ticket ID. This plugin ensures that only builds associated with valid Jira tickets are executed, improving the accuracy of the CI process.
+
+### Prerequisites
+
+- A Jira account with the necessary [API access](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/#Create-an-API-token).
+- The API credentials (username, password, and base URL) for your Jira instance. Obtain the API credentials from your Jira admin if required.
+- A pull request raised with your Git provider. The title of the pull request must contain the Jira ID.
+- A Jira issue (e.g., REDOC-12).
+- A webhook added to the git repository. [Click here](https://docs.devtron.ai/usage/applications/creating-application/workflow/ci-pipeline#configuring-webhook) to know more.
+
+---
+
+## Steps
+
+1. On the **Edit build pipeline** page, go to the **Pre-Build Stage** (or Post-Build Stage).
+2. Click **+ Add task**.
+3. Select **Jira Issue Validator** from the list of plugins.
+ * Enter a task name (mandatory).
+ * Optionally, enter a description.
+ * Provide values for the input variables.
+
+ | Variable | Format | Description |
+ | -------------- | ------ | --------------------------------------------------------- |
+ | JiraUsername | String | Your Jira username (e.g., johndoe@devtron.ai) |
+ | JiraPassword | String | Your Jira API token provided by the Jira admin |
+ | JiraBaseUrl | String | The base URL of your Jira instance (e.g., https://yourdomain.atlassian.net) |
+
+ * `Trigger/Skip Condition` allows you to set conditions under which this task will execute or be skipped.
+ * `Pass/Failure Condition` allows you to define conditions that determine whether the build passes or fails based on Jira validation.
+
+4. Go to the **Build Stage**.
+
+5. Select **Pull Request** in the **Source Type** dropdown.
+
+6. Use filters to fetch only the PRs matching your regex (see the validation sketch after these steps). Here are a few examples:
+ * **Title** can be a regex pattern (e.g., `^(?P([a-zA-Z0-9-].*))`) to extract the Jira ID from the PR title. Only those PRs fulfilling the regex will be shown for the image build process.
+ * **State** can be `^open$`, where only PRs in the open state will be shown for the image build process.
+
+7. Click **Update Pipeline**.
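For the validator, the check itself amounts to asking Jira whether the ticket extracted from the PR title actually exists. The plugin's implementation is not part of this patch series, so the following is only a sketch of the kind of call involved, using Jira's public REST endpoint `GET /rest/api/2/issue/{key}` with the same username/token credentials listed in the input variables:

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"time"
)

// issueExists asks Jira whether a ticket is real. The endpoint and basic
// auth follow Jira's public REST API; treat this as an illustration, not
// Devtron's implementation.
func issueExists(baseURL, user, token, key string) (bool, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/rest/api/2/issue/"+key, nil)
	if err != nil {
		return false, err
	}
	req.SetBasicAuth(user, token) // Jira Cloud accepts email + API token here

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("unexpected status %s", resp.Status)
	}
}

func main() {
	ok, err := issueExists("https://yourdomain.atlassian.net",
		os.Getenv("JIRA_USER"), os.Getenv("JIRA_TOKEN"), "REDOC-12")
	fmt.Println(ok, err)
}
```

A `false` result (HTTP 404) corresponds to Case 2 in the Results section below, and is the situation a pass/failure condition on the task would react to.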
+
+---
+
+## Results
+
+**Case 1**: The Jira issue exists and is found in the PR title
+
+![Figure 1: Jira Issue Match](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/plugins/jira/jira-issue-validator.jpg)
+
+**Case 2**: The Jira issue is not found
+
+![Figure 2: Error in Finding Jira Issue](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/plugins/jira/issue-validation-failed.jpg)
diff --git a/docs/user-guide/plugins/plugin-list.md b/docs/user-guide/plugins/plugin-list.md
index 908aedee63a..2a7a239775c 100644
--- a/docs/user-guide/plugins/plugin-list.md
+++ b/docs/user-guide/plugins/plugin-list.md
@@ -11,6 +11,8 @@ We have multiple plugins available in Devtron.
At the moment, here are the plugi * [Dependency track - Python](./dependency-track-python.md) * [GoLang-migrate](./golang-migrate.md) * [Jenkins](./jenkins.md) +* [Jira Issue Validator](./jira-validator.md) +* [Jira Issue Updater](./jira-updater.md) * [K6 Load Testing](./k6-load-testing.md) * [Pull images from container repository](./pull-images-from-container-repository.md) * [Semgrep](./semgrep.md) From a6a2ae2f6d7770ac7c38a7111ecd7b14d7a3c82d Mon Sep 17 00:00:00 2001 From: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Date: Tue, 3 Sep 2024 19:18:46 +0530 Subject: [PATCH 34/61] add basic auth and tls for sm (#5789) --- .../templates/servicemonitor.yaml | 51 ++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml index 7368288e0ca..276a50211e7 100644 --- a/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml +++ b/scripts/devtron-reference-helm-charts/statefulset-chart_5-1-0/templates/servicemonitor.yaml @@ -24,8 +24,50 @@ spec: {{- range .Values.ContainerPort }} {{- if .servicemonitor }} {{- if .servicemonitor.enabled}} - {{- if .servicePort }} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} - port: {{ .name }} + {{- end }} + {{- if .servicemonitor.path }} + path: {{ .servicemonitor.path}} + {{- end }} + {{- if .servicemonitor.scheme }} + scheme: {{ .servicemonitor.scheme}} + {{- end }} + {{- if .servicemonitor.interval }} + interval: {{ .servicemonitor.interval}} + {{- end }} + {{- if .servicemonitor.scrapeTimeout }} + scrapeTimeout: {{ .servicemonitor.scrapeTimeout}} + {{- end }} + {{- if .servicemonitor.basicAuth }} + basicAuth: + {{- toYaml .servicemonitor.basicAuth | nindent 8 }} + {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .servicemonitor.metricRelabelings}} + metricRelabelings: +{{toYaml .servicemonitor.metricRelabelings | indent 8 }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- range .Values.containers }} + {{- range .ports }} + {{- if .servicemonitor }} + {{- if .servicemonitor.enabled}} + {{- if .servicemonitor.targetPort }} + - targetPort: {{ .servicemonitor.targetPort }} + {{- else if .servicePort }} + - port: {{ .name }} + {{- end }} {{- if .servicemonitor.path }} path: {{ .servicemonitor.path}} {{- end }} @@ -42,6 +84,13 @@ spec: basicAuth: {{- toYaml .servicemonitor.basicAuth | nindent 8 }} {{- end }} + {{- if .servicemonitor.insecureTLS }} + tlsConfig: + insecureSkipVerify: true + {{- else if .servicemonitor.tlsConfig }} + tlsConfig: + {{- toYaml .servicemonitor.tlsConfig | nindent 8 }} + {{- end }} {{- if .servicemonitor.metricRelabelings}} metricRelabelings: {{toYaml .servicemonitor.metricRelabelings | indent 8 }} From 654ba936bf2b16210782ede41d1bd1e44177f11a Mon Sep 17 00:00:00 2001 From: Badal Kumar <130441461+badal773@users.noreply.github.com> Date: Wed, 4 Sep 2024 16:20:51 +0530 Subject: [PATCH 35/61] docs: added commands enable ingress during helm installation (#5794) * added commands emable ingress during helm installation * modified commands * improved statement * removed unrequired lines --------- Co-authored-by: Badal Kumar Prusty --- 
docs/setup/install/ingress-setup.md | 70 +++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/docs/setup/install/ingress-setup.md b/docs/setup/install/ingress-setup.md index 8204bc4bef1..380013c8b6f 100644 --- a/docs/setup/install/ingress-setup.md +++ b/docs/setup/install/ingress-setup.md @@ -1,5 +1,75 @@ # Ingress Setup +## Enable Ingress During Installation + +To configure Ingress for a Helm chart during installation or upgrade, you can use the `--set` flag to specify the desired Ingress settings. Below is a guide on how to add Ingress, including optional labels, annotations, and TLS settings. + +### Basic Ingress Configuration + +To enable Ingress and set basic parameters, use the following command: + +```bash +helm install devtron devtron/devtron-operator -n devtroncd \ + --set components.devtron.ingress.enabled=true \ + --set components.devtron.ingress.className=nginx \ + --set components.devtron.ingress.host=devtron.example.com +``` + +### Adding Labels + +To add labels to the Ingress resource, use the following command: + +```bash +helm install devtron devtron/devtron-operator -n devtroncd \ + --set components.devtron.ingress.enabled=true \ + --set components.devtron.ingress.className=nginx \ + --set components.devtron.ingress.host=devtron.example.com \ + --set components.devtron.ingress.labels.env=production +``` + +### Adding Annotations + +To add annotations to the Ingress resource, use the following command: + +```bash +helm install devtron devtron/devtron-operator -n devtroncd \ + --set components.devtron.ingress.enabled=true \ + --set components.devtron.ingress.className=nginx \ + --set components.devtron.ingress.host=devtron.example.com \ + --set components.devtron.ingress.annotations."kubernetes\.io/ingress\.class"=nginx \ + --set components.devtron.ingress.annotations."nginx\.ingress\.kubernetes\.io\/app-root"="/dashboard" +``` + +### Configuring TLS + +To configure TLS settings, including `secretName` and `hosts`, use the following command: + +```bash +helm install devtron devtron/devtron-operator -n devtroncd \ + --set components.devtron.ingress.enabled=true \ + --set components.devtron.ingress.className=nginx \ + --set components.devtron.ingress.host=devtron.example.com \ + --set components.devtron.ingress.tls[0].secretName=devtron-tls \ + --set components.devtron.ingress.tls[0].hosts[0]=devtron.example.com +``` + +### Comprehensive Configuration + +To include all the above settings in a single command, use: + +```bash +helm upgrade devtron devtron/devtron-operator -n devtroncd \ + --set components.devtron.ingress.enabled=true \ + --set components.devtron.ingress.className=nginx \ + --set components.devtron.ingress.host=devtron.example.com \ + --set components.devtron.ingress.annotations."kubernetes\.io/ingress\.class"=nginx \ + --set components.devtron.ingress.annotations."nginx\.ingress\.kubernetes\.io\/app-root"="/dashboard" \ + --set components.devtron.ingress.labels.env=production \ + --set components.devtron.ingress.pathType=ImplementationSpecific \ + --set components.devtron.ingress.tls[0].secretName=devtron-tls \ + --set components.devtron.ingress.tls[0].hosts[0]=devtron.example.com +``` + After Devtron is installed, Devtron is accessible through service `devtron-service`. If you want to access Devtron through ingress, edit `devtron-service` and change the loadbalancer to ClusterIP. 
You can do this using `kubectl patch` command:

From 0e16daf04afb3bed4a267aecc5afd5ce269806a0 Mon Sep 17 00:00:00 2001
From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com>
Date: Wed, 4 Sep 2024 18:15:03 +0530
Subject: [PATCH 36/61] Revamped + Restructured Ingress Setup Doc (#5798)

---
 docs/setup/install/ingress-setup.md | 99 +++++++++++++++++++++++------
 1 file changed, 78 insertions(+), 21 deletions(-)

diff --git a/docs/setup/install/ingress-setup.md b/docs/setup/install/ingress-setup.md
index 380013c8b6f..036bc4c2d90 100644
--- a/docs/setup/install/ingress-setup.md
+++ b/docs/setup/install/ingress-setup.md
@@ -1,10 +1,33 @@
 # Ingress Setup
 
-## Enable Ingress During Installation
+## Introduction
 
-To configure Ingress for a Helm chart during installation or upgrade, you can use the `--set` flag to specify the desired Ingress settings. Below is a guide on how to add Ingress, including optional labels, annotations, and TLS settings.
+If you wish to use [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) as a means to access the Devtron services available in your cluster, you can configure it either during the installation or after the installation of Devtron.
 
-### Basic Ingress Configuration
+Refer to the section relevant to you:
+* [During Devtron Installation](#enabling-ingress-during-devtron-installation)
+* [After Devtron Installation](#configuring-ingress-after-devtron-installation)
+
+If you have successfully configured Ingress, refer [Post Ingress Setup](#enable-https-for-devtron).
+
+---
+
+## Enabling Ingress during Devtron Installation
+
+If you are installing Devtron, you can enable Ingress either via [set flag](#using-set-flag) or by using [values.yaml](#using-valuesyaml) to specify the desired Ingress settings.
+
+### Using set flag
+
+You can use the `--set` flag to specify the desired Ingress settings.
+ +Here, we have added 5 configurations you can perform depending on your requirements: +* [Only Basic Configuration](#only-basic-configuration) +* [Configuration Including Labels](#configuration-including-labels) +* [Configuration Including Annotations](#configuration-including-annotations) +* [Configuration Including TLS Settings](#configuration-including-tls-settings) +* [Comprehensive Configuration](#comprehensive-configuration) + +#### Only Basic Configuration To enable Ingress and set basic parameters, use the following command: @@ -15,7 +38,7 @@ helm install devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.host=devtron.example.com ``` -### Adding Labels +#### Configuration Including Labels To add labels to the Ingress resource, use the following command: @@ -27,7 +50,7 @@ helm install devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.labels.env=production ``` -### Adding Annotations +#### Configuration Including Annotations To add annotations to the Ingress resource, use the following command: @@ -40,7 +63,7 @@ helm install devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.annotations."nginx\.ingress\.kubernetes\.io\/app-root"="/dashboard" ``` -### Configuring TLS +#### Configuration Including TLS Settings To configure TLS settings, including `secretName` and `hosts`, use the following command: @@ -53,12 +76,12 @@ helm install devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.tls[0].hosts[0]=devtron.example.com ``` -### Comprehensive Configuration +#### Comprehensive Configuration To include all the above settings in a single command, use: ```bash -helm upgrade devtron devtron/devtron-operator -n devtroncd \ +helm install devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.enabled=true \ --set components.devtron.ingress.className=nginx \ --set components.devtron.ingress.host=devtron.example.com \ @@ -70,17 +93,52 @@ helm upgrade devtron devtron/devtron-operator -n devtroncd \ --set components.devtron.ingress.tls[0].hosts[0]=devtron.example.com ``` -After Devtron is installed, Devtron is accessible through service `devtron-service`. -If you want to access Devtron through ingress, edit `devtron-service` and change the loadbalancer to ClusterIP. You can do this using `kubectl patch` command: + +### Using ingress-values.yaml + +As an alternative to the [set flag](#using-set-flag) method, you can enable Ingress using `ingress-values.yaml` instead. + +Create an `ingress-values.yaml` file. You may refer the below format for an advanced ingress configuration which includes labels, annotations, secrets, and many more. + +```yml +components: + devtron: + ingress: + enabled: true + className: nginx + labels: {} + # env: production + annotations: {} + # nginx.ingress.kubernetes.io/app-root: /dashboard + pathType: ImplementationSpecific + host: devtron.example.com + tls: [] + # - secretName: devtron-info-tls + # hosts: + # - devtron.example.com +``` + +Once you have the `ingress-values.yaml` file ready, run the following command: + +```bash +helm install devtron devtron/devtron-operator -n devtroncd --reuse-values -f ingress-values.yaml +``` + +--- + +## Configuring Ingress after Devtron Installation + +After Devtron is installed, Devtron is accessible through `devtron-service`. If you wish to access Devtron through ingress, you'll need to modify this service to use a ClusterIP instead of a LoadBalancer. 
+ +You can do this using the `kubectl patch` command: ```bash kubectl patch -n devtroncd svc devtron-service -p '{"spec": {"ports": [{"port": 80,"targetPort": "devtron","protocol": "TCP","name": "devtron"}],"type": "ClusterIP","selector": {"app": "devtron"}}}' ``` -After this, create ingress by applying the ingress yaml file. -You can use [this yaml file](https://github.com/devtron-labs/devtron/blob/main/manifests/yamls/devtron-ingress.yaml) to create ingress to access Devtron: +Next, create ingress to access Devtron by applying the `devtron-ingress.yaml` file. The file is also available on this [link](https://github.com/devtron-labs/devtron/blob/main/manifests/yamls/devtron-ingress.yaml). You can access Devtron from any host after applying this yaml. -```yaml +```yml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -119,9 +177,9 @@ spec: pathType: ImplementationSpecific ``` -You can access Devtron from any host after applying this yaml. For k8s versions <1.19, [apply this yaml](https://github.com/devtron-labs/devtron/blob/main/manifests/yamls/devtron-ingress-legacy.yaml): +For k8s versions < 1.19, [apply this yaml](https://github.com/devtron-labs/devtron/blob/main/manifests/yamls/devtron-ingress-legacy.yaml): -```yaml +```yml apiVersion: extensions/v1beta1 kind: Ingress metadata: @@ -149,7 +207,7 @@ spec: Optionally, you also can access Devtron through a specific host by running the following YAML file: -```yaml +```yml apiVersion: networking.k8s.io/v1 kind: Ingress metadata: @@ -189,9 +247,11 @@ spec: pathType: ImplementationSpecific ``` +--- + ## Enable HTTPS For Devtron -Once ingress setup for devtron is done and you want to run Devtron over `https`, you need to add different annotations for different ingress controllers and load balancers. +Once Ingress setup for Devtron is done and you want to run Devtron over `https`, you need to add different annotations for different ingress controllers and load balancers. ### 1. Nginx Ingress Controller @@ -245,7 +305,4 @@ In case of AWS application load balancer, the following annotations need to be a ``` For an Ingress resource to be observed by AGIC (Application Gateway Ingress Controller) must be annotated with kubernetes.io/ingress.class: azure/application-gateway. Only then AGIC will work with the Ingress resource in question. -> Note: Make sure NOT to use port 80 with HTTPS and port 443 with HTTP on the Pods. - - - +> Note: Make sure NOT to use port 80 with HTTPS and port 443 with HTTP on the Pods. \ No newline at end of file From d4bd2725374c5bbee8382e428c6c8046589990ee Mon Sep 17 00:00:00 2001 From: Badal Kumar <130441461+badal773@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:29:58 +0530 Subject: [PATCH 37/61] modifying route (#5799) Co-authored-by: Badal Kumar Prusty --- docs/setup/install/ingress-setup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setup/install/ingress-setup.md b/docs/setup/install/ingress-setup.md index 036bc4c2d90..8c0490db4f7 100644 --- a/docs/setup/install/ingress-setup.md +++ b/docs/setup/install/ingress-setup.md @@ -14,7 +14,7 @@ If you have successfully configured Ingress, refer [Post Ingress Setup](#enable- ## Enabling Ingress during Devtron Installation -If you are installing Devtron, you can enable Ingress either via [set flag](#using-set-flag) or by using [values.yaml](#using-valuesyaml) to specify the desired Ingress settings. 
+If you are installing Devtron, you can enable Ingress either via [set flag](#using-set-flag) or by using [ingress-values.yaml](#using-ingress-valuesyaml) to specify the desired Ingress settings. ### Using set flag From 43ba232b2397326bc0486cd4ab4c2f7bdf05eb1d Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Wed, 4 Sep 2024 18:45:12 +0530 Subject: [PATCH 38/61] fix: cron status update refactoring (#5790) Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> --- api/cluster/EnvironmentRestHandler.go | 6 +-- pkg/cluster/ClusterService.go | 55 ++++++++++++++++++--------- pkg/cluster/EnvironmentService.go | 5 ++- 3 files changed, 43 insertions(+), 23 deletions(-) diff --git a/api/cluster/EnvironmentRestHandler.go b/api/cluster/EnvironmentRestHandler.go index 47d246a445d..081dab38f7f 100644 --- a/api/cluster/EnvironmentRestHandler.go +++ b/api/cluster/EnvironmentRestHandler.go @@ -23,6 +23,7 @@ import ( "net/http" "strconv" "strings" + "sync" "time" k8s2 "github.com/devtron-labs/common-lib/utils/k8s" @@ -513,9 +514,8 @@ func (impl EnvironmentRestHandlerImpl) GetEnvironmentConnection(w http.ResponseW responseObj.ClusterReachable = false } //updating the cluster connection error to db - mapObj := map[int]error{ - clusterBean.Id: err, - } + mapObj := &sync.Map{} + mapObj.Store(clusterBean.Id, err) impl.environmentClusterMappingsService.HandleErrorInClusterConnections([]*request.ClusterBean{clusterBean}, mapObj, true) common.WriteJsonResp(w, nil, responseObj, http.StatusOK) } diff --git a/pkg/cluster/ClusterService.go b/pkg/cluster/ClusterService.go index ad088f74cf2..aa7802b1ab5 100644 --- a/pkg/cluster/ClusterService.go +++ b/pkg/cluster/ClusterService.go @@ -181,7 +181,7 @@ type ClusterService interface { FindAllNamespacesByUserIdAndClusterId(userId int32, clusterId int, isActionUserSuperAdmin bool) ([]string, error) FindAllForClusterByUserId(userId int32, isActionUserSuperAdmin bool) ([]ClusterBean, error) FetchRolesFromGroup(userId int32) ([]*repository3.RoleModel, error) - HandleErrorInClusterConnections(clusters []*ClusterBean, respMap map[int]error, clusterExistInDb bool) + HandleErrorInClusterConnections(clusters []*ClusterBean, respMap *sync.Map, clusterExistInDb bool) ConnectClustersInBatch(clusters []*ClusterBean, clusterExistInDb bool) ConvertClusterBeanToCluster(clusterBean *ClusterBean, userId int32) *repository.Cluster ConvertClusterBeanObjectToCluster(bean *ClusterBean) *v1alpha1.Cluster @@ -259,11 +259,14 @@ func (impl *ClusterServiceImpl) ConvertClusterBeanToCluster(clusterBean *Cluster // getAndUpdateClusterConnectionStatus is a cron function to update the connection status of all clusters func (impl *ClusterServiceImpl) getAndUpdateClusterConnectionStatus() { - impl.logger.Debug("starting cluster connection status fetch thread") - defer impl.logger.Debug("stopped cluster connection status fetch thread") + impl.logger.Info("starting cluster connection status fetch thread") + startTime := time.Now() + defer func() { + impl.logger.Debugw("cluster connection status fetch thread completed", "timeTaken", time.Since(startTime)) + }() //getting all clusters - clusters, err := impl.FindAllExceptVirtual() + clusters, err := impl.FindAll() if err != nil { impl.logger.Errorw("error in getting all clusters", "err", err) return @@ -845,21 +848,26 @@ func (impl *ClusterServiceImpl) FetchRolesFromGroup(userId int32) ([]*repository return roles, nil } +func (impl *ClusterServiceImpl) updateConnectionStatusForVirtualCluster(respMap *sync.Map, clusterId int, clusterName string) { 
+ connErr := fmt.Errorf("Get virtual cluster '%s' error: connection not setup for isolated clusters", clusterName) + respMap.Store(clusterId, connErr) +} + func (impl *ClusterServiceImpl) ConnectClustersInBatch(clusters []*ClusterBean, clusterExistInDb bool) { var wg sync.WaitGroup - respMap := make(map[int]error) - mutex := &sync.Mutex{} - + respMap := &sync.Map{} for idx, cluster := range clusters { + if cluster.IsVirtualCluster { + impl.updateConnectionStatusForVirtualCluster(respMap, cluster.Id, cluster.ClusterName) + continue + } wg.Add(1) go func(idx int, cluster *ClusterBean) { defer wg.Done() clusterConfig := cluster.GetClusterConfig() _, _, k8sClientSet, err := impl.K8sUtil.GetK8sConfigAndClients(clusterConfig) if err != nil { - mutex.Lock() - respMap[cluster.Id] = err - mutex.Unlock() + respMap.Store(cluster.Id, err) return } @@ -867,7 +875,7 @@ func (impl *ClusterServiceImpl) ConnectClustersInBatch(clusters []*ClusterBean, if !clusterExistInDb { id = idx } - impl.GetAndUpdateConnectionStatusForOneCluster(k8sClientSet, id, respMap, mutex) + impl.GetAndUpdateConnectionStatusForOneCluster(k8sClientSet, id, respMap) }(idx, cluster) } @@ -875,8 +883,19 @@ func (impl *ClusterServiceImpl) ConnectClustersInBatch(clusters []*ClusterBean, impl.HandleErrorInClusterConnections(clusters, respMap, clusterExistInDb) } -func (impl *ClusterServiceImpl) HandleErrorInClusterConnections(clusters []*ClusterBean, respMap map[int]error, clusterExistInDb bool) { - for id, err := range respMap { +func (impl *ClusterServiceImpl) HandleErrorInClusterConnections(clusters []*ClusterBean, respMap *sync.Map, clusterExistInDb bool) { + respMap.Range(func(key, value any) bool { + defer func() { + // defer to handle panic on type assertion + if r := recover(); r != nil { + impl.logger.Errorw("error in handling error in cluster connections", "key", key, "value", value, "err", r) + } + }() + id := key.(int) + var err error + if connectionError, ok := value.(error); ok { + err = connectionError + } errorInConnecting := "" if err != nil { errorInConnecting = err.Error() @@ -896,7 +915,8 @@ func (impl *ClusterServiceImpl) HandleErrorInClusterConnections(clusters []*Clus //id is index of the cluster in clusters array clusters[id].ErrorInConnecting = errorInConnecting } - } + return true + }) } func (impl *ClusterServiceImpl) ValidateKubeconfig(kubeConfig string) (map[string]*ValidateClusterBean, error) { @@ -1066,7 +1086,7 @@ func (impl *ClusterServiceImpl) ValidateKubeconfig(kubeConfig string) (map[strin } -func (impl *ClusterServiceImpl) GetAndUpdateConnectionStatusForOneCluster(k8sClientSet *kubernetes.Clientset, clusterId int, respMap map[int]error, mutex *sync.Mutex) { +func (impl *ClusterServiceImpl) GetAndUpdateConnectionStatusForOneCluster(k8sClientSet *kubernetes.Clientset, clusterId int, respMap *sync.Map) { response, err := impl.K8sUtil.GetLiveZCall(k8s.LiveZ, k8sClientSet) log.Println("received response for cluster livez status", "response", string(response), "err", err, "clusterId", clusterId) @@ -1092,9 +1112,8 @@ func (impl *ClusterServiceImpl) GetAndUpdateConnectionStatusForOneCluster(k8sCli } else if err == nil && string(response) != "ok" { err = fmt.Errorf("Validation failed with response : %s", string(response)) } - mutex.Lock() - respMap[clusterId] = err - mutex.Unlock() + + respMap.Store(clusterId, err) } func (impl *ClusterServiceImpl) ConvertClusterBeanObjectToCluster(bean *ClusterBean) *v1alpha1.Cluster { diff --git a/pkg/cluster/EnvironmentService.go b/pkg/cluster/EnvironmentService.go index 
820451c4293..77c9b856abb 100644 --- a/pkg/cluster/EnvironmentService.go +++ b/pkg/cluster/EnvironmentService.go @@ -24,6 +24,7 @@ import ( bean2 "github.com/devtron-labs/devtron/pkg/cluster/repository/bean" "strconv" "strings" + "sync" "time" util2 "github.com/devtron-labs/common-lib/utils/k8s" @@ -63,7 +64,7 @@ type EnvironmentService interface { GetByClusterId(id int) ([]*bean2.EnvironmentBean, error) GetCombinedEnvironmentListForDropDown(token string, isActionUserSuperAdmin bool, auth func(email string, object []string) map[string]bool) ([]*bean2.ClusterEnvDto, error) GetCombinedEnvironmentListForDropDownByClusterIds(token string, clusterIds []int, auth func(token string, object string) bool) ([]*bean2.ClusterEnvDto, error) - HandleErrorInClusterConnections(clusters []*ClusterBean, respMap map[int]error, clusterExistInDb bool) + HandleErrorInClusterConnections(clusters []*ClusterBean, respMap *sync.Map, clusterExistInDb bool) GetDetailsById(envId int) (*repository.Environment, error) } @@ -734,7 +735,7 @@ func (impl EnvironmentServiceImpl) Delete(deleteReq *bean2.EnvironmentBean, user return nil } -func (impl EnvironmentServiceImpl) HandleErrorInClusterConnections(clusters []*ClusterBean, respMap map[int]error, clusterExistInDb bool) { +func (impl EnvironmentServiceImpl) HandleErrorInClusterConnections(clusters []*ClusterBean, respMap *sync.Map, clusterExistInDb bool) { impl.clusterService.HandleErrorInClusterConnections(clusters, respMap, clusterExistInDb) } From be9d553eb3a00dbb5aa662a921f635d2e7cc8c8e Mon Sep 17 00:00:00 2001 From: Badal Kumar <130441461+badal773@users.noreply.github.com> Date: Wed, 4 Sep 2024 18:46:53 +0530 Subject: [PATCH 39/61] docs: modified the anchorlink in ingress.md (#5800) * modifying route * modified the anchorlink --------- Co-authored-by: Badal Kumar Prusty --- docs/setup/install/ingress-setup.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setup/install/ingress-setup.md b/docs/setup/install/ingress-setup.md index 8c0490db4f7..e889af66eb2 100644 --- a/docs/setup/install/ingress-setup.md +++ b/docs/setup/install/ingress-setup.md @@ -14,7 +14,7 @@ If you have successfully configured Ingress, refer [Post Ingress Setup](#enable- ## Enabling Ingress during Devtron Installation -If you are installing Devtron, you can enable Ingress either via [set flag](#using-set-flag) or by using [ingress-values.yaml](#using-ingress-valuesyaml) to specify the desired Ingress settings. +If you are installing Devtron, you can enable Ingress either via [set flag](#using-set-flag) or by using [ingress-values.yaml](#using-ingress-values.yaml) to specify the desired Ingress settings. 
### Using set flag From 1e0af22833fa5687c9386b4a6757efc1755d0eff Mon Sep 17 00:00:00 2001 From: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Date: Wed, 4 Sep 2024 19:18:25 +0530 Subject: [PATCH 40/61] query param split (#5801) --- .../user/repository/UserAuthRepository.go | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pkg/auth/user/repository/UserAuthRepository.go b/pkg/auth/user/repository/UserAuthRepository.go index f11bfef4156..844a162f3ed 100644 --- a/pkg/auth/user/repository/UserAuthRepository.go +++ b/pkg/auth/user/repository/UserAuthRepository.go @@ -945,7 +945,7 @@ func (impl UserAuthRepositoryImpl) GetRolesForWorkflow(workflow, entityName stri func (impl UserAuthRepositoryImpl) GetRoleForClusterEntity(cluster, namespace, group, kind, resource, action string) (RoleModel, error) { var model RoleModel - var queryParams []string + var queryParams []interface{} query := "SELECT * FROM roles WHERE entity = ? " queryParams = append(queryParams, bean.CLUSTER_ENTITIY) var err error @@ -986,7 +986,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForClusterEntity(cluster, namespace, g } else { query += " and action IS NULL ;" } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) if err != nil { impl.Logger.Errorw("error in getting roles for clusterEntity", "err", err, bean2.CLUSTER, cluster, "namespace", namespace, "kind", kind, "group", group, "resource", resource) @@ -998,7 +998,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForClusterEntity(cluster, namespace, g func (impl UserAuthRepositoryImpl) GetRoleForJobsEntity(entity, team, app, env, act string, workflow string) (RoleModel, error) { var model RoleModel var err error - var queryParams []string + var queryParams []interface{} if len(team) > 0 && len(act) > 0 { query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.action=? AND role.entity=? " queryParams = append(queryParams, team, act, entity) @@ -1020,7 +1020,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForJobsEntity(entity, team, app, env, query += " AND role.workflow = ? ;" queryParams = append(queryParams, workflow) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else { return model, nil } @@ -1034,7 +1034,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForChartGroupEntity(entity, app, act, var model RoleModel var err error if len(app) > 0 && act == "update" { - var queryParams []string + var queryParams []interface{} query := "SELECT role.* FROM roles role WHERE role.entity = ? AND role.entity_name=? AND role.action=?" queryParams = append(queryParams, entity, app, act) if len(accessType) == 0 { @@ -1043,9 +1043,9 @@ func (impl UserAuthRepositoryImpl) GetRoleForChartGroupEntity(entity, app, act, query += " and role.access_type = ? " queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if app == "" { - var queryParams []string + var queryParams []interface{} query := "SELECT role.* FROM roles role WHERE role.entity = ? AND role.action=?" queryParams = append(queryParams, entity, act) if len(accessType) == 0 { @@ -1054,7 +1054,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForChartGroupEntity(entity, app, act, query += " and role.access_type = ? 
" queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } if err != nil { impl.Logger.Errorw("error in getting role for chart group entity", "err", err, "entity", entity, "app", app, "act", act, "accessType", accessType) @@ -1066,7 +1066,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac var model RoleModel var err error if len(team) > 0 && len(app) > 0 && len(env) > 0 && len(act) > 0 { - var queryParams []string + var queryParams []interface{} query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.entity_name=? AND role.environment=? AND role.action=?" queryParams = append(queryParams, team, app, env, act) if oldValues { @@ -1076,9 +1076,9 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if len(team) > 0 && app == "" && len(env) > 0 && len(act) > 0 { - var queryParams []string + var queryParams []interface{} query := "SELECT role.* FROM roles role WHERE role.team=? AND coalesce(role.entity_name,'')=? AND role.environment=? AND role.action=?" queryParams = append(queryParams, team, EMPTY_PLACEHOLDER_FOR_QUERY, env, act) if oldValues { @@ -1087,9 +1087,9 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac query += " and role.access_type = ? " queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if len(team) > 0 && len(app) > 0 && env == "" && len(act) > 0 { - var queryParams []string + var queryParams []interface{} //this is applicable for all environment of a team query := "SELECT role.* FROM roles role WHERE role.team = ? AND role.entity_name=? AND coalesce(role.environment,'')=? AND role.action=?" queryParams = append(queryParams, team, app, EMPTY_PLACEHOLDER_FOR_QUERY, act) @@ -1100,9 +1100,9 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if len(team) > 0 && app == "" && env == "" && len(act) > 0 { - var queryParams []string + var queryParams []interface{} //this is applicable for all environment of a team query := "SELECT role.* FROM roles role WHERE role.team = ? AND coalesce(role.entity_name,'')=? AND coalesce(role.environment,'')=? AND role.action=?" queryParams = append(queryParams, team, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act) @@ -1113,9 +1113,9 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if team == "" && app == "" && env == "" && len(act) > 0 { - var queryParams []string + var queryParams []interface{} //this is applicable for super admin, all env, all team, all app query := "SELECT role.* FROM roles role WHERE coalesce(role.team,'') = ? AND coalesce(role.entity_name,'')=? AND coalesce(role.environment,'')=? AND role.action=?" 
queryParams = append(queryParams, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, EMPTY_PLACEHOLDER_FOR_QUERY, act) @@ -1126,7 +1126,7 @@ func (impl UserAuthRepositoryImpl) GetRoleForOtherEntity(team, app, env, act, ac queryParams = append(queryParams, accessType) } - _, err = impl.dbConnection.Query(&model, query, queryParams) + _, err = impl.dbConnection.Query(&model, query, queryParams...) } else if team == "" && app == "" && env == "" && act == "" { return model, nil } else { From 8f92d3fbda242743bdc1470ebbfdb6cc92a504e4 Mon Sep 17 00:00:00 2001 From: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Date: Thu, 5 Sep 2024 15:37:35 +0530 Subject: [PATCH 41/61] fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) * upgraded to /argo-cd/v2 v2.9.21 * argocd vendor files added * sync with common-lib --- go.mod | 17 +- go.sum | 41 +- .../argoproj/argo-cd/v2/common/common.go | 35 + .../v1alpha1/applicationset_types.go | 45 +- .../apis/application/v1alpha1/generated.pb.go | 4253 ++++++++++++----- .../apis/application/v1alpha1/generated.proto | 64 +- .../application/v1alpha1/openapi_generated.go | 588 ++- .../v2/pkg/apis/application/v1alpha1/types.go | 88 +- .../v1alpha1/zz_generated.deepcopy.go | 172 + .../v2/reposerver/apiclient/repository.pb.go | 400 +- .../argo-cd/v2/util/collections/maps.go | 13 + .../argoproj/argo-cd/v2/util/config/reader.go | 8 +- .../argoproj/argo-cd/v2/util/git/git.go | 10 +- .../argoproj/argo-cd/v2/util/helm/client.go | 8 +- .../argoproj/argo-cd/v2/util/helm/cmd.go | 56 +- .../argoproj/argo-cd/v2/util/io/files/util.go | 21 +- .../argoproj/argo-cd/v2/util/kube/kube.go | 6 +- .../argo-cd/v2/util/kube/portforwarder.go | 2 +- .../v2/util/settings/resources_filter.go | 1 - .../argo-cd/v2/util/settings/settings.go | 121 +- .../argoproj/argo-cd/v2/util/tls/tls.go | 10 +- .../gitops-engine/pkg/cache/cluster.go | 1185 +++++ .../argoproj/gitops-engine/pkg/cache/doc.go | 9 + .../gitops-engine/pkg/cache/predicates.go | 14 + .../gitops-engine/pkg/cache/references.go | 109 + .../gitops-engine/pkg/cache/resource.go | 101 + .../gitops-engine/pkg/cache/settings.go | 172 + .../gitops-engine/pkg/utils/kube/ctl.go | 28 +- .../pkg/utils/kube/testdata/openapi_v2.json | 516 ++ .../pkg/utils/kube/uniqueprotomodels.go | 190 + .../aws/aws-sdk-go/aws/auth/bearer/token.go | 50 + .../aws/credentials/ssocreds/provider.go | 75 +- .../credentials/ssocreds/sso_cached_token.go | 237 + .../credentials/ssocreds/token_provider.go | 139 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1574 ++++-- .../aws/aws-sdk-go/aws/session/credentials.go | 36 +- .../aws/aws-sdk-go/aws/session/session.go | 2 +- .../aws-sdk-go/aws/session/shared_config.go | 168 +- .../aws/aws-sdk-go/aws/signer/v4/v4.go | 2 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/private/protocol/rest/build.go | 4 + .../aws/aws-sdk-go/service/autoscaling/api.go | 128 +- .../aws/aws-sdk-go/service/s3/api.go | 307 +- .../service/s3/s3manager/upload_input.go | 2 +- .../aws/aws-sdk-go/service/ssooidc/api.go | 1682 +++++++ .../aws/aws-sdk-go/service/ssooidc/doc.go | 66 + .../aws/aws-sdk-go/service/ssooidc/errors.go | 107 + .../aws/aws-sdk-go/service/ssooidc/service.go | 106 + .../aws/aws-sdk-go/service/sts/api.go | 79 +- .../ghinstallation/v2/appsTransport.go | 5 + .../ghinstallation/v2/transport.go | 10 + .../common-lib/utils/bean/bean.go | 36 +- vendor/github.com/docker/cli/AUTHORS | 852 ++++ vendor/github.com/docker/cli/LICENSE | 191 + vendor/github.com/docker/cli/NOTICE | 19 + 
.../docker/cli/cli/config/types/authconfig.go | 22 + .../go-github/v53/github/actions_runners.go | 54 + .../go-github/v53/github/code-scanning.go | 73 + .../google/go-github/v53/github/codespaces.go | 254 + .../v53/github/codespaces_secrets.go | 405 ++ .../google/go-github/v53/github/event.go | 2 + .../go-github/v53/github/github-accessors.go | 624 +++ .../google/go-github/v53/github/github.go | 4 +- .../google/go-github/v53/github/messages.go | 1 + .../google/go-github/v53/github/orgs_rules.go | 105 + .../go-github/v53/github/repos_contents.go | 7 + .../go-github/v53/github/repos_rules.go | 447 ++ vendor/github.com/tidwall/gjson/README.md | 2 +- vendor/github.com/tidwall/gjson/gjson.go | 2 +- vendor/modules.txt | 24 +- vendor/oras.land/oras-go/v2/content/graph.go | 19 +- vendor/oras.land/oras-go/v2/content/reader.go | 7 +- .../oras.land/oras-go/v2/content/storage.go | 2 +- vendor/oras.land/oras-go/v2/errdef/errors.go | 1 + .../oras-go/v2/internal/spec/artifact.go | 12 +- .../oras-go/v2/registry/reference.go | 8 +- .../v2/registry/remote/errcode/errors.go | 6 +- .../oras-go/v2/registry/remote/referrers.go | 9 +- .../oras-go/v2/registry/remote/registry.go | 21 +- .../oras-go/v2/registry/remote/repository.go | 213 +- .../oras-go/v2/registry/remote/url.go | 2 +- .../oras-go/v2/registry/remote/warning.go | 100 + .../oras-go/v2/registry/repository.go | 9 +- 83 files changed, 14270 insertions(+), 2327 deletions(-) create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/doc.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/predicates.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/references.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/cache/settings.go create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/testdata/openapi_v2.json create mode 100644 vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/uniqueprotomodels.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go create mode 100644 vendor/github.com/docker/cli/AUTHORS create mode 100644 vendor/github.com/docker/cli/LICENSE create mode 100644 vendor/github.com/docker/cli/NOTICE create mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go create mode 100644 vendor/github.com/google/go-github/v53/github/codespaces.go create mode 100644 vendor/github.com/google/go-github/v53/github/codespaces_secrets.go create mode 100644 vendor/github.com/google/go-github/v53/github/orgs_rules.go create mode 100644 vendor/github.com/google/go-github/v53/github/repos_rules.go create mode 100644 vendor/oras.land/oras-go/v2/registry/remote/warning.go diff --git a/go.mod b/go.mod index 87c9902056f..ddbebb37bae 100644 --- a/go.mod +++ b/go.mod @@ -7,10 +7,10 @@ toolchain go1.21.8 require ( github.com/Masterminds/semver v1.5.0 github.com/Pallinder/go-randomdata 
v1.2.0 - github.com/argoproj/argo-cd/v2 v2.8.19 + github.com/argoproj/argo-cd/v2 v2.9.21 github.com/argoproj/argo-workflows/v3 v3.5.10 - github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 - github.com/aws/aws-sdk-go v1.44.290 + github.com/argoproj/gitops-engine v0.7.1-0.20240715141028-c68bce0f979c + github.com/aws/aws-sdk-go v1.44.317 github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 github.com/caarlos0/env v3.5.0+incompatible github.com/caarlos0/env/v6 v6.7.2 @@ -22,7 +22,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deckarep/golang-set v1.8.0 github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 - github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d + github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 github.com/devtron-labs/go-bitbucket v0.9.60-beta github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 github.com/evanphx/json-patch v5.7.0+incompatible @@ -60,7 +60,7 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/satori/go.uuid v1.2.0 github.com/stretchr/testify v1.8.4 - github.com/tidwall/gjson v1.14.3 + github.com/tidwall/gjson v1.14.4 github.com/tidwall/sjson v1.2.4 github.com/xanzy/go-gitlab v0.107.0 github.com/xeipuuv/gojsonschema v1.2.0 @@ -126,7 +126,7 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect - github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.6.0 // indirect github.com/casbin/govaluate v1.1.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -134,6 +134,7 @@ require ( github.com/cloudflare/circl v1.3.7 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/docker/cli v24.0.6+incompatible // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -160,7 +161,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-github/v53 v53.0.0 // indirect + github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/s2a-go v0.1.4 // indirect @@ -270,7 +271,7 @@ require ( k8s.io/kube-aggregator v0.26.4 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect mellium.im/sasl v0.3.1 // indirect - oras.land/oras-go/v2 v2.2.0 // indirect + oras.land/oras-go/v2 v2.3.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/go.sum b/go.sum index fb7bccc7732..fcbc80679f8 100644 --- a/go.sum +++ b/go.sum @@ -69,8 +69,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json 
v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.30.3 h1:hrqDB4cHFSHQf4gO3xu6YKQg8PqJpNjLYsQAFYHstqw= -github.com/alicebob/miniredis/v2 v2.30.3/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= +github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= +github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= @@ -83,17 +83,18 @@ github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/argoproj/argo-cd/v2 v2.8.19 h1:/oY2Hc2PjEK1nujcKnbylyL6XjeB7JrjwXlsNQuKmiE= -github.com/argoproj/argo-cd/v2 v2.8.19/go.mod h1:KlJ82U5ON9ZDddDIhXbW522l2T4wyBwKsFHZYHIcl2Y= -github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 h1:oTaLRbCwjnGtScIX2ZRdIEDsiDxonwh9/BbUxdXrjYc= -github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814/go.mod h1:1TchqKw9XmYYZluyEHa1dTJQoZgbV6PhabB/e8Wf3KY= +github.com/argoproj/argo-cd/v2 v2.9.21 h1:asVFgXfv0pvw7Q9STOhT75sWUU9cIKHRWWrAzJWmRgo= +github.com/argoproj/argo-cd/v2 v2.9.21/go.mod h1:V9EKQR1U5kJV/aLVRgUV46muOStnP6C5c4wTeT6nkoY= +github.com/argoproj/gitops-engine v0.7.1-0.20240715141028-c68bce0f979c h1:kkHx4mvqnUCLruADf1t/aO6yXnLcrl6rhsINaJomukc= +github.com/argoproj/gitops-engine v0.7.1-0.20240715141028-c68bce0f979c/go.mod h1:/GMN0JuoJUUpnKlNLp2Wn/mfK8sglFsdPn+eoxSddmg= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e h1:kuLQvJqwwRMQTheT4MFyKVM8Txncu21CHT4yBWUl1Mk= github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e/go.mod h1:xBN5PLx2MoK63dmPfMo/PGBvd77K1Y0m/rzZOe4cs1s= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.290 h1:Md4+os9DQtJjow0lWLMzeJljsimD+XS2xwwHDr5Z+Lk= github.com/aws/aws-sdk-go v1.44.290/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU= +github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= @@ -112,8 +113,8 @@ github.com/bmatcuk/doublestar/v4 v4.6.0 h1:HTuxyug8GyFbRkrffIpzNCSK4luc0TY3wzXvz github.com/bmatcuk/doublestar/v4 v4.6.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bombsimon/logrusr/v2 v2.0.1 
h1:1VgxVNQMCvjirZIYaT9JYn6sAVGVEcNtRE0y4mvaOAM= github.com/bombsimon/logrusr/v2 v2.0.1/go.mod h1:ByVAX+vHdLGAfdroiMg6q0zgq2FODY2lc5YJvzmOJio= -github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 h1:yaYcGQ7yEIGbsJfW/9z7v1sLiZg/5rSNNXwmMct5XaE= -github.com/bradleyfalzon/ghinstallation/v2 v2.5.0/go.mod h1:amcvPQMrRkWNdueWOjPytGL25xQGzox7425qMgzo+Vo= +github.com/bradleyfalzon/ghinstallation/v2 v2.6.0 h1:IRY7Xy588KylkoycsUhFpW7cdGpy5Y5BPsz4IfuJtGk= +github.com/bradleyfalzon/ghinstallation/v2 v2.6.0/go.mod h1:oQ3etOwN3TRH4EwgW5/7MxSVMGlMlzG/O8TU7eYdoSk= github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w= github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= @@ -187,8 +188,8 @@ github.com/devtron-labs/argo-workflows/v3 v3.5.10 h1:6rxQOesOzDz6SgQCMDQNHaehsKF github.com/devtron-labs/argo-workflows/v3 v3.5.10/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 h1:2+Q7Jdhpo/uMiaQiZZzAh+ZX7wEJIFuMFG6DEiMuo64= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8/go.mod h1:702R6WIf5y9UzKGoCGxQ+x3l5Ws+l0fXg2xlCpSGFZI= -github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d h1:+iWXiVOyf9E0bcTia6x2sLFTM7xJc+9Z8q+BfbYr6eM= -github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d/go.mod h1:a7aCClaxYfnyYEENSe1RnkQCeW2AwmCAPYsuvgk0aW0= +github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 h1:C5SMozwP2rVIKItqEZs3PtWkBhNnEeHIm9xtnDkK5VA= +github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63/go.mod h1:rAY9Xd6iz+OqNQ3nO3reVHapAVr1N6Osf4Irdc0A08Q= github.com/devtron-labs/go-bitbucket v0.9.60-beta h1:VEx1jvDgdtDPS6A1uUFoaEi0l1/oLhbr+90xOwr6sDU= github.com/devtron-labs/go-bitbucket v0.9.60-beta/go.mod h1:GnuiCesvh8xyHeMCb+twm8lBR/kQzJYSKL28ZfObp1Y= github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 h1:xwbTeijNTf4/j1v+tSfwVqwLVnReas/NqEKeQHvSTys= @@ -196,6 +197,8 @@ github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80/go.mod h1:yp github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -260,6 +263,8 @@ github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMj github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-jose/go-jose/v3 v3.0.3 
h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk= github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -381,8 +386,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v53 v53.0.0 h1:T1RyHbSnpHYnoF0ZYKiIPSgPtuJ8G6vgc0MKodXsQDQ= -github.com/google/go-github/v53 v53.0.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= +github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI= +github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -768,8 +773,8 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw= -github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= @@ -1172,8 +1177,6 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637/go.mod h1:BHsqpu/nsuzkT5BpiH1EMZPLyqSMM8JbIavyFACoFNk= @@ -1246,8 +1249,8 @@ launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80 launchpad.net/xmlpath v0.0.0-20130614043138-000000000004/go.mod h1:vqyExLOM3qBx7mvYRkoxjSCF945s0mbe7YynlKYXtsA= mellium.im/sasl v0.3.1 h1:wE0LW6g7U83vhvxjC1IY8DnXM+EU095yeo8XClvCdfo= 
mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= -oras.land/oras-go/v2 v2.2.0 h1:E1fqITD56Eg5neZbxBtAdZVgDHD6wBabJo6xESTcQyo= -oras.land/oras-go/v2 v2.2.0/go.mod h1:pXjn0+KfarspMHHNR3A56j3tgvr+mxArHuI8qVn59v8= +oras.land/oras-go/v2 v2.3.0 h1:lqX1aXdN+DAmDTKjiDyvq85cIaI4RkIKp/PghWlAGIU= +oras.land/oras-go/v2 v2.3.0/go.mod h1:GeAwLuC4G/JpNwkd+bSZ6SkDMGaaYglt6YK2WvZP7uQ= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/common.go b/vendor/github.com/argoproj/argo-cd/v2/common/common.go index b52fc859905..d7c2d24738b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/common/common.go +++ b/vendor/github.com/argoproj/argo-cd/v2/common/common.go @@ -12,6 +12,11 @@ import ( "google.golang.org/grpc/status" ) +// Component names +const ( + ApplicationController = "argocd-application-controller" +) + // Default service addresses and URLS of Argo CD internal services const ( // DefaultRepoServerAddr is the gRPC address of the Argo CD repo server @@ -34,6 +39,8 @@ const ( // ArgoCDTLSCertsConfigMapName contains TLS certificate data for connecting repositories. Will get mounted as volume to pods ArgoCDTLSCertsConfigMapName = "argocd-tls-certs-cm" ArgoCDGPGKeysConfigMapName = "argocd-gpg-keys-cm" + // ArgoCDAppControllerShardConfigMapName contains the application controller to shard mapping + ArgoCDAppControllerShardConfigMapName = "argocd-app-controller-shard-cm" ) // Some default configurables @@ -109,6 +116,8 @@ const ( // RoundRobinShardingAlgorithm is a flag value that can be opted for the sharding algorithm; it uses an equal distribution across all shards RoundRobinShardingAlgorithm = "round-robin" DefaultShardingAlgorithm = LegacyShardingAlgorithm + // AppControllerHeartbeatUpdateRetryCount is the retry count for updating the shard mapping in the shard-mapping ConfigMap used by the Application Controller + AppControllerHeartbeatUpdateRetryCount = 3 ) // Dex related constants @@ -138,6 +147,8 @@ const ( // LabelKeyAppInstance is the label key to use to uniquely identify the instance of an application // The Argo CD application name is used as the instance name LabelKeyAppInstance = "app.kubernetes.io/instance" + // LabelKeyAppName is the label key to use to uniquely identify the name of the Kubernetes application + LabelKeyAppName = "app.kubernetes.io/name" // LabelKeyLegacyApplicationName is the legacy label (v0.10 and below) and is superseded by 'app.kubernetes.io/instance' LabelKeyLegacyApplicationName = "applications.argoproj.io/app-name" // LabelKeySecretType contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds') @@ -207,10 +218,14 @@ const ( EnvPauseGenerationRequests = "ARGOCD_PAUSE_GEN_REQUESTS" // EnvControllerReplicas is the number of controller replicas EnvControllerReplicas = "ARGOCD_CONTROLLER_REPLICAS" + // EnvControllerHeartbeatTime is the interval at which the application controller updates its heartbeat to claim a shard + EnvControllerHeartbeatTime = "ARGOCD_CONTROLLER_HEARTBEAT_TIME" // EnvControllerShard is the shard number that should be handled by controller EnvControllerShard = "ARGOCD_CONTROLLER_SHARD" // EnvControllerShardingAlgorithm is the distribution sharding algorithm to be used: legacy or round-robin
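// For illustration only (assuming a standard multi-replica controller install,
// not something this patch configures), the algorithm is chosen at deploy time
// through the environment, e.g.:
//	ARGOCD_CONTROLLER_SHARDING_ALGORITHM=round-robin
//	ARGOCD_CONTROLLER_REPLICAS=3
// "legacy" derives a shard from a hash of the cluster id modulo the replica
// count, while "round-robin" spreads clusters evenly across shards.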
EnvControllerShardingAlgorithm = "ARGOCD_CONTROLLER_SHARDING_ALGORITHM" + // EnvEnableDynamicClusterDistribution enables dynamic sharding (ALPHA) + EnvEnableDynamicClusterDistribution = "ARGOCD_ENABLE_DYNAMIC_CLUSTER_DISTRIBUTION" // EnvEnableGRPCTimeHistogramEnv enables gRPC metrics collection EnvEnableGRPCTimeHistogramEnv = "ARGOCD_ENABLE_GRPC_TIME_HISTOGRAM" // EnvGithubAppCredsExpirationDuration controls the caching of Github app credentials. This value is in minutes (default: 60) @@ -233,6 +248,16 @@ const ( EnvCMPWorkDir = "ARGOCD_CMP_WORKDIR" // EnvGPGDataPath overrides the location where GPG keyring for signature verification is stored EnvGPGDataPath = "ARGOCD_GPG_DATA_PATH" + // EnvServerName is the name of the Argo CD server component, as specified by the value under the LabelKeyAppName label key. + EnvServerName = "ARGOCD_SERVER_NAME" + // EnvRepoServerName is the name of the Argo CD repo server component, as specified by the value under the LabelKeyAppName label key. + EnvRepoServerName = "ARGOCD_REPO_SERVER_NAME" + // EnvAppControllerName is the name of the Argo CD application controller component, as specified by the value under the LabelKeyAppName label key. + EnvAppControllerName = "ARGOCD_APPLICATION_CONTROLLER_NAME" + // EnvRedisName is the name of the Argo CD redis component, as specified by the value under the LabelKeyAppName label key. + EnvRedisName = "ARGOCD_REDIS_NAME" + // EnvRedisHaProxyName is the name of the Argo CD Redis HA proxy component, as specified by the value under the LabelKeyAppName label key. + EnvRedisHaProxyName = "ARGOCD_REDIS_HAPROXY_NAME" ) // Config Management Plugin related constants @@ -268,6 +293,16 @@ const ( DefaultGitRetryFactor = int64(2) ) +// Constants represent the pod selector labels of the Argo CD component names. These values are determined by the +// installation manifests.
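+// As a minimal sketch (illustrative only; this helper is an assumption, not part
+// of the patch), a component name resolves from its Env*Name variable with a
+// fallback to the default below:
+//
+//	func serverName() string {
+//		if v := os.Getenv(EnvServerName); v != "" { // "ARGOCD_SERVER_NAME"
+//			return v
+//		}
+//		return DefaultServerName // "argocd-server"
+//	}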
+const ( + DefaultServerName = "argocd-server" + DefaultRepoServerName = "argocd-repo-server" + DefaultApplicationControllerName = "argocd-application-controller" + DefaultRedisName = "argocd-redis" + DefaultRedisHaProxyName = "argocd-redis-ha-haproxy" +) + // GetGnuPGHomePath retrieves the path to use for GnuPG home directory, which is either taken from GNUPGHOME environment or a default value func GetGnuPGHomePath() string { if gnuPgHome := os.Getenv(EnvGnuPGHome); gnuPgHome == "" { diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go index b2e4e5d87a3..7a7cc971f9b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go @@ -63,11 +63,13 @@ type ApplicationSetSpec struct { PreservedFields *ApplicationPreservedFields `json:"preservedFields,omitempty" protobuf:"bytes,6,opt,name=preservedFields"` GoTemplateOptions []string `json:"goTemplateOptions,omitempty" protobuf:"bytes,7,opt,name=goTemplateOptions"` // ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators - ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"` + ApplyNestedSelectors bool `json:"applyNestedSelectors,omitempty" protobuf:"bytes,8,name=applyNestedSelectors"` + IgnoreApplicationDifferences ApplicationSetIgnoreDifferences `json:"ignoreApplicationDifferences,omitempty" protobuf:"bytes,9,name=ignoreApplicationDifferences"` } type ApplicationPreservedFields struct { Annotations []string `json:"annotations,omitempty" protobuf:"bytes,1,name=annotations"` + Labels []string `json:"labels,omitempty" protobuf:"bytes,2,name=labels"` } // ApplicationSetStrategy configures how generated Applications are updated in sequence. @@ -126,6 +128,39 @@ type ApplicationSetSyncPolicy struct { ApplicationsSync *ApplicationsSyncPolicy `json:"applicationsSync,omitempty" protobuf:"bytes,2,opt,name=applicationsSync,casttype=ApplicationsSyncPolicy"` } +// ApplicationSetIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live +// applications when applying changes from generated applications. +type ApplicationSetIgnoreDifferences []ApplicationSetResourceIgnoreDifferences + +func (a ApplicationSetIgnoreDifferences) ToApplicationIgnoreDifferences() []ResourceIgnoreDifferences { + var result []ResourceIgnoreDifferences + for _, item := range a { + result = append(result, item.ToApplicationResourceIgnoreDifferences()) + } + return result +} + +// ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live +// applications when applying changes from generated applications. +type ApplicationSetResourceIgnoreDifferences struct { + // Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications. + Name string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + // JSONPointers is a list of JSON pointers to fields to ignore differences for. + JSONPointers []string `json:"jsonPointers,omitempty" protobuf:"bytes,2,name=jsonPointers"` + // JQPathExpressions is a list of JQ path expressions to fields to ignore differences for. 
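+ // For example (the application name "guestbook" and the paths here are
+ // illustrative, not taken from this patch), an ApplicationSet spec could set:
+ //
+ //	ignoreApplicationDifferences:
+ //	- jsonPointers: ["/spec/syncPolicy"]
+ //	- name: guestbook
+ //	  jqPathExpressions: [".spec.source.helm.values"]
+ //
+ // Matching fields on generated Applications are then left untouched when the
+ // ApplicationSet controller re-applies its template.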
+ JQPathExpressions []string `json:"jqPathExpressions,omitempty" protobuf:"bytes,3,name=jqExpressions"` +} + +func (a *ApplicationSetResourceIgnoreDifferences) ToApplicationResourceIgnoreDifferences() ResourceIgnoreDifferences { + return ResourceIgnoreDifferences{ + Kind: ApplicationSchemaGroupVersionKind.Kind, + Group: ApplicationSchemaGroupVersionKind.Group, + Name: a.Name, + JSONPointers: a.JSONPointers, + JQPathExpressions: a.JQPathExpressions, + } +} + // ApplicationSetTemplate represents argocd ApplicationSpec type ApplicationSetTemplate struct { ApplicationSetTemplateMeta `json:"metadata" protobuf:"bytes,1,name=metadata"` @@ -442,6 +477,14 @@ type SCMProviderGeneratorGitlab struct { AllBranches bool `json:"allBranches,omitempty" protobuf:"varint,5,opt,name=allBranches"` // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false Insecure bool `json:"insecure,omitempty" protobuf:"varint,6,opt,name=insecure"` + // When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). Defaults to "true" + IncludeSharedProjects *bool `json:"includeSharedProjects,omitempty" protobuf:"varint,7,opt,name=includeSharedProjects"` + // Filter repos list based on Gitlab Topic. + Topic string `json:"topic,omitempty" protobuf:"bytes,8,opt,name=topic"` +} + +func (s *SCMProviderGeneratorGitlab) WillIncludeSharedProjects() bool { + return s.IncludeSharedProjects == nil || *s.IncludeSharedProjects } // SCMProviderGeneratorBitbucket defines connection info specific to Bitbucket Cloud (API version 2). diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go index 532ed95f9f5..13d8d444666 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go @@ -515,10 +515,40 @@ func (m *ApplicationSetNestedGenerator) XXX_DiscardUnknown() { var xxx_messageInfo_ApplicationSetNestedGenerator proto.InternalMessageInfo +func (m *ApplicationSetResourceIgnoreDifferences) Reset() { + *m = ApplicationSetResourceIgnoreDifferences{} +} +func (*ApplicationSetResourceIgnoreDifferences) ProtoMessage() {} +func (*ApplicationSetResourceIgnoreDifferences) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{17} +} +func (m *ApplicationSetResourceIgnoreDifferences) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ApplicationSetResourceIgnoreDifferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ApplicationSetResourceIgnoreDifferences) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplicationSetResourceIgnoreDifferences.Merge(m, src) +} +func (m *ApplicationSetResourceIgnoreDifferences) XXX_Size() int { + return m.Size() +} +func (m *ApplicationSetResourceIgnoreDifferences) XXX_DiscardUnknown() { + xxx_messageInfo_ApplicationSetResourceIgnoreDifferences.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplicationSetResourceIgnoreDifferences proto.InternalMessageInfo + func (m *ApplicationSetRolloutStep) Reset() { *m = ApplicationSetRolloutStep{} } func (*ApplicationSetRolloutStep) ProtoMessage() {} func (*ApplicationSetRolloutStep) Descriptor() ([]byte, []int) { - return 
fileDescriptor_030104ce3b95bcac, []int{17} + return fileDescriptor_030104ce3b95bcac, []int{18} } func (m *ApplicationSetRolloutStep) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -546,7 +576,7 @@ var xxx_messageInfo_ApplicationSetRolloutStep proto.InternalMessageInfo func (m *ApplicationSetRolloutStrategy) Reset() { *m = ApplicationSetRolloutStrategy{} } func (*ApplicationSetRolloutStrategy) ProtoMessage() {} func (*ApplicationSetRolloutStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{18} + return fileDescriptor_030104ce3b95bcac, []int{19} } func (m *ApplicationSetRolloutStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -574,7 +604,7 @@ var xxx_messageInfo_ApplicationSetRolloutStrategy proto.InternalMessageInfo func (m *ApplicationSetSpec) Reset() { *m = ApplicationSetSpec{} } func (*ApplicationSetSpec) ProtoMessage() {} func (*ApplicationSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{19} + return fileDescriptor_030104ce3b95bcac, []int{20} } func (m *ApplicationSetSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -602,7 +632,7 @@ var xxx_messageInfo_ApplicationSetSpec proto.InternalMessageInfo func (m *ApplicationSetStatus) Reset() { *m = ApplicationSetStatus{} } func (*ApplicationSetStatus) ProtoMessage() {} func (*ApplicationSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{20} + return fileDescriptor_030104ce3b95bcac, []int{21} } func (m *ApplicationSetStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -630,7 +660,7 @@ var xxx_messageInfo_ApplicationSetStatus proto.InternalMessageInfo func (m *ApplicationSetStrategy) Reset() { *m = ApplicationSetStrategy{} } func (*ApplicationSetStrategy) ProtoMessage() {} func (*ApplicationSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{21} + return fileDescriptor_030104ce3b95bcac, []int{22} } func (m *ApplicationSetStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -658,7 +688,7 @@ var xxx_messageInfo_ApplicationSetStrategy proto.InternalMessageInfo func (m *ApplicationSetSyncPolicy) Reset() { *m = ApplicationSetSyncPolicy{} } func (*ApplicationSetSyncPolicy) ProtoMessage() {} func (*ApplicationSetSyncPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{22} + return fileDescriptor_030104ce3b95bcac, []int{23} } func (m *ApplicationSetSyncPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -686,7 +716,7 @@ var xxx_messageInfo_ApplicationSetSyncPolicy proto.InternalMessageInfo func (m *ApplicationSetTemplate) Reset() { *m = ApplicationSetTemplate{} } func (*ApplicationSetTemplate) ProtoMessage() {} func (*ApplicationSetTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{23} + return fileDescriptor_030104ce3b95bcac, []int{24} } func (m *ApplicationSetTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -714,7 +744,7 @@ var xxx_messageInfo_ApplicationSetTemplate proto.InternalMessageInfo func (m *ApplicationSetTemplateMeta) Reset() { *m = ApplicationSetTemplateMeta{} } func (*ApplicationSetTemplateMeta) ProtoMessage() {} func (*ApplicationSetTemplateMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{24} + return fileDescriptor_030104ce3b95bcac, []int{25} } func (m *ApplicationSetTemplateMeta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -742,7 +772,7 @@ var 
xxx_messageInfo_ApplicationSetTemplateMeta proto.InternalMessageInfo func (m *ApplicationSetTerminalGenerator) Reset() { *m = ApplicationSetTerminalGenerator{} } func (*ApplicationSetTerminalGenerator) ProtoMessage() {} func (*ApplicationSetTerminalGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{25} + return fileDescriptor_030104ce3b95bcac, []int{26} } func (m *ApplicationSetTerminalGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -770,7 +800,7 @@ var xxx_messageInfo_ApplicationSetTerminalGenerator proto.InternalMessageInfo func (m *ApplicationSource) Reset() { *m = ApplicationSource{} } func (*ApplicationSource) ProtoMessage() {} func (*ApplicationSource) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{26} + return fileDescriptor_030104ce3b95bcac, []int{27} } func (m *ApplicationSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -798,7 +828,7 @@ var xxx_messageInfo_ApplicationSource proto.InternalMessageInfo func (m *ApplicationSourceDirectory) Reset() { *m = ApplicationSourceDirectory{} } func (*ApplicationSourceDirectory) ProtoMessage() {} func (*ApplicationSourceDirectory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{27} + return fileDescriptor_030104ce3b95bcac, []int{28} } func (m *ApplicationSourceDirectory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -826,7 +856,7 @@ var xxx_messageInfo_ApplicationSourceDirectory proto.InternalMessageInfo func (m *ApplicationSourceHelm) Reset() { *m = ApplicationSourceHelm{} } func (*ApplicationSourceHelm) ProtoMessage() {} func (*ApplicationSourceHelm) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{28} + return fileDescriptor_030104ce3b95bcac, []int{29} } func (m *ApplicationSourceHelm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -854,7 +884,7 @@ var xxx_messageInfo_ApplicationSourceHelm proto.InternalMessageInfo func (m *ApplicationSourceJsonnet) Reset() { *m = ApplicationSourceJsonnet{} } func (*ApplicationSourceJsonnet) ProtoMessage() {} func (*ApplicationSourceJsonnet) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{29} + return fileDescriptor_030104ce3b95bcac, []int{30} } func (m *ApplicationSourceJsonnet) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -882,7 +912,7 @@ var xxx_messageInfo_ApplicationSourceJsonnet proto.InternalMessageInfo func (m *ApplicationSourceKustomize) Reset() { *m = ApplicationSourceKustomize{} } func (*ApplicationSourceKustomize) ProtoMessage() {} func (*ApplicationSourceKustomize) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{30} + return fileDescriptor_030104ce3b95bcac, []int{31} } func (m *ApplicationSourceKustomize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -910,7 +940,7 @@ var xxx_messageInfo_ApplicationSourceKustomize proto.InternalMessageInfo func (m *ApplicationSourcePlugin) Reset() { *m = ApplicationSourcePlugin{} } func (*ApplicationSourcePlugin) ProtoMessage() {} func (*ApplicationSourcePlugin) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{31} + return fileDescriptor_030104ce3b95bcac, []int{32} } func (m *ApplicationSourcePlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -938,7 +968,7 @@ var xxx_messageInfo_ApplicationSourcePlugin proto.InternalMessageInfo func (m *ApplicationSourcePluginParameter) Reset() { *m = ApplicationSourcePluginParameter{} } func 
(*ApplicationSourcePluginParameter) ProtoMessage() {} func (*ApplicationSourcePluginParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{32} + return fileDescriptor_030104ce3b95bcac, []int{33} } func (m *ApplicationSourcePluginParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -966,7 +996,7 @@ var xxx_messageInfo_ApplicationSourcePluginParameter proto.InternalMessageInfo func (m *ApplicationSpec) Reset() { *m = ApplicationSpec{} } func (*ApplicationSpec) ProtoMessage() {} func (*ApplicationSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{33} + return fileDescriptor_030104ce3b95bcac, []int{34} } func (m *ApplicationSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -994,7 +1024,7 @@ var xxx_messageInfo_ApplicationSpec proto.InternalMessageInfo func (m *ApplicationStatus) Reset() { *m = ApplicationStatus{} } func (*ApplicationStatus) ProtoMessage() {} func (*ApplicationStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{34} + return fileDescriptor_030104ce3b95bcac, []int{35} } func (m *ApplicationStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1022,7 +1052,7 @@ var xxx_messageInfo_ApplicationStatus proto.InternalMessageInfo func (m *ApplicationSummary) Reset() { *m = ApplicationSummary{} } func (*ApplicationSummary) ProtoMessage() {} func (*ApplicationSummary) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{35} + return fileDescriptor_030104ce3b95bcac, []int{36} } func (m *ApplicationSummary) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1050,7 +1080,7 @@ var xxx_messageInfo_ApplicationSummary proto.InternalMessageInfo func (m *ApplicationTree) Reset() { *m = ApplicationTree{} } func (*ApplicationTree) ProtoMessage() {} func (*ApplicationTree) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{36} + return fileDescriptor_030104ce3b95bcac, []int{37} } func (m *ApplicationTree) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1078,7 +1108,7 @@ var xxx_messageInfo_ApplicationTree proto.InternalMessageInfo func (m *ApplicationWatchEvent) Reset() { *m = ApplicationWatchEvent{} } func (*ApplicationWatchEvent) ProtoMessage() {} func (*ApplicationWatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{37} + return fileDescriptor_030104ce3b95bcac, []int{38} } func (m *ApplicationWatchEvent) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1106,7 +1136,7 @@ var xxx_messageInfo_ApplicationWatchEvent proto.InternalMessageInfo func (m *Backoff) Reset() { *m = Backoff{} } func (*Backoff) ProtoMessage() {} func (*Backoff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{38} + return fileDescriptor_030104ce3b95bcac, []int{39} } func (m *Backoff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1134,7 +1164,7 @@ var xxx_messageInfo_Backoff proto.InternalMessageInfo func (m *BasicAuthBitbucketServer) Reset() { *m = BasicAuthBitbucketServer{} } func (*BasicAuthBitbucketServer) ProtoMessage() {} func (*BasicAuthBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{39} + return fileDescriptor_030104ce3b95bcac, []int{40} } func (m *BasicAuthBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1162,7 +1192,7 @@ var xxx_messageInfo_BasicAuthBitbucketServer proto.InternalMessageInfo func (m *BearerTokenBitbucketCloud) Reset() { *m = 
BearerTokenBitbucketCloud{} } func (*BearerTokenBitbucketCloud) ProtoMessage() {} func (*BearerTokenBitbucketCloud) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{40} + return fileDescriptor_030104ce3b95bcac, []int{41} } func (m *BearerTokenBitbucketCloud) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1190,7 +1220,7 @@ var xxx_messageInfo_BearerTokenBitbucketCloud proto.InternalMessageInfo func (m *ChartDetails) Reset() { *m = ChartDetails{} } func (*ChartDetails) ProtoMessage() {} func (*ChartDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{41} + return fileDescriptor_030104ce3b95bcac, []int{42} } func (m *ChartDetails) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1248,7 @@ var xxx_messageInfo_ChartDetails proto.InternalMessageInfo func (m *Cluster) Reset() { *m = Cluster{} } func (*Cluster) ProtoMessage() {} func (*Cluster) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{42} + return fileDescriptor_030104ce3b95bcac, []int{43} } func (m *Cluster) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1246,7 +1276,7 @@ var xxx_messageInfo_Cluster proto.InternalMessageInfo func (m *ClusterCacheInfo) Reset() { *m = ClusterCacheInfo{} } func (*ClusterCacheInfo) ProtoMessage() {} func (*ClusterCacheInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{43} + return fileDescriptor_030104ce3b95bcac, []int{44} } func (m *ClusterCacheInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1274,7 +1304,7 @@ var xxx_messageInfo_ClusterCacheInfo proto.InternalMessageInfo func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } func (*ClusterConfig) ProtoMessage() {} func (*ClusterConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{44} + return fileDescriptor_030104ce3b95bcac, []int{45} } func (m *ClusterConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1302,7 +1332,7 @@ var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo func (m *ClusterGenerator) Reset() { *m = ClusterGenerator{} } func (*ClusterGenerator) ProtoMessage() {} func (*ClusterGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{45} + return fileDescriptor_030104ce3b95bcac, []int{46} } func (m *ClusterGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1330,7 +1360,7 @@ var xxx_messageInfo_ClusterGenerator proto.InternalMessageInfo func (m *ClusterInfo) Reset() { *m = ClusterInfo{} } func (*ClusterInfo) ProtoMessage() {} func (*ClusterInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{46} + return fileDescriptor_030104ce3b95bcac, []int{47} } func (m *ClusterInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1358,7 +1388,7 @@ var xxx_messageInfo_ClusterInfo proto.InternalMessageInfo func (m *ClusterList) Reset() { *m = ClusterList{} } func (*ClusterList) ProtoMessage() {} func (*ClusterList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{47} + return fileDescriptor_030104ce3b95bcac, []int{48} } func (m *ClusterList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1386,7 +1416,7 @@ var xxx_messageInfo_ClusterList proto.InternalMessageInfo func (m *Command) Reset() { *m = Command{} } func (*Command) ProtoMessage() {} func (*Command) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{48} + return fileDescriptor_030104ce3b95bcac, []int{49} } 
func (m *Command) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1414,7 +1444,7 @@ var xxx_messageInfo_Command proto.InternalMessageInfo func (m *ComparedTo) Reset() { *m = ComparedTo{} } func (*ComparedTo) ProtoMessage() {} func (*ComparedTo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{49} + return fileDescriptor_030104ce3b95bcac, []int{50} } func (m *ComparedTo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1442,7 +1472,7 @@ var xxx_messageInfo_ComparedTo proto.InternalMessageInfo func (m *ComponentParameter) Reset() { *m = ComponentParameter{} } func (*ComponentParameter) ProtoMessage() {} func (*ComponentParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{50} + return fileDescriptor_030104ce3b95bcac, []int{51} } func (m *ComponentParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1470,7 +1500,7 @@ var xxx_messageInfo_ComponentParameter proto.InternalMessageInfo func (m *ConfigManagementPlugin) Reset() { *m = ConfigManagementPlugin{} } func (*ConfigManagementPlugin) ProtoMessage() {} func (*ConfigManagementPlugin) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{51} + return fileDescriptor_030104ce3b95bcac, []int{52} } func (m *ConfigManagementPlugin) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1498,7 +1528,7 @@ var xxx_messageInfo_ConfigManagementPlugin proto.InternalMessageInfo func (m *ConnectionState) Reset() { *m = ConnectionState{} } func (*ConnectionState) ProtoMessage() {} func (*ConnectionState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{52} + return fileDescriptor_030104ce3b95bcac, []int{53} } func (m *ConnectionState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1526,7 +1556,7 @@ var xxx_messageInfo_ConnectionState proto.InternalMessageInfo func (m *DuckTypeGenerator) Reset() { *m = DuckTypeGenerator{} } func (*DuckTypeGenerator) ProtoMessage() {} func (*DuckTypeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{53} + return fileDescriptor_030104ce3b95bcac, []int{54} } func (m *DuckTypeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1554,7 +1584,7 @@ var xxx_messageInfo_DuckTypeGenerator proto.InternalMessageInfo func (m *EnvEntry) Reset() { *m = EnvEntry{} } func (*EnvEntry) ProtoMessage() {} func (*EnvEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{54} + return fileDescriptor_030104ce3b95bcac, []int{55} } func (m *EnvEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1582,7 +1612,7 @@ var xxx_messageInfo_EnvEntry proto.InternalMessageInfo func (m *ErrApplicationNotAllowedToUseProject) Reset() { *m = ErrApplicationNotAllowedToUseProject{} } func (*ErrApplicationNotAllowedToUseProject) ProtoMessage() {} func (*ErrApplicationNotAllowedToUseProject) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{55} + return fileDescriptor_030104ce3b95bcac, []int{56} } func (m *ErrApplicationNotAllowedToUseProject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1610,7 +1640,7 @@ var xxx_messageInfo_ErrApplicationNotAllowedToUseProject proto.InternalMessageIn func (m *ExecProviderConfig) Reset() { *m = ExecProviderConfig{} } func (*ExecProviderConfig) ProtoMessage() {} func (*ExecProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{56} + return fileDescriptor_030104ce3b95bcac, []int{57} } func (m 
*ExecProviderConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1638,7 +1668,7 @@ var xxx_messageInfo_ExecProviderConfig proto.InternalMessageInfo func (m *GitDirectoryGeneratorItem) Reset() { *m = GitDirectoryGeneratorItem{} } func (*GitDirectoryGeneratorItem) ProtoMessage() {} func (*GitDirectoryGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{57} + return fileDescriptor_030104ce3b95bcac, []int{58} } func (m *GitDirectoryGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1666,7 +1696,7 @@ var xxx_messageInfo_GitDirectoryGeneratorItem proto.InternalMessageInfo func (m *GitFileGeneratorItem) Reset() { *m = GitFileGeneratorItem{} } func (*GitFileGeneratorItem) ProtoMessage() {} func (*GitFileGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{58} + return fileDescriptor_030104ce3b95bcac, []int{59} } func (m *GitFileGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1694,7 +1724,7 @@ var xxx_messageInfo_GitFileGeneratorItem proto.InternalMessageInfo func (m *GitGenerator) Reset() { *m = GitGenerator{} } func (*GitGenerator) ProtoMessage() {} func (*GitGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{59} + return fileDescriptor_030104ce3b95bcac, []int{60} } func (m *GitGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1722,7 +1752,7 @@ var xxx_messageInfo_GitGenerator proto.InternalMessageInfo func (m *GnuPGPublicKey) Reset() { *m = GnuPGPublicKey{} } func (*GnuPGPublicKey) ProtoMessage() {} func (*GnuPGPublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{60} + return fileDescriptor_030104ce3b95bcac, []int{61} } func (m *GnuPGPublicKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1750,7 +1780,7 @@ var xxx_messageInfo_GnuPGPublicKey proto.InternalMessageInfo func (m *GnuPGPublicKeyList) Reset() { *m = GnuPGPublicKeyList{} } func (*GnuPGPublicKeyList) ProtoMessage() {} func (*GnuPGPublicKeyList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{61} + return fileDescriptor_030104ce3b95bcac, []int{62} } func (m *GnuPGPublicKeyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1778,7 +1808,7 @@ var xxx_messageInfo_GnuPGPublicKeyList proto.InternalMessageInfo func (m *HealthStatus) Reset() { *m = HealthStatus{} } func (*HealthStatus) ProtoMessage() {} func (*HealthStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{62} + return fileDescriptor_030104ce3b95bcac, []int{63} } func (m *HealthStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1806,7 +1836,7 @@ var xxx_messageInfo_HealthStatus proto.InternalMessageInfo func (m *HelmFileParameter) Reset() { *m = HelmFileParameter{} } func (*HelmFileParameter) ProtoMessage() {} func (*HelmFileParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{63} + return fileDescriptor_030104ce3b95bcac, []int{64} } func (m *HelmFileParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1834,7 +1864,7 @@ var xxx_messageInfo_HelmFileParameter proto.InternalMessageInfo func (m *HelmOptions) Reset() { *m = HelmOptions{} } func (*HelmOptions) ProtoMessage() {} func (*HelmOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{64} + return fileDescriptor_030104ce3b95bcac, []int{65} } func (m *HelmOptions) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -1862,7 +1892,7 @@ var xxx_messageInfo_HelmOptions proto.InternalMessageInfo func (m *HelmParameter) Reset() { *m = HelmParameter{} } func (*HelmParameter) ProtoMessage() {} func (*HelmParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{65} + return fileDescriptor_030104ce3b95bcac, []int{66} } func (m *HelmParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1890,7 +1920,7 @@ var xxx_messageInfo_HelmParameter proto.InternalMessageInfo func (m *HostInfo) Reset() { *m = HostInfo{} } func (*HostInfo) ProtoMessage() {} func (*HostInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{66} + return fileDescriptor_030104ce3b95bcac, []int{67} } func (m *HostInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1918,7 +1948,7 @@ var xxx_messageInfo_HostInfo proto.InternalMessageInfo func (m *HostResourceInfo) Reset() { *m = HostResourceInfo{} } func (*HostResourceInfo) ProtoMessage() {} func (*HostResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{67} + return fileDescriptor_030104ce3b95bcac, []int{68} } func (m *HostResourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1946,7 +1976,7 @@ var xxx_messageInfo_HostResourceInfo proto.InternalMessageInfo func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{68} + return fileDescriptor_030104ce3b95bcac, []int{69} } func (m *Info) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1974,7 +2004,7 @@ var xxx_messageInfo_Info proto.InternalMessageInfo func (m *InfoItem) Reset() { *m = InfoItem{} } func (*InfoItem) ProtoMessage() {} func (*InfoItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{69} + return fileDescriptor_030104ce3b95bcac, []int{70} } func (m *InfoItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2002,7 +2032,7 @@ var xxx_messageInfo_InfoItem proto.InternalMessageInfo func (m *JWTToken) Reset() { *m = JWTToken{} } func (*JWTToken) ProtoMessage() {} func (*JWTToken) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{70} + return fileDescriptor_030104ce3b95bcac, []int{71} } func (m *JWTToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2030,7 +2060,7 @@ var xxx_messageInfo_JWTToken proto.InternalMessageInfo func (m *JWTTokens) Reset() { *m = JWTTokens{} } func (*JWTTokens) ProtoMessage() {} func (*JWTTokens) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{71} + return fileDescriptor_030104ce3b95bcac, []int{72} } func (m *JWTTokens) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2058,7 +2088,7 @@ var xxx_messageInfo_JWTTokens proto.InternalMessageInfo func (m *JsonnetVar) Reset() { *m = JsonnetVar{} } func (*JsonnetVar) ProtoMessage() {} func (*JsonnetVar) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{72} + return fileDescriptor_030104ce3b95bcac, []int{73} } func (m *JsonnetVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2086,7 +2116,7 @@ var xxx_messageInfo_JsonnetVar proto.InternalMessageInfo func (m *KnownTypeField) Reset() { *m = KnownTypeField{} } func (*KnownTypeField) ProtoMessage() {} func (*KnownTypeField) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{73} + return fileDescriptor_030104ce3b95bcac, []int{74} } func (m *KnownTypeField) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2111,10 +2141,38 @@ func (m *KnownTypeField) XXX_DiscardUnknown() { var xxx_messageInfo_KnownTypeField proto.InternalMessageInfo +func (m *KustomizeGvk) Reset() { *m = KustomizeGvk{} } +func (*KustomizeGvk) ProtoMessage() {} +func (*KustomizeGvk) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{75} +} +func (m *KustomizeGvk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KustomizeGvk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KustomizeGvk) XXX_Merge(src proto.Message) { + xxx_messageInfo_KustomizeGvk.Merge(m, src) +} +func (m *KustomizeGvk) XXX_Size() int { + return m.Size() +} +func (m *KustomizeGvk) XXX_DiscardUnknown() { + xxx_messageInfo_KustomizeGvk.DiscardUnknown(m) +} + +var xxx_messageInfo_KustomizeGvk proto.InternalMessageInfo + func (m *KustomizeOptions) Reset() { *m = KustomizeOptions{} } func (*KustomizeOptions) ProtoMessage() {} func (*KustomizeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{74} + return fileDescriptor_030104ce3b95bcac, []int{76} } func (m *KustomizeOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2139,10 +2197,38 @@ func (m *KustomizeOptions) XXX_DiscardUnknown() { var xxx_messageInfo_KustomizeOptions proto.InternalMessageInfo +func (m *KustomizePatch) Reset() { *m = KustomizePatch{} } +func (*KustomizePatch) ProtoMessage() {} +func (*KustomizePatch) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{77} +} +func (m *KustomizePatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KustomizePatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KustomizePatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_KustomizePatch.Merge(m, src) +} +func (m *KustomizePatch) XXX_Size() int { + return m.Size() +} +func (m *KustomizePatch) XXX_DiscardUnknown() { + xxx_messageInfo_KustomizePatch.DiscardUnknown(m) +} + +var xxx_messageInfo_KustomizePatch proto.InternalMessageInfo + func (m *KustomizeReplica) Reset() { *m = KustomizeReplica{} } func (*KustomizeReplica) ProtoMessage() {} func (*KustomizeReplica) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{75} + return fileDescriptor_030104ce3b95bcac, []int{78} } func (m *KustomizeReplica) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2167,10 +2253,66 @@ func (m *KustomizeReplica) XXX_DiscardUnknown() { var xxx_messageInfo_KustomizeReplica proto.InternalMessageInfo +func (m *KustomizeResId) Reset() { *m = KustomizeResId{} } +func (*KustomizeResId) ProtoMessage() {} +func (*KustomizeResId) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{79} +} +func (m *KustomizeResId) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KustomizeResId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KustomizeResId) XXX_Merge(src proto.Message) { + xxx_messageInfo_KustomizeResId.Merge(m, src) +} +func (m *KustomizeResId) XXX_Size() int { + return m.Size() +} +func (m *KustomizeResId) XXX_DiscardUnknown() { + 
xxx_messageInfo_KustomizeResId.DiscardUnknown(m) +} + +var xxx_messageInfo_KustomizeResId proto.InternalMessageInfo + +func (m *KustomizeSelector) Reset() { *m = KustomizeSelector{} } +func (*KustomizeSelector) ProtoMessage() {} +func (*KustomizeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{80} +} +func (m *KustomizeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KustomizeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KustomizeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_KustomizeSelector.Merge(m, src) +} +func (m *KustomizeSelector) XXX_Size() int { + return m.Size() +} +func (m *KustomizeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_KustomizeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_KustomizeSelector proto.InternalMessageInfo + func (m *ListGenerator) Reset() { *m = ListGenerator{} } func (*ListGenerator) ProtoMessage() {} func (*ListGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{76} + return fileDescriptor_030104ce3b95bcac, []int{81} } func (m *ListGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2198,7 +2340,7 @@ var xxx_messageInfo_ListGenerator proto.InternalMessageInfo func (m *ManagedNamespaceMetadata) Reset() { *m = ManagedNamespaceMetadata{} } func (*ManagedNamespaceMetadata) ProtoMessage() {} func (*ManagedNamespaceMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{77} + return fileDescriptor_030104ce3b95bcac, []int{82} } func (m *ManagedNamespaceMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2226,7 +2368,7 @@ var xxx_messageInfo_ManagedNamespaceMetadata proto.InternalMessageInfo func (m *MatrixGenerator) Reset() { *m = MatrixGenerator{} } func (*MatrixGenerator) ProtoMessage() {} func (*MatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{78} + return fileDescriptor_030104ce3b95bcac, []int{83} } func (m *MatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2254,7 +2396,7 @@ var xxx_messageInfo_MatrixGenerator proto.InternalMessageInfo func (m *MergeGenerator) Reset() { *m = MergeGenerator{} } func (*MergeGenerator) ProtoMessage() {} func (*MergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{79} + return fileDescriptor_030104ce3b95bcac, []int{84} } func (m *MergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2282,7 +2424,7 @@ var xxx_messageInfo_MergeGenerator proto.InternalMessageInfo func (m *NestedMatrixGenerator) Reset() { *m = NestedMatrixGenerator{} } func (*NestedMatrixGenerator) ProtoMessage() {} func (*NestedMatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{80} + return fileDescriptor_030104ce3b95bcac, []int{85} } func (m *NestedMatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2310,7 +2452,7 @@ var xxx_messageInfo_NestedMatrixGenerator proto.InternalMessageInfo func (m *NestedMergeGenerator) Reset() { *m = NestedMergeGenerator{} } func (*NestedMergeGenerator) ProtoMessage() {} func (*NestedMergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{81} + return fileDescriptor_030104ce3b95bcac, []int{86} } func (m *NestedMergeGenerator) XXX_Unmarshal(b []byte) error { 
return m.Unmarshal(b) @@ -2338,7 +2480,7 @@ var xxx_messageInfo_NestedMergeGenerator proto.InternalMessageInfo func (m *Operation) Reset() { *m = Operation{} } func (*Operation) ProtoMessage() {} func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{82} + return fileDescriptor_030104ce3b95bcac, []int{87} } func (m *Operation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2366,7 +2508,7 @@ var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *OperationInitiator) Reset() { *m = OperationInitiator{} } func (*OperationInitiator) ProtoMessage() {} func (*OperationInitiator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{83} + return fileDescriptor_030104ce3b95bcac, []int{88} } func (m *OperationInitiator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2394,7 +2536,7 @@ var xxx_messageInfo_OperationInitiator proto.InternalMessageInfo func (m *OperationState) Reset() { *m = OperationState{} } func (*OperationState) ProtoMessage() {} func (*OperationState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{84} + return fileDescriptor_030104ce3b95bcac, []int{89} } func (m *OperationState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2422,7 +2564,7 @@ var xxx_messageInfo_OperationState proto.InternalMessageInfo func (m *OptionalArray) Reset() { *m = OptionalArray{} } func (*OptionalArray) ProtoMessage() {} func (*OptionalArray) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{85} + return fileDescriptor_030104ce3b95bcac, []int{90} } func (m *OptionalArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2450,7 +2592,7 @@ var xxx_messageInfo_OptionalArray proto.InternalMessageInfo func (m *OptionalMap) Reset() { *m = OptionalMap{} } func (*OptionalMap) ProtoMessage() {} func (*OptionalMap) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{86} + return fileDescriptor_030104ce3b95bcac, []int{91} } func (m *OptionalMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2478,7 +2620,7 @@ var xxx_messageInfo_OptionalMap proto.InternalMessageInfo func (m *OrphanedResourceKey) Reset() { *m = OrphanedResourceKey{} } func (*OrphanedResourceKey) ProtoMessage() {} func (*OrphanedResourceKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{87} + return fileDescriptor_030104ce3b95bcac, []int{92} } func (m *OrphanedResourceKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2506,7 +2648,7 @@ var xxx_messageInfo_OrphanedResourceKey proto.InternalMessageInfo func (m *OrphanedResourcesMonitorSettings) Reset() { *m = OrphanedResourcesMonitorSettings{} } func (*OrphanedResourcesMonitorSettings) ProtoMessage() {} func (*OrphanedResourcesMonitorSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{88} + return fileDescriptor_030104ce3b95bcac, []int{93} } func (m *OrphanedResourcesMonitorSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2534,7 +2676,7 @@ var xxx_messageInfo_OrphanedResourcesMonitorSettings proto.InternalMessageInfo func (m *OverrideIgnoreDiff) Reset() { *m = OverrideIgnoreDiff{} } func (*OverrideIgnoreDiff) ProtoMessage() {} func (*OverrideIgnoreDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{89} + return fileDescriptor_030104ce3b95bcac, []int{94} } func (m *OverrideIgnoreDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2562,7 +2704,7 @@ 
var xxx_messageInfo_OverrideIgnoreDiff proto.InternalMessageInfo func (m *PluginConfigMapRef) Reset() { *m = PluginConfigMapRef{} } func (*PluginConfigMapRef) ProtoMessage() {} func (*PluginConfigMapRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{90} + return fileDescriptor_030104ce3b95bcac, []int{95} } func (m *PluginConfigMapRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2590,7 +2732,7 @@ var xxx_messageInfo_PluginConfigMapRef proto.InternalMessageInfo func (m *PluginGenerator) Reset() { *m = PluginGenerator{} } func (*PluginGenerator) ProtoMessage() {} func (*PluginGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{91} + return fileDescriptor_030104ce3b95bcac, []int{96} } func (m *PluginGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2618,7 +2760,7 @@ var xxx_messageInfo_PluginGenerator proto.InternalMessageInfo func (m *PluginInput) Reset() { *m = PluginInput{} } func (*PluginInput) ProtoMessage() {} func (*PluginInput) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{92} + return fileDescriptor_030104ce3b95bcac, []int{97} } func (m *PluginInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2646,7 +2788,7 @@ var xxx_messageInfo_PluginInput proto.InternalMessageInfo func (m *ProjectRole) Reset() { *m = ProjectRole{} } func (*ProjectRole) ProtoMessage() {} func (*ProjectRole) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{93} + return fileDescriptor_030104ce3b95bcac, []int{98} } func (m *ProjectRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2674,7 +2816,7 @@ var xxx_messageInfo_ProjectRole proto.InternalMessageInfo func (m *PullRequestGenerator) Reset() { *m = PullRequestGenerator{} } func (*PullRequestGenerator) ProtoMessage() {} func (*PullRequestGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{94} + return fileDescriptor_030104ce3b95bcac, []int{99} } func (m *PullRequestGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2702,7 +2844,7 @@ var xxx_messageInfo_PullRequestGenerator proto.InternalMessageInfo func (m *PullRequestGeneratorAzureDevOps) Reset() { *m = PullRequestGeneratorAzureDevOps{} } func (*PullRequestGeneratorAzureDevOps) ProtoMessage() {} func (*PullRequestGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{95} + return fileDescriptor_030104ce3b95bcac, []int{100} } func (m *PullRequestGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2730,7 +2872,7 @@ var xxx_messageInfo_PullRequestGeneratorAzureDevOps proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucket) Reset() { *m = PullRequestGeneratorBitbucket{} } func (*PullRequestGeneratorBitbucket) ProtoMessage() {} func (*PullRequestGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{96} + return fileDescriptor_030104ce3b95bcac, []int{101} } func (m *PullRequestGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2758,7 +2900,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucket proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucketServer) Reset() { *m = PullRequestGeneratorBitbucketServer{} } func (*PullRequestGeneratorBitbucketServer) ProtoMessage() {} func (*PullRequestGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{97} + return 
fileDescriptor_030104ce3b95bcac, []int{102} } func (m *PullRequestGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2786,7 +2928,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucketServer proto.InternalMessageInf func (m *PullRequestGeneratorFilter) Reset() { *m = PullRequestGeneratorFilter{} } func (*PullRequestGeneratorFilter) ProtoMessage() {} func (*PullRequestGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{98} + return fileDescriptor_030104ce3b95bcac, []int{103} } func (m *PullRequestGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2814,7 +2956,7 @@ var xxx_messageInfo_PullRequestGeneratorFilter proto.InternalMessageInfo func (m *PullRequestGeneratorGitLab) Reset() { *m = PullRequestGeneratorGitLab{} } func (*PullRequestGeneratorGitLab) ProtoMessage() {} func (*PullRequestGeneratorGitLab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{99} + return fileDescriptor_030104ce3b95bcac, []int{104} } func (m *PullRequestGeneratorGitLab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2842,7 +2984,7 @@ var xxx_messageInfo_PullRequestGeneratorGitLab proto.InternalMessageInfo func (m *PullRequestGeneratorGitea) Reset() { *m = PullRequestGeneratorGitea{} } func (*PullRequestGeneratorGitea) ProtoMessage() {} func (*PullRequestGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{100} + return fileDescriptor_030104ce3b95bcac, []int{105} } func (m *PullRequestGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2870,7 +3012,7 @@ var xxx_messageInfo_PullRequestGeneratorGitea proto.InternalMessageInfo func (m *PullRequestGeneratorGithub) Reset() { *m = PullRequestGeneratorGithub{} } func (*PullRequestGeneratorGithub) ProtoMessage() {} func (*PullRequestGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{101} + return fileDescriptor_030104ce3b95bcac, []int{106} } func (m *PullRequestGeneratorGithub) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2898,7 +3040,7 @@ var xxx_messageInfo_PullRequestGeneratorGithub proto.InternalMessageInfo func (m *RefTarget) Reset() { *m = RefTarget{} } func (*RefTarget) ProtoMessage() {} func (*RefTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{102} + return fileDescriptor_030104ce3b95bcac, []int{107} } func (m *RefTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2926,7 +3068,7 @@ var xxx_messageInfo_RefTarget proto.InternalMessageInfo func (m *RepoCreds) Reset() { *m = RepoCreds{} } func (*RepoCreds) ProtoMessage() {} func (*RepoCreds) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{103} + return fileDescriptor_030104ce3b95bcac, []int{108} } func (m *RepoCreds) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2954,7 +3096,7 @@ var xxx_messageInfo_RepoCreds proto.InternalMessageInfo func (m *RepoCredsList) Reset() { *m = RepoCredsList{} } func (*RepoCredsList) ProtoMessage() {} func (*RepoCredsList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{104} + return fileDescriptor_030104ce3b95bcac, []int{109} } func (m *RepoCredsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2982,7 +3124,7 @@ var xxx_messageInfo_RepoCredsList proto.InternalMessageInfo func (m *Repository) Reset() { *m = Repository{} } func (*Repository) ProtoMessage() {} func (*Repository) Descriptor() 
([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{105} + return fileDescriptor_030104ce3b95bcac, []int{110} } func (m *Repository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3010,7 +3152,7 @@ var xxx_messageInfo_Repository proto.InternalMessageInfo func (m *RepositoryCertificate) Reset() { *m = RepositoryCertificate{} } func (*RepositoryCertificate) ProtoMessage() {} func (*RepositoryCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{106} + return fileDescriptor_030104ce3b95bcac, []int{111} } func (m *RepositoryCertificate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3038,7 +3180,7 @@ var xxx_messageInfo_RepositoryCertificate proto.InternalMessageInfo func (m *RepositoryCertificateList) Reset() { *m = RepositoryCertificateList{} } func (*RepositoryCertificateList) ProtoMessage() {} func (*RepositoryCertificateList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{107} + return fileDescriptor_030104ce3b95bcac, []int{112} } func (m *RepositoryCertificateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3066,7 +3208,7 @@ var xxx_messageInfo_RepositoryCertificateList proto.InternalMessageInfo func (m *RepositoryList) Reset() { *m = RepositoryList{} } func (*RepositoryList) ProtoMessage() {} func (*RepositoryList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{108} + return fileDescriptor_030104ce3b95bcac, []int{113} } func (m *RepositoryList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3094,7 +3236,7 @@ var xxx_messageInfo_RepositoryList proto.InternalMessageInfo func (m *ResourceAction) Reset() { *m = ResourceAction{} } func (*ResourceAction) ProtoMessage() {} func (*ResourceAction) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{109} + return fileDescriptor_030104ce3b95bcac, []int{114} } func (m *ResourceAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3122,7 +3264,7 @@ var xxx_messageInfo_ResourceAction proto.InternalMessageInfo func (m *ResourceActionDefinition) Reset() { *m = ResourceActionDefinition{} } func (*ResourceActionDefinition) ProtoMessage() {} func (*ResourceActionDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{110} + return fileDescriptor_030104ce3b95bcac, []int{115} } func (m *ResourceActionDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3150,7 +3292,7 @@ var xxx_messageInfo_ResourceActionDefinition proto.InternalMessageInfo func (m *ResourceActionParam) Reset() { *m = ResourceActionParam{} } func (*ResourceActionParam) ProtoMessage() {} func (*ResourceActionParam) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{111} + return fileDescriptor_030104ce3b95bcac, []int{116} } func (m *ResourceActionParam) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3178,7 +3320,7 @@ var xxx_messageInfo_ResourceActionParam proto.InternalMessageInfo func (m *ResourceActions) Reset() { *m = ResourceActions{} } func (*ResourceActions) ProtoMessage() {} func (*ResourceActions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{112} + return fileDescriptor_030104ce3b95bcac, []int{117} } func (m *ResourceActions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3206,7 +3348,7 @@ var xxx_messageInfo_ResourceActions proto.InternalMessageInfo func (m *ResourceDiff) Reset() { *m = ResourceDiff{} } func (*ResourceDiff) ProtoMessage() {} func 
(*ResourceDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{113} + return fileDescriptor_030104ce3b95bcac, []int{118} } func (m *ResourceDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3234,7 +3376,7 @@ var xxx_messageInfo_ResourceDiff proto.InternalMessageInfo func (m *ResourceIgnoreDifferences) Reset() { *m = ResourceIgnoreDifferences{} } func (*ResourceIgnoreDifferences) ProtoMessage() {} func (*ResourceIgnoreDifferences) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{114} + return fileDescriptor_030104ce3b95bcac, []int{119} } func (m *ResourceIgnoreDifferences) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3262,7 +3404,7 @@ var xxx_messageInfo_ResourceIgnoreDifferences proto.InternalMessageInfo func (m *ResourceNetworkingInfo) Reset() { *m = ResourceNetworkingInfo{} } func (*ResourceNetworkingInfo) ProtoMessage() {} func (*ResourceNetworkingInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{115} + return fileDescriptor_030104ce3b95bcac, []int{120} } func (m *ResourceNetworkingInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3290,7 +3432,7 @@ var xxx_messageInfo_ResourceNetworkingInfo proto.InternalMessageInfo func (m *ResourceNode) Reset() { *m = ResourceNode{} } func (*ResourceNode) ProtoMessage() {} func (*ResourceNode) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{116} + return fileDescriptor_030104ce3b95bcac, []int{121} } func (m *ResourceNode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3318,7 +3460,7 @@ var xxx_messageInfo_ResourceNode proto.InternalMessageInfo func (m *ResourceOverride) Reset() { *m = ResourceOverride{} } func (*ResourceOverride) ProtoMessage() {} func (*ResourceOverride) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{117} + return fileDescriptor_030104ce3b95bcac, []int{122} } func (m *ResourceOverride) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3346,7 +3488,7 @@ var xxx_messageInfo_ResourceOverride proto.InternalMessageInfo func (m *ResourceRef) Reset() { *m = ResourceRef{} } func (*ResourceRef) ProtoMessage() {} func (*ResourceRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{118} + return fileDescriptor_030104ce3b95bcac, []int{123} } func (m *ResourceRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3374,7 +3516,7 @@ var xxx_messageInfo_ResourceRef proto.InternalMessageInfo func (m *ResourceResult) Reset() { *m = ResourceResult{} } func (*ResourceResult) ProtoMessage() {} func (*ResourceResult) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{119} + return fileDescriptor_030104ce3b95bcac, []int{124} } func (m *ResourceResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3402,7 +3544,7 @@ var xxx_messageInfo_ResourceResult proto.InternalMessageInfo func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } func (*ResourceStatus) ProtoMessage() {} func (*ResourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{120} + return fileDescriptor_030104ce3b95bcac, []int{125} } func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3430,7 +3572,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return 
fileDescriptor_030104ce3b95bcac, []int{121} + return fileDescriptor_030104ce3b95bcac, []int{126} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3458,7 +3600,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *RevisionHistory) Reset() { *m = RevisionHistory{} } func (*RevisionHistory) ProtoMessage() {} func (*RevisionHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{122} + return fileDescriptor_030104ce3b95bcac, []int{127} } func (m *RevisionHistory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3486,7 +3628,7 @@ var xxx_messageInfo_RevisionHistory proto.InternalMessageInfo func (m *RevisionMetadata) Reset() { *m = RevisionMetadata{} } func (*RevisionMetadata) ProtoMessage() {} func (*RevisionMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{123} + return fileDescriptor_030104ce3b95bcac, []int{128} } func (m *RevisionMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3514,7 +3656,7 @@ var xxx_messageInfo_RevisionMetadata proto.InternalMessageInfo func (m *SCMProviderGenerator) Reset() { *m = SCMProviderGenerator{} } func (*SCMProviderGenerator) ProtoMessage() {} func (*SCMProviderGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{124} + return fileDescriptor_030104ce3b95bcac, []int{129} } func (m *SCMProviderGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3542,7 +3684,7 @@ var xxx_messageInfo_SCMProviderGenerator proto.InternalMessageInfo func (m *SCMProviderGeneratorAWSCodeCommit) Reset() { *m = SCMProviderGeneratorAWSCodeCommit{} } func (*SCMProviderGeneratorAWSCodeCommit) ProtoMessage() {} func (*SCMProviderGeneratorAWSCodeCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{125} + return fileDescriptor_030104ce3b95bcac, []int{130} } func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3570,7 +3712,7 @@ var xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit proto.InternalMessageInfo func (m *SCMProviderGeneratorAzureDevOps) Reset() { *m = SCMProviderGeneratorAzureDevOps{} } func (*SCMProviderGeneratorAzureDevOps) ProtoMessage() {} func (*SCMProviderGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{126} + return fileDescriptor_030104ce3b95bcac, []int{131} } func (m *SCMProviderGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3598,7 +3740,7 @@ var xxx_messageInfo_SCMProviderGeneratorAzureDevOps proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucket) Reset() { *m = SCMProviderGeneratorBitbucket{} } func (*SCMProviderGeneratorBitbucket) ProtoMessage() {} func (*SCMProviderGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{127} + return fileDescriptor_030104ce3b95bcac, []int{132} } func (m *SCMProviderGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3626,7 +3768,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucket proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucketServer) Reset() { *m = SCMProviderGeneratorBitbucketServer{} } func (*SCMProviderGeneratorBitbucketServer) ProtoMessage() {} func (*SCMProviderGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{128} + return fileDescriptor_030104ce3b95bcac, []int{133} } func (m *SCMProviderGeneratorBitbucketServer) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3654,7 +3796,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucketServer proto.InternalMessageInf func (m *SCMProviderGeneratorFilter) Reset() { *m = SCMProviderGeneratorFilter{} } func (*SCMProviderGeneratorFilter) ProtoMessage() {} func (*SCMProviderGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{129} + return fileDescriptor_030104ce3b95bcac, []int{134} } func (m *SCMProviderGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3682,7 +3824,7 @@ var xxx_messageInfo_SCMProviderGeneratorFilter proto.InternalMessageInfo func (m *SCMProviderGeneratorGitea) Reset() { *m = SCMProviderGeneratorGitea{} } func (*SCMProviderGeneratorGitea) ProtoMessage() {} func (*SCMProviderGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{130} + return fileDescriptor_030104ce3b95bcac, []int{135} } func (m *SCMProviderGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3710,7 +3852,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitea proto.InternalMessageInfo func (m *SCMProviderGeneratorGithub) Reset() { *m = SCMProviderGeneratorGithub{} } func (*SCMProviderGeneratorGithub) ProtoMessage() {} func (*SCMProviderGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{131} + return fileDescriptor_030104ce3b95bcac, []int{136} } func (m *SCMProviderGeneratorGithub) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3738,7 +3880,7 @@ var xxx_messageInfo_SCMProviderGeneratorGithub proto.InternalMessageInfo func (m *SCMProviderGeneratorGitlab) Reset() { *m = SCMProviderGeneratorGitlab{} } func (*SCMProviderGeneratorGitlab) ProtoMessage() {} func (*SCMProviderGeneratorGitlab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{132} + return fileDescriptor_030104ce3b95bcac, []int{137} } func (m *SCMProviderGeneratorGitlab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3766,7 +3908,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitlab proto.InternalMessageInfo func (m *SecretRef) Reset() { *m = SecretRef{} } func (*SecretRef) ProtoMessage() {} func (*SecretRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{133} + return fileDescriptor_030104ce3b95bcac, []int{138} } func (m *SecretRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3794,7 +3936,7 @@ var xxx_messageInfo_SecretRef proto.InternalMessageInfo func (m *SignatureKey) Reset() { *m = SignatureKey{} } func (*SignatureKey) ProtoMessage() {} func (*SignatureKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{134} + return fileDescriptor_030104ce3b95bcac, []int{139} } func (m *SignatureKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3822,7 +3964,7 @@ var xxx_messageInfo_SignatureKey proto.InternalMessageInfo func (m *SyncOperation) Reset() { *m = SyncOperation{} } func (*SyncOperation) ProtoMessage() {} func (*SyncOperation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{135} + return fileDescriptor_030104ce3b95bcac, []int{140} } func (m *SyncOperation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3850,7 +3992,7 @@ var xxx_messageInfo_SyncOperation proto.InternalMessageInfo func (m *SyncOperationResource) Reset() { *m = SyncOperationResource{} } func (*SyncOperationResource) ProtoMessage() {} func (*SyncOperationResource) Descriptor() ([]byte, []int) { - return 
fileDescriptor_030104ce3b95bcac, []int{136} + return fileDescriptor_030104ce3b95bcac, []int{141} } func (m *SyncOperationResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3878,7 +4020,7 @@ var xxx_messageInfo_SyncOperationResource proto.InternalMessageInfo func (m *SyncOperationResult) Reset() { *m = SyncOperationResult{} } func (*SyncOperationResult) ProtoMessage() {} func (*SyncOperationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{137} + return fileDescriptor_030104ce3b95bcac, []int{142} } func (m *SyncOperationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3906,7 +4048,7 @@ var xxx_messageInfo_SyncOperationResult proto.InternalMessageInfo func (m *SyncPolicy) Reset() { *m = SyncPolicy{} } func (*SyncPolicy) ProtoMessage() {} func (*SyncPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{138} + return fileDescriptor_030104ce3b95bcac, []int{143} } func (m *SyncPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3934,7 +4076,7 @@ var xxx_messageInfo_SyncPolicy proto.InternalMessageInfo func (m *SyncPolicyAutomated) Reset() { *m = SyncPolicyAutomated{} } func (*SyncPolicyAutomated) ProtoMessage() {} func (*SyncPolicyAutomated) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{139} + return fileDescriptor_030104ce3b95bcac, []int{144} } func (m *SyncPolicyAutomated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3962,7 +4104,7 @@ var xxx_messageInfo_SyncPolicyAutomated proto.InternalMessageInfo func (m *SyncStatus) Reset() { *m = SyncStatus{} } func (*SyncStatus) ProtoMessage() {} func (*SyncStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{140} + return fileDescriptor_030104ce3b95bcac, []int{145} } func (m *SyncStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3990,7 +4132,7 @@ var xxx_messageInfo_SyncStatus proto.InternalMessageInfo func (m *SyncStrategy) Reset() { *m = SyncStrategy{} } func (*SyncStrategy) ProtoMessage() {} func (*SyncStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{141} + return fileDescriptor_030104ce3b95bcac, []int{146} } func (m *SyncStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4018,7 +4160,7 @@ var xxx_messageInfo_SyncStrategy proto.InternalMessageInfo func (m *SyncStrategyApply) Reset() { *m = SyncStrategyApply{} } func (*SyncStrategyApply) ProtoMessage() {} func (*SyncStrategyApply) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{142} + return fileDescriptor_030104ce3b95bcac, []int{147} } func (m *SyncStrategyApply) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4046,7 +4188,7 @@ var xxx_messageInfo_SyncStrategyApply proto.InternalMessageInfo func (m *SyncStrategyHook) Reset() { *m = SyncStrategyHook{} } func (*SyncStrategyHook) ProtoMessage() {} func (*SyncStrategyHook) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{143} + return fileDescriptor_030104ce3b95bcac, []int{148} } func (m *SyncStrategyHook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4074,7 +4216,7 @@ var xxx_messageInfo_SyncStrategyHook proto.InternalMessageInfo func (m *SyncWindow) Reset() { *m = SyncWindow{} } func (*SyncWindow) ProtoMessage() {} func (*SyncWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{144} + return fileDescriptor_030104ce3b95bcac, []int{149} } func (m *SyncWindow) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4102,7 +4244,7 @@ var xxx_messageInfo_SyncWindow proto.InternalMessageInfo func (m *TLSClientConfig) Reset() { *m = TLSClientConfig{} } func (*TLSClientConfig) ProtoMessage() {} func (*TLSClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{145} + return fileDescriptor_030104ce3b95bcac, []int{150} } func (m *TLSClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4130,7 +4272,7 @@ var xxx_messageInfo_TLSClientConfig proto.InternalMessageInfo func (m *TagFilter) Reset() { *m = TagFilter{} } func (*TagFilter) ProtoMessage() {} func (*TagFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{146} + return fileDescriptor_030104ce3b95bcac, []int{151} } func (m *TagFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4174,6 +4316,7 @@ func init() { proto.RegisterType((*ApplicationSetGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetGenerator") proto.RegisterType((*ApplicationSetList)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetList") proto.RegisterType((*ApplicationSetNestedGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetNestedGenerator") + proto.RegisterType((*ApplicationSetResourceIgnoreDifferences)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetResourceIgnoreDifferences") proto.RegisterType((*ApplicationSetRolloutStep)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetRolloutStep") proto.RegisterType((*ApplicationSetRolloutStrategy)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetRolloutStrategy") proto.RegisterType((*ApplicationSetSpec)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ApplicationSetSpec") @@ -4241,8 +4384,13 @@ func init() { proto.RegisterType((*JWTTokens)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.JWTTokens") proto.RegisterType((*JsonnetVar)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.JsonnetVar") proto.RegisterType((*KnownTypeField)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KnownTypeField") + proto.RegisterType((*KustomizeGvk)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeGvk") proto.RegisterType((*KustomizeOptions)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeOptions") + proto.RegisterType((*KustomizePatch)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizePatch") + proto.RegisterMapType((map[string]bool)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizePatch.OptionsEntry") proto.RegisterType((*KustomizeReplica)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeReplica") + proto.RegisterType((*KustomizeResId)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeResId") + proto.RegisterType((*KustomizeSelector)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.KustomizeSelector") proto.RegisterType((*ListGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ListGenerator") proto.RegisterType((*ManagedNamespaceMetadata)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ManagedNamespaceMetadata") proto.RegisterMapType((map[string]string)(nil), 
"github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ManagedNamespaceMetadata.AnnotationsEntry") @@ -4329,672 +4477,693 @@ func init() { } var fileDescriptor_030104ce3b95bcac = []byte{ - // 10635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x25, 0xd9, - 0x75, 0x90, 0xfb, 0x7d, 0x48, 0xef, 0x1d, 0x69, 0x3e, 0x74, 0x67, 0x66, 0x57, 0x3b, 0xde, 0x5d, - 0x4d, 0x7a, 0xc9, 0x7a, 0x8d, 0x77, 0xa5, 0xec, 0x78, 0xd7, 0x2c, 0xd9, 0xc4, 0x8e, 0x9e, 0x34, - 0xa3, 0xd1, 0x8c, 0x34, 0xd2, 0x5e, 0x69, 0x66, 0xec, 0x75, 0xd6, 0xeb, 0x56, 0xbf, 0xab, 0xa7, - 0x1e, 0xf5, 0xeb, 0xee, 0xed, 0xee, 0xa7, 0x91, 0x36, 0xb6, 0x63, 0xc7, 0xf9, 0x30, 0xf8, 0x13, - 0x1b, 0x2a, 0x0e, 0xe0, 0xe0, 0xc4, 0x81, 0x22, 0x05, 0x5b, 0x04, 0xf8, 0x41, 0x20, 0x50, 0xa9, - 0x24, 0xfc, 0x30, 0x65, 0x28, 0x52, 0x54, 0x2a, 0x0e, 0x90, 0x08, 0x5b, 0x14, 0x05, 0x45, 0x15, - 0xa9, 0x0a, 0xf0, 0x03, 0x06, 0x0a, 0xa8, 0xfb, 0x7d, 0xbb, 0xdf, 0x7b, 0xa3, 0x27, 0xa9, 0x35, - 0x33, 0x36, 0xfb, 0xef, 0xbd, 0x7b, 0x4e, 0x9f, 0x73, 0xfa, 0xf6, 0xbd, 0xe7, 0x9e, 0x7b, 0xee, - 0x39, 0xe7, 0xc2, 0x42, 0xcb, 0x4b, 0x37, 0x3a, 0x6b, 0x93, 0x6e, 0xd8, 0x9e, 0x72, 0xe2, 0x56, - 0x18, 0xc5, 0xe1, 0x6d, 0xf6, 0xe3, 0x39, 0xb7, 0x39, 0xb5, 0x75, 0x71, 0x2a, 0xda, 0x6c, 0x4d, - 0x39, 0x91, 0x97, 0x4c, 0x39, 0x51, 0xe4, 0x7b, 0xae, 0x93, 0x7a, 0x61, 0x30, 0xb5, 0xf5, 0xbc, - 0xe3, 0x47, 0x1b, 0xce, 0xf3, 0x53, 0x2d, 0x12, 0x90, 0xd8, 0x49, 0x49, 0x73, 0x32, 0x8a, 0xc3, - 0x34, 0x44, 0x3f, 0xa2, 0xa9, 0x4d, 0x4a, 0x6a, 0xec, 0xc7, 0xeb, 0x6e, 0x73, 0x72, 0xeb, 0xe2, - 0x64, 0xb4, 0xd9, 0x9a, 0xa4, 0xd4, 0x26, 0x0d, 0x6a, 0x93, 0x92, 0xda, 0xf9, 0xe7, 0x0c, 0x59, - 0x5a, 0x61, 0x2b, 0x9c, 0x62, 0x44, 0xd7, 0x3a, 0xeb, 0xec, 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x66, - 0xe7, 0xed, 0xcd, 0x97, 0x92, 0x49, 0x2f, 0xa4, 0xe2, 0x4d, 0xb9, 0x61, 0x4c, 0xa6, 0xb6, 0xba, - 0x04, 0x3a, 0x7f, 0x45, 0xe3, 0x90, 0xed, 0x94, 0x04, 0x89, 0x17, 0x06, 0xc9, 0x73, 0x54, 0x04, - 0x12, 0x6f, 0x91, 0xd8, 0x7c, 0x3d, 0x03, 0xa1, 0x17, 0xa5, 0x17, 0x34, 0xa5, 0xb6, 0xe3, 0x6e, - 0x78, 0x01, 0x89, 0x77, 0xf4, 0xe3, 0x6d, 0x92, 0x3a, 0xbd, 0x9e, 0x9a, 0xea, 0xf7, 0x54, 0xdc, - 0x09, 0x52, 0xaf, 0x4d, 0xba, 0x1e, 0x78, 0xdf, 0x7e, 0x0f, 0x24, 0xee, 0x06, 0x69, 0x3b, 0x5d, - 0xcf, 0xbd, 0xb7, 0xdf, 0x73, 0x9d, 0xd4, 0xf3, 0xa7, 0xbc, 0x20, 0x4d, 0xd2, 0x38, 0xff, 0x90, - 0xfd, 0x06, 0x9c, 0x98, 0xbe, 0xb5, 0x32, 0xdd, 0x49, 0x37, 0x66, 0xc2, 0x60, 0xdd, 0x6b, 0xa1, - 0x17, 0x61, 0xc4, 0xf5, 0x3b, 0x49, 0x4a, 0xe2, 0xeb, 0x4e, 0x9b, 0x8c, 0x5b, 0x17, 0xac, 0x67, - 0xea, 0x8d, 0x33, 0xdf, 0xdc, 0x9d, 0x78, 0xc7, 0xde, 0xee, 0xc4, 0xc8, 0x8c, 0x06, 0x61, 0x13, - 0x0f, 0xbd, 0x1b, 0x86, 0xe3, 0xd0, 0x27, 0xd3, 0xf8, 0xfa, 0x78, 0x89, 0x3d, 0x72, 0x4a, 0x3c, - 0x32, 0x8c, 0x79, 0x33, 0x96, 0x70, 0xfb, 0xf7, 0x4b, 0x00, 0xd3, 0x51, 0xb4, 0x1c, 0x87, 0xb7, - 0x89, 0x9b, 0xa2, 0x8f, 0x42, 0x8d, 0x76, 0x5d, 0xd3, 0x49, 0x1d, 0xc6, 0x6d, 0xe4, 0xe2, 0x0f, - 0x4d, 0xf2, 0x37, 0x99, 0x34, 0xdf, 0x44, 0x0f, 0x1c, 0x8a, 0x3d, 0xb9, 0xf5, 0xfc, 0xe4, 0xd2, - 0x1a, 0x7d, 0x7e, 0x91, 0xa4, 0x4e, 0x03, 0x09, 0x66, 0xa0, 0xdb, 0xb0, 0xa2, 0x8a, 0x02, 0xa8, - 0x24, 0x11, 0x71, 0x99, 0x60, 0x23, 0x17, 0x17, 0x26, 0x8f, 0x32, 0x42, 0x27, 0xb5, 0xe4, 0x2b, - 0x11, 0x71, 0x1b, 0xa3, 0x82, 0x73, 0x85, 0xfe, 0xc3, 0x8c, 0x0f, 0xda, 0x82, 0xa1, 0x24, 0x75, - 0xd2, 0x4e, 0x32, 0x5e, 0x66, 0x1c, 0xaf, 0x17, 0xc6, 0x91, 0x51, 0x6d, 0x9c, 0x14, 0x3c, 0x87, - 0xf8, 0x7f, 0x2c, 0xb8, 0xd9, 0x7f, 0x64, 0xc1, 0x49, 0x8d, 0xbc, 0xe0, 
0x25, 0x29, 0xfa, 0xf1, - 0xae, 0xce, 0x9d, 0x1c, 0xac, 0x73, 0xe9, 0xd3, 0xac, 0x6b, 0x4f, 0x0b, 0x66, 0x35, 0xd9, 0x62, - 0x74, 0x6c, 0x1b, 0xaa, 0x5e, 0x4a, 0xda, 0xc9, 0x78, 0xe9, 0x42, 0xf9, 0x99, 0x91, 0x8b, 0x57, - 0x8a, 0x7a, 0xcf, 0xc6, 0x09, 0xc1, 0xb4, 0x3a, 0x4f, 0xc9, 0x63, 0xce, 0xc5, 0xfe, 0xd5, 0x51, - 0xf3, 0xfd, 0x68, 0x87, 0xa3, 0xe7, 0x61, 0x24, 0x09, 0x3b, 0xb1, 0x4b, 0x30, 0x89, 0xc2, 0x64, - 0xdc, 0xba, 0x50, 0xa6, 0x43, 0x8f, 0x8e, 0xd4, 0x15, 0xdd, 0x8c, 0x4d, 0x1c, 0xf4, 0x05, 0x0b, - 0x46, 0x9b, 0x24, 0x49, 0xbd, 0x80, 0xf1, 0x97, 0xc2, 0xaf, 0x1e, 0x59, 0x78, 0xd9, 0x38, 0xab, - 0x89, 0x37, 0xce, 0x8a, 0x17, 0x19, 0x35, 0x1a, 0x13, 0x9c, 0xe1, 0x4f, 0x67, 0x5c, 0x93, 0x24, - 0x6e, 0xec, 0x45, 0xf4, 0x3f, 0x1b, 0x33, 0xc6, 0x8c, 0x9b, 0xd5, 0x20, 0x6c, 0xe2, 0xa1, 0x00, - 0xaa, 0x74, 0x46, 0x25, 0xe3, 0x15, 0x26, 0xff, 0xfc, 0xd1, 0xe4, 0x17, 0x9d, 0x4a, 0x27, 0xab, - 0xee, 0x7d, 0xfa, 0x2f, 0xc1, 0x9c, 0x0d, 0xfa, 0xbc, 0x05, 0xe3, 0x62, 0xc6, 0x63, 0xc2, 0x3b, - 0xf4, 0xd6, 0x86, 0x97, 0x12, 0xdf, 0x4b, 0xd2, 0xf1, 0x2a, 0x93, 0x61, 0x6a, 0xb0, 0xb1, 0x35, - 0x17, 0x87, 0x9d, 0xe8, 0x9a, 0x17, 0x34, 0x1b, 0x17, 0x04, 0xa7, 0xf1, 0x99, 0x3e, 0x84, 0x71, - 0x5f, 0x96, 0xe8, 0x2b, 0x16, 0x9c, 0x0f, 0x9c, 0x36, 0x49, 0x22, 0x87, 0x7e, 0x5a, 0x0e, 0x6e, - 0xf8, 0x8e, 0xbb, 0xc9, 0x24, 0x1a, 0x3a, 0x9c, 0x44, 0xb6, 0x90, 0xe8, 0xfc, 0xf5, 0xbe, 0xa4, - 0xf1, 0x3d, 0xd8, 0xa2, 0x6f, 0x58, 0x30, 0x16, 0xc6, 0xd1, 0x86, 0x13, 0x90, 0xa6, 0x84, 0x26, - 0xe3, 0xc3, 0x6c, 0xea, 0x7d, 0xe4, 0x68, 0x9f, 0x68, 0x29, 0x4f, 0x76, 0x31, 0x0c, 0xbc, 0x34, - 0x8c, 0x57, 0x48, 0x9a, 0x7a, 0x41, 0x2b, 0x69, 0x9c, 0xdb, 0xdb, 0x9d, 0x18, 0xeb, 0xc2, 0xc2, - 0xdd, 0xf2, 0xa0, 0x9f, 0x80, 0x91, 0x64, 0x27, 0x70, 0x6f, 0x79, 0x41, 0x33, 0xbc, 0x93, 0x8c, - 0xd7, 0x8a, 0x98, 0xbe, 0x2b, 0x8a, 0xa0, 0x98, 0x80, 0x9a, 0x01, 0x36, 0xb9, 0xf5, 0xfe, 0x70, - 0x7a, 0x28, 0xd5, 0x8b, 0xfe, 0x70, 0x7a, 0x30, 0xdd, 0x83, 0x2d, 0xfa, 0x39, 0x0b, 0x4e, 0x24, - 0x5e, 0x2b, 0x70, 0xd2, 0x4e, 0x4c, 0xae, 0x91, 0x9d, 0x64, 0x1c, 0x98, 0x20, 0x57, 0x8f, 0xd8, - 0x2b, 0x06, 0xc9, 0xc6, 0x39, 0x21, 0xe3, 0x09, 0xb3, 0x35, 0xc1, 0x59, 0xbe, 0xbd, 0x26, 0x9a, - 0x1e, 0xd6, 0x23, 0xc5, 0x4e, 0x34, 0x3d, 0xa8, 0xfb, 0xb2, 0x44, 0x3f, 0x06, 0xa7, 0x79, 0x93, - 0xea, 0xd9, 0x64, 0x7c, 0x94, 0x29, 0xda, 0xb3, 0x7b, 0xbb, 0x13, 0xa7, 0x57, 0x72, 0x30, 0xdc, - 0x85, 0x8d, 0xde, 0x80, 0x89, 0x88, 0xc4, 0x6d, 0x2f, 0x5d, 0x0a, 0xfc, 0x1d, 0xa9, 0xbe, 0xdd, - 0x30, 0x22, 0x4d, 0x21, 0x4e, 0x32, 0x7e, 0xe2, 0x82, 0xf5, 0x4c, 0xad, 0xf1, 0x2e, 0x21, 0xe6, - 0xc4, 0xf2, 0xbd, 0xd1, 0xf1, 0x7e, 0xf4, 0xec, 0x7f, 0x56, 0x82, 0xd3, 0xf9, 0x85, 0x13, 0xfd, - 0x0d, 0x0b, 0x4e, 0xdd, 0xbe, 0x93, 0xae, 0x86, 0x9b, 0x24, 0x48, 0x1a, 0x3b, 0x54, 0xbd, 0xb1, - 0x25, 0x63, 0xe4, 0xa2, 0x5b, 0xec, 0x12, 0x3d, 0x79, 0x35, 0xcb, 0xe5, 0x52, 0x90, 0xc6, 0x3b, - 0x8d, 0x47, 0xc5, 0xdb, 0x9d, 0xba, 0x7a, 0x6b, 0xd5, 0x84, 0xe2, 0xbc, 0x50, 0xe7, 0x3f, 0x6b, - 0xc1, 0xd9, 0x5e, 0x24, 0xd0, 0x69, 0x28, 0x6f, 0x92, 0x1d, 0x6e, 0x95, 0x61, 0xfa, 0x13, 0xbd, - 0x06, 0xd5, 0x2d, 0xc7, 0xef, 0x10, 0x61, 0xdd, 0xcc, 0x1d, 0xed, 0x45, 0x94, 0x64, 0x98, 0x53, - 0xfd, 0xe1, 0xd2, 0x4b, 0x96, 0xfd, 0x2f, 0xcb, 0x30, 0x62, 0xac, 0x6f, 0xf7, 0xc1, 0x62, 0x0b, - 0x33, 0x16, 0xdb, 0x62, 0x61, 0x4b, 0x73, 0x5f, 0x93, 0xed, 0x4e, 0xce, 0x64, 0x5b, 0x2a, 0x8e, - 0xe5, 0x3d, 0x6d, 0x36, 0x94, 0x42, 0x3d, 0x8c, 0xa8, 0x45, 0x4e, 0x97, 0xfe, 0x4a, 0x11, 0x9f, - 0x70, 0x49, 0x92, 0x6b, 0x9c, 0xd8, 0xdb, 0x9d, 0xa8, 0xab, 0xbf, 0x58, 0x33, 0xb2, 0xbf, 0x6d, - 
0xc1, 0x59, 0x43, 0xc6, 0x99, 0x30, 0x68, 0x7a, 0xec, 0xd3, 0x5e, 0x80, 0x4a, 0xba, 0x13, 0x49, - 0xb3, 0x5f, 0xf5, 0xd4, 0xea, 0x4e, 0x44, 0x30, 0x83, 0x50, 0x43, 0xbf, 0x4d, 0x92, 0xc4, 0x69, - 0x91, 0xbc, 0xa1, 0xbf, 0xc8, 0x9b, 0xb1, 0x84, 0xa3, 0x18, 0x90, 0xef, 0x24, 0xe9, 0x6a, 0xec, - 0x04, 0x09, 0x23, 0xbf, 0xea, 0xb5, 0x89, 0xe8, 0xe0, 0x3f, 0x3d, 0xd8, 0x88, 0xa1, 0x4f, 0x34, - 0x1e, 0xd9, 0xdb, 0x9d, 0x40, 0x0b, 0x5d, 0x94, 0x70, 0x0f, 0xea, 0xf6, 0x57, 0x2c, 0x78, 0xa4, - 0xb7, 0x2d, 0x86, 0x9e, 0x86, 0x21, 0xbe, 0xe5, 0x13, 0x6f, 0xa7, 0x3f, 0x09, 0x6b, 0xc5, 0x02, - 0x8a, 0xa6, 0xa0, 0xae, 0xd6, 0x09, 0xf1, 0x8e, 0x63, 0x02, 0xb5, 0xae, 0x17, 0x17, 0x8d, 0x43, - 0x3b, 0x8d, 0xfe, 0x11, 0x96, 0x9b, 0xea, 0x34, 0xb6, 0x49, 0x62, 0x10, 0xfb, 0xdf, 0x59, 0x70, - 0xca, 0x90, 0xea, 0x3e, 0x98, 0xe6, 0x41, 0xd6, 0x34, 0x9f, 0x2f, 0x6c, 0x3c, 0xf7, 0xb1, 0xcd, - 0x3f, 0x6f, 0xc1, 0x79, 0x03, 0x6b, 0xd1, 0x49, 0xdd, 0x8d, 0x4b, 0xdb, 0x51, 0x4c, 0x12, 0xba, - 0x9d, 0x46, 0x4f, 0x18, 0x7a, 0xab, 0x31, 0x22, 0x28, 0x94, 0xaf, 0x91, 0x1d, 0xae, 0xc4, 0x9e, - 0x85, 0x1a, 0x1f, 0x9c, 0x61, 0x2c, 0x7a, 0x5c, 0xbd, 0xdb, 0x92, 0x68, 0xc7, 0x0a, 0x03, 0xd9, - 0x30, 0xc4, 0x94, 0x13, 0x9d, 0xac, 0x74, 0x19, 0x02, 0xfa, 0x11, 0x6f, 0xb2, 0x16, 0x2c, 0x20, - 0xf6, 0x52, 0x46, 0x9c, 0xe5, 0x98, 0xb0, 0x8f, 0xdb, 0xbc, 0xec, 0x11, 0xbf, 0x99, 0xd0, 0x6d, - 0x83, 0x13, 0x04, 0x61, 0x2a, 0x76, 0x00, 0xc6, 0xb6, 0x61, 0x5a, 0x37, 0x63, 0x13, 0xc7, 0xde, - 0x2b, 0xb1, 0xcd, 0x87, 0x9a, 0xd6, 0xe4, 0x7e, 0xec, 0x5c, 0xe3, 0x8c, 0x1e, 0x5c, 0x2e, 0x4e, - 0x29, 0x91, 0xfe, 0xbb, 0xd7, 0x37, 0x73, 0xaa, 0x10, 0x17, 0xca, 0xf5, 0xde, 0x3b, 0xd8, 0xdf, - 0x2a, 0xc1, 0x44, 0xf6, 0x81, 0x2e, 0x4d, 0x4a, 0xb7, 0x4b, 0x06, 0xa3, 0xbc, 0x83, 0xc2, 0xc0, - 0xc7, 0x26, 0x5e, 0x1f, 0x65, 0x54, 0x3a, 0x4e, 0x65, 0x64, 0xea, 0xca, 0xf2, 0x3e, 0xba, 0xf2, - 0x69, 0xd5, 0xeb, 0x95, 0x9c, 0x72, 0xca, 0xae, 0x17, 0x17, 0xa0, 0x92, 0xa4, 0x24, 0x1a, 0xaf, - 0x66, 0x75, 0xcd, 0x4a, 0x4a, 0x22, 0xcc, 0x20, 0xf6, 0x7f, 0x2e, 0xc1, 0xa3, 0xd9, 0x3e, 0xd4, - 0xea, 0xfd, 0x03, 0x19, 0xf5, 0xfe, 0x1e, 0x53, 0xbd, 0xdf, 0xdd, 0x9d, 0x78, 0x67, 0x9f, 0xc7, - 0xbe, 0x67, 0xb4, 0x3f, 0x9a, 0xcb, 0xf5, 0xe2, 0x54, 0xb6, 0x17, 0xef, 0xee, 0x4e, 0x3c, 0xd1, - 0xe7, 0x1d, 0x73, 0xdd, 0xfc, 0x34, 0x0c, 0xc5, 0xc4, 0x49, 0xc2, 0x40, 0x74, 0xb4, 0xfa, 0x1c, - 0x98, 0xb5, 0x62, 0x01, 0xb5, 0xff, 0x55, 0x3d, 0xdf, 0xd9, 0x73, 0xdc, 0xc1, 0x16, 0xc6, 0xc8, - 0x83, 0x0a, 0x33, 0xd9, 0xb9, 0x6a, 0xb8, 0x76, 0xb4, 0x69, 0x44, 0x55, 0xbc, 0x22, 0xdd, 0xa8, - 0xd1, 0xaf, 0x46, 0x9b, 0x30, 0x63, 0x81, 0xb6, 0xa1, 0xe6, 0x4a, 0x4b, 0xba, 0x54, 0x84, 0xcf, - 0x49, 0xd8, 0xd1, 0x9a, 0xe3, 0x28, 0xd5, 0xc5, 0xca, 0xfc, 0x56, 0xdc, 0x10, 0x81, 0x72, 0xcb, - 0x4b, 0xc5, 0x67, 0x3d, 0xe2, 0x5e, 0x69, 0xce, 0x33, 0x5e, 0x71, 0x98, 0x2e, 0x10, 0x73, 0x5e, - 0x8a, 0x29, 0x7d, 0xf4, 0x33, 0x16, 0x8c, 0x24, 0x6e, 0x7b, 0x39, 0x0e, 0xb7, 0xbc, 0x26, 0x89, - 0x85, 0xa5, 0x74, 0x44, 0xd5, 0xb4, 0x32, 0xb3, 0x28, 0x09, 0x6a, 0xbe, 0x7c, 0xef, 0xaa, 0x21, - 0xd8, 0xe4, 0x4b, 0x77, 0x10, 0x8f, 0x8a, 0x77, 0x9f, 0x25, 0xae, 0x47, 0xd7, 0x36, 0xb9, 0x61, - 0x62, 0x23, 0xe5, 0xc8, 0x96, 0xe3, 0x6c, 0xc7, 0xdd, 0xa4, 0xf3, 0x4d, 0x0b, 0xf4, 0xce, 0xbd, - 0xdd, 0x89, 0x47, 0x67, 0x7a, 0xf3, 0xc4, 0xfd, 0x84, 0x61, 0x1d, 0x16, 0x75, 0x7c, 0x1f, 0x93, - 0x37, 0x3a, 0x84, 0xb9, 0x43, 0x0a, 0xe8, 0xb0, 0x65, 0x4d, 0x30, 0xd7, 0x61, 0x06, 0x04, 0x9b, - 0x7c, 0xd1, 0x1b, 0x30, 0xd4, 0x76, 0xd2, 0xd8, 0xdb, 0x16, 0x3e, 0x90, 0x23, 0xda, 0xf2, 0x8b, - 0x8c, 0x96, 0x66, 0xce, 
0x96, 0x7e, 0xde, 0x88, 0x05, 0x23, 0xd4, 0x86, 0x6a, 0x9b, 0xc4, 0x2d, - 0x32, 0x5e, 0x2b, 0xc2, 0xdf, 0xbb, 0x48, 0x49, 0x69, 0x86, 0x75, 0x6a, 0xf9, 0xb0, 0x36, 0xcc, - 0xb9, 0xa0, 0xd7, 0xa0, 0x96, 0x10, 0x9f, 0xb8, 0xd4, 0x76, 0xa9, 0x33, 0x8e, 0xef, 0x1d, 0xd0, - 0x8e, 0x73, 0xd6, 0x88, 0xbf, 0x22, 0x1e, 0xe5, 0x13, 0x4c, 0xfe, 0xc3, 0x8a, 0x24, 0xed, 0xc0, - 0xc8, 0xef, 0xb4, 0xbc, 0x60, 0x1c, 0x8a, 0xe8, 0xc0, 0x65, 0x46, 0x2b, 0xd7, 0x81, 0xbc, 0x11, - 0x0b, 0x46, 0xf6, 0x7f, 0xb0, 0x00, 0x65, 0x95, 0xda, 0x7d, 0x30, 0x58, 0xdf, 0xc8, 0x1a, 0xac, - 0x0b, 0x45, 0x5a, 0x1d, 0x7d, 0x6c, 0xd6, 0xdf, 0xa8, 0x43, 0x6e, 0x39, 0xb8, 0x4e, 0x92, 0x94, - 0x34, 0xdf, 0x56, 0xe1, 0x6f, 0xab, 0xf0, 0xb7, 0x55, 0xb8, 0x52, 0xe1, 0x6b, 0x39, 0x15, 0xfe, - 0x7e, 0x63, 0xd6, 0xeb, 0x03, 0xd3, 0xd7, 0xd5, 0x89, 0xaa, 0x29, 0x81, 0x81, 0x40, 0x35, 0xc1, - 0xd5, 0x95, 0xa5, 0xeb, 0x3d, 0x75, 0xf6, 0xeb, 0x59, 0x9d, 0x7d, 0x54, 0x16, 0xff, 0x3f, 0x68, - 0xe9, 0xbf, 0x52, 0x82, 0xc7, 0xb2, 0xda, 0x0b, 0x87, 0xbe, 0x1f, 0x76, 0x52, 0xba, 0x17, 0x40, - 0xbf, 0x68, 0xc1, 0xe9, 0x76, 0x76, 0x13, 0x9e, 0x08, 0x5f, 0xe7, 0x07, 0x0b, 0x53, 0xad, 0xb9, - 0x5d, 0x7e, 0x63, 0x5c, 0xa8, 0xd9, 0xd3, 0x39, 0x40, 0x82, 0xbb, 0x64, 0x41, 0xaf, 0x41, 0xbd, - 0xed, 0x6c, 0xdf, 0x88, 0x9a, 0x4e, 0x2a, 0xb7, 0x61, 0xfd, 0x77, 0xcf, 0x9d, 0xd4, 0xf3, 0x27, - 0xf9, 0x09, 0xf6, 0xe4, 0x7c, 0x90, 0x2e, 0xc5, 0x2b, 0x69, 0xec, 0x05, 0x2d, 0xee, 0xe1, 0x5a, - 0x94, 0x64, 0xb0, 0xa6, 0x68, 0x7f, 0xcd, 0xca, 0xeb, 0x76, 0xd5, 0x3b, 0xb1, 0x93, 0x92, 0xd6, - 0x0e, 0xfa, 0x18, 0x54, 0xe9, 0x7e, 0x49, 0xf6, 0xca, 0xad, 0x22, 0x17, 0x1c, 0xe3, 0x4b, 0xe8, - 0xb5, 0x87, 0xfe, 0x4b, 0x30, 0x67, 0x6a, 0x7f, 0x65, 0x38, 0xbf, 0xc6, 0xb2, 0xf3, 0xcc, 0x8b, - 0x00, 0xad, 0x70, 0x95, 0xb4, 0x23, 0x9f, 0x76, 0x8b, 0xc5, 0x9c, 0xe2, 0xca, 0x45, 0x30, 0xa7, - 0x20, 0xd8, 0xc0, 0x42, 0x7f, 0xce, 0x02, 0x68, 0xc9, 0xa1, 0x22, 0xd7, 0xcf, 0x1b, 0x45, 0xbe, - 0x8e, 0x1e, 0x88, 0x5a, 0x16, 0xc5, 0x10, 0x1b, 0xcc, 0xd1, 0x4f, 0x59, 0x50, 0x4b, 0xa5, 0xf8, - 0x7c, 0x45, 0x59, 0x2d, 0x52, 0x12, 0xf9, 0xd2, 0xda, 0x94, 0x50, 0x5d, 0xa2, 0xf8, 0xa2, 0x9f, - 0xb5, 0x00, 0x92, 0x9d, 0xc0, 0x5d, 0x0e, 0x7d, 0xcf, 0xdd, 0x11, 0x0b, 0xcd, 0xcd, 0x42, 0xdd, - 0x18, 0x8a, 0x7a, 0xe3, 0x24, 0xed, 0x0d, 0xfd, 0x1f, 0x1b, 0x9c, 0xd1, 0x27, 0xa0, 0x96, 0x88, - 0xe1, 0x26, 0x96, 0x96, 0xd5, 0x62, 0x9d, 0x29, 0x9c, 0xb6, 0xd0, 0x4a, 0xe2, 0x1f, 0x56, 0x3c, - 0xd1, 0xcf, 0x5b, 0x70, 0x2a, 0xca, 0xba, 0xbe, 0xc4, 0x2a, 0x52, 0x9c, 0x0e, 0xc8, 0xb9, 0xd6, - 0x1a, 0x67, 0xf6, 0x76, 0x27, 0x4e, 0xe5, 0x1a, 0x71, 0x5e, 0x0a, 0x34, 0x03, 0x63, 0x7a, 0x04, - 0x2f, 0x45, 0xdc, 0x0d, 0x37, 0xcc, 0xdc, 0x70, 0xec, 0x14, 0x73, 0x2e, 0x0f, 0xc4, 0xdd, 0xf8, - 0x68, 0x19, 0xce, 0x52, 0xe9, 0x76, 0xb8, 0xd5, 0x26, 0xb5, 0x72, 0xc2, 0xd6, 0x90, 0x5a, 0xe3, - 0x71, 0x31, 0x42, 0x98, 0xa3, 0x3b, 0x8f, 0x83, 0x7b, 0x3e, 0x69, 0x7f, 0xab, 0x94, 0xf1, 0x8b, - 0x2b, 0x87, 0x15, 0x9b, 0x63, 0xae, 0xf4, 0x15, 0x48, 0x95, 0x51, 0xe8, 0x1c, 0x53, 0x9e, 0x08, - 0x3d, 0xc7, 0x54, 0x53, 0x82, 0x0d, 0xe6, 0xd4, 0x80, 0x19, 0x73, 0xf2, 0x6e, 0x31, 0x31, 0xed, - 0x5f, 0x2b, 0x52, 0xa4, 0xee, 0x53, 0x8c, 0xc7, 0x84, 0x68, 0x63, 0x5d, 0x20, 0xdc, 0x2d, 0x92, - 0xfd, 0xad, 0xac, 0x2f, 0xde, 0x18, 0xb1, 0x03, 0x9c, 0x33, 0x7c, 0xc1, 0x82, 0x91, 0x38, 0xf4, - 0x7d, 0x2f, 0x68, 0xd1, 0xd9, 0x25, 0x96, 0x88, 0x0f, 0x1f, 0x8b, 0x96, 0x16, 0xd3, 0x88, 0x99, - 0x41, 0x58, 0xf3, 0xc4, 0xa6, 0x00, 0xf6, 0x1f, 0x59, 0x30, 0xde, 0x4f, 0x0b, 0x20, 0x02, 0xef, - 0x94, 0x43, 0x5c, 0x9d, 0xb2, 0x2f, 0x05, 0xb3, 
0xc4, 0x27, 0xca, 0x49, 0x59, 0x6b, 0x3c, 0x25, - 0x5e, 0xf3, 0x9d, 0xcb, 0xfd, 0x51, 0xf1, 0xbd, 0xe8, 0xa0, 0x57, 0xe1, 0xb4, 0xf1, 0x5e, 0x89, - 0xea, 0x98, 0x7a, 0x63, 0x92, 0x2e, 0xbb, 0xd3, 0x39, 0xd8, 0xdd, 0xdd, 0x89, 0x47, 0xf2, 0x6d, - 0x42, 0x4d, 0x75, 0xd1, 0xb1, 0x7f, 0xa5, 0x94, 0xff, 0x5a, 0x6a, 0x85, 0xf9, 0xaa, 0xd5, 0xb5, - 0xf5, 0xfb, 0xe0, 0x71, 0x68, 0x75, 0xb6, 0x49, 0x54, 0x07, 0xf9, 0xfd, 0x71, 0x1e, 0xe0, 0x49, - 0xa1, 0xfd, 0xcf, 0x2b, 0x70, 0x0f, 0xc9, 0xd4, 0x59, 0x90, 0xd5, 0xef, 0x2c, 0xe8, 0xe0, 0xc7, - 0x4b, 0x9f, 0xb3, 0x60, 0xc8, 0xa7, 0x56, 0x28, 0x3f, 0xef, 0x18, 0xb9, 0xd8, 0x3c, 0xae, 0xbe, - 0xe7, 0xc6, 0x6e, 0xc2, 0x4f, 0xab, 0x95, 0xcb, 0x93, 0x37, 0x62, 0x21, 0x03, 0xfa, 0xba, 0x95, - 0x3d, 0x3c, 0xe1, 0xe1, 0x47, 0xde, 0xb1, 0xc9, 0x64, 0x9c, 0xc8, 0x70, 0xc1, 0xb4, 0xaf, 0xbf, - 0xcf, 0x59, 0x0d, 0x9a, 0x04, 0x58, 0xf7, 0x02, 0xc7, 0xf7, 0xde, 0xa4, 0xbb, 0xe9, 0x2a, 0x5b, - 0x56, 0xd8, 0x3a, 0x7d, 0x59, 0xb5, 0x62, 0x03, 0xe3, 0xfc, 0x9f, 0x85, 0x11, 0xe3, 0xcd, 0x7b, - 0x1c, 0xb2, 0x9f, 0x35, 0x0f, 0xd9, 0xeb, 0xc6, 0xd9, 0xf8, 0xf9, 0xf7, 0xc3, 0xe9, 0xbc, 0x80, - 0x07, 0x79, 0xde, 0xfe, 0x1f, 0xc3, 0xf9, 0x13, 0x8f, 0x55, 0x12, 0xb7, 0xa9, 0x68, 0x6f, 0x7b, - 0x21, 0xde, 0xf6, 0x42, 0xbc, 0xed, 0x85, 0x30, 0x1d, 0xc9, 0x62, 0x87, 0x3d, 0x7c, 0x9f, 0x76, - 0xd8, 0x19, 0x9f, 0x41, 0xad, 0x70, 0x9f, 0x81, 0xbd, 0x57, 0x85, 0x8c, 0x1d, 0xc5, 0xfb, 0xfb, - 0xdd, 0x30, 0x1c, 0x93, 0x28, 0xbc, 0x81, 0x17, 0xc4, 0x1a, 0xa2, 0x03, 0xa9, 0x79, 0x33, 0x96, - 0x70, 0xba, 0xd6, 0x44, 0x4e, 0xba, 0x21, 0x16, 0x11, 0xb5, 0xd6, 0x2c, 0x3b, 0xe9, 0x06, 0x66, - 0x10, 0xf4, 0x7e, 0x38, 0x99, 0x3a, 0x71, 0x8b, 0xa4, 0x98, 0x6c, 0xb1, 0xcf, 0x2a, 0xce, 0xc5, - 0x1e, 0x11, 0xb8, 0x27, 0x57, 0x33, 0x50, 0x9c, 0xc3, 0x46, 0x6f, 0x40, 0x65, 0x83, 0xf8, 0x6d, - 0xd1, 0xe5, 0x2b, 0xc5, 0xe9, 0x78, 0xf6, 0xae, 0x57, 0x88, 0xdf, 0xe6, 0x1a, 0x88, 0xfe, 0xc2, - 0x8c, 0x15, 0x1d, 0x6f, 0xf5, 0xcd, 0x4e, 0x92, 0x86, 0x6d, 0xef, 0x4d, 0xe9, 0x0e, 0xfa, 0x60, - 0xc1, 0x8c, 0xaf, 0x49, 0xfa, 0xdc, 0x81, 0xa0, 0xfe, 0x62, 0xcd, 0x99, 0xc9, 0xd1, 0xf4, 0x62, - 0xf6, 0xa9, 0x76, 0x84, 0x57, 0xa7, 0x68, 0x39, 0x66, 0x25, 0x7d, 0x2e, 0x87, 0xfa, 0x8b, 0x35, - 0x67, 0xb4, 0xa3, 0xc6, 0xfd, 0x08, 0x93, 0xe1, 0x46, 0xc1, 0x32, 0xf0, 0x31, 0xdf, 0x73, 0xfc, - 0x3f, 0x05, 0x55, 0x77, 0xc3, 0x89, 0xd3, 0xf1, 0x51, 0x36, 0x68, 0x94, 0x23, 0x63, 0x86, 0x36, - 0x62, 0x0e, 0x43, 0x4f, 0x40, 0x39, 0x26, 0xeb, 0x2c, 0x7e, 0xcf, 0x88, 0xec, 0xc0, 0x64, 0x1d, - 0xd3, 0x76, 0xfb, 0x97, 0x4a, 0x59, 0x73, 0x29, 0xfb, 0xde, 0x7c, 0xb4, 0xbb, 0x9d, 0x38, 0x91, - 0xce, 0x0e, 0x63, 0xb4, 0xb3, 0x66, 0x2c, 0xe1, 0xe8, 0x53, 0x16, 0x0c, 0xdf, 0x4e, 0xc2, 0x20, - 0x20, 0xa9, 0x58, 0x9a, 0x6e, 0x16, 0xdc, 0x15, 0x57, 0x39, 0x75, 0x2d, 0x83, 0x68, 0xc0, 0x92, - 0x2f, 0x15, 0x97, 0x6c, 0xbb, 0x7e, 0xa7, 0xd9, 0x75, 0xa0, 0x7f, 0x89, 0x37, 0x63, 0x09, 0xa7, - 0xa8, 0x5e, 0xc0, 0x51, 0x2b, 0x59, 0xd4, 0xf9, 0x40, 0xa0, 0x0a, 0xb8, 0xfd, 0x97, 0x86, 0xe0, - 0x5c, 0xcf, 0xc9, 0x41, 0x0d, 0x19, 0x66, 0x2a, 0x5c, 0xf6, 0x7c, 0x22, 0xc3, 0x54, 0x98, 0x21, - 0x73, 0x53, 0xb5, 0x62, 0x03, 0x03, 0xfd, 0x24, 0x40, 0xe4, 0xc4, 0x4e, 0x9b, 0x88, 0x05, 0xbc, - 0x7c, 0x74, 0x7b, 0x81, 0xca, 0xb1, 0x2c, 0x69, 0xea, 0xbd, 0xa9, 0x6a, 0x4a, 0xb0, 0xc1, 0x12, - 0xbd, 0x08, 0x23, 0x31, 0xf1, 0x89, 0x93, 0xb0, 0xf0, 0xcf, 0x7c, 0x2c, 0x3b, 0xd6, 0x20, 0x6c, - 0xe2, 0xa1, 0xa7, 0x55, 0x44, 0x4f, 0x2e, 0xfa, 0x21, 0x1b, 0xd5, 0x83, 0xbe, 0x68, 0xc1, 0xc9, - 0x75, 0xcf, 0x27, 0x9a, 0xbb, 0x88, 0x3c, 0x5f, 0x3a, 0xfa, 0x4b, 0x5e, 
0x36, 0xe9, 0x6a, 0x0d, - 0x99, 0x69, 0x4e, 0x70, 0x8e, 0x3d, 0xfd, 0xcc, 0x5b, 0x24, 0x66, 0xaa, 0x75, 0x28, 0xfb, 0x99, - 0x6f, 0xf2, 0x66, 0x2c, 0xe1, 0x68, 0x1a, 0x4e, 0x45, 0x4e, 0x92, 0xcc, 0xc4, 0xa4, 0x49, 0x82, - 0xd4, 0x73, 0x7c, 0x1e, 0x17, 0x5e, 0xd3, 0x71, 0xa1, 0xcb, 0x59, 0x30, 0xce, 0xe3, 0xa3, 0x0f, - 0xc1, 0xa3, 0x5e, 0x2b, 0x08, 0x63, 0xb2, 0xe8, 0x25, 0x89, 0x17, 0xb4, 0xf4, 0x30, 0x10, 0x4e, - 0x8f, 0x09, 0x41, 0xea, 0xd1, 0xf9, 0xde, 0x68, 0xb8, 0xdf, 0xf3, 0xe8, 0x59, 0xa8, 0x25, 0x9b, - 0x5e, 0x34, 0x13, 0x37, 0x13, 0xe6, 0x20, 0xaf, 0x69, 0x17, 0xdb, 0x8a, 0x68, 0xc7, 0x0a, 0x03, - 0xb9, 0x30, 0xca, 0x3f, 0x09, 0x0f, 0x5b, 0x12, 0xfa, 0xf1, 0xb9, 0xbe, 0xcb, 0xa3, 0x48, 0x5d, - 0x9a, 0xc4, 0xce, 0x9d, 0x4b, 0xd2, 0x5d, 0xdf, 0x38, 0xbd, 0xb7, 0x3b, 0x31, 0x7a, 0xd3, 0x20, - 0x83, 0x33, 0x44, 0xed, 0x5f, 0x28, 0x65, 0x77, 0xdc, 0xe6, 0x24, 0x45, 0x09, 0x9d, 0x8a, 0xe9, - 0x4d, 0x27, 0x96, 0xde, 0x98, 0x23, 0x86, 0xaf, 0x0b, 0xba, 0x37, 0x9d, 0xd8, 0x9c, 0xd4, 0x8c, - 0x01, 0x96, 0x9c, 0xd0, 0x6d, 0xa8, 0xa4, 0xbe, 0x53, 0x50, 0xbe, 0x8b, 0xc1, 0x51, 0x3b, 0x40, - 0x16, 0xa6, 0x13, 0xcc, 0x78, 0xa0, 0xc7, 0xa9, 0xd5, 0xbf, 0x26, 0x63, 0xdc, 0x84, 0xa1, 0xbe, - 0x96, 0x60, 0xd6, 0x6a, 0xff, 0xdf, 0x5a, 0x0f, 0xbd, 0xaa, 0x16, 0x32, 0x74, 0x11, 0x80, 0x6e, - 0x20, 0x97, 0x63, 0xb2, 0xee, 0x6d, 0x0b, 0x43, 0x42, 0xcd, 0xdd, 0xeb, 0x0a, 0x82, 0x0d, 0x2c, - 0xf9, 0xcc, 0x4a, 0x67, 0x9d, 0x3e, 0x53, 0xea, 0x7e, 0x86, 0x43, 0xb0, 0x81, 0x85, 0x5e, 0x80, - 0x21, 0xaf, 0xed, 0xb4, 0x54, 0x28, 0xde, 0xe3, 0x74, 0xd2, 0xce, 0xb3, 0x96, 0xbb, 0xbb, 0x13, - 0x27, 0x95, 0x40, 0xac, 0x09, 0x0b, 0x5c, 0xf4, 0x2b, 0x16, 0x8c, 0xba, 0x61, 0xbb, 0x1d, 0x06, - 0x7c, 0xdb, 0x25, 0xf6, 0x90, 0xb7, 0x8f, 0x6b, 0x99, 0x9f, 0x9c, 0x31, 0x98, 0xf1, 0x4d, 0xa4, - 0x4a, 0xcc, 0x31, 0x41, 0x38, 0x23, 0x95, 0x39, 0xb7, 0xab, 0xfb, 0xcc, 0xed, 0x5f, 0xb7, 0x60, - 0x8c, 0x3f, 0x6b, 0xec, 0x06, 0x45, 0x0e, 0x4a, 0x78, 0xcc, 0xaf, 0xd5, 0xb5, 0x41, 0x56, 0x5e, - 0xba, 0x2e, 0x38, 0xee, 0x16, 0x12, 0xcd, 0xc1, 0xd8, 0x7a, 0x18, 0xbb, 0xc4, 0xec, 0x08, 0xa1, - 0x98, 0x14, 0xa1, 0xcb, 0x79, 0x04, 0xdc, 0xfd, 0x0c, 0xba, 0x09, 0x8f, 0x18, 0x8d, 0x66, 0x3f, - 0x70, 0xdd, 0xf4, 0xa4, 0xa0, 0xf6, 0xc8, 0xe5, 0x9e, 0x58, 0xb8, 0xcf, 0xd3, 0x59, 0x87, 0x49, - 0x7d, 0x00, 0x87, 0xc9, 0xeb, 0xf0, 0x98, 0xdb, 0xdd, 0x33, 0x5b, 0x49, 0x67, 0x2d, 0xe1, 0x9a, - 0xaa, 0xd6, 0xf8, 0x01, 0x41, 0xe0, 0xb1, 0x99, 0x7e, 0x88, 0xb8, 0x3f, 0x0d, 0xf4, 0x31, 0xa8, - 0xc5, 0x84, 0x7d, 0x95, 0x44, 0x24, 0x64, 0x1c, 0x71, 0x97, 0xac, 0x2d, 0x50, 0x4e, 0x56, 0xeb, - 0x5e, 0xd1, 0x90, 0x60, 0xc5, 0xf1, 0xfc, 0x07, 0x60, 0xac, 0x6b, 0x3c, 0x1f, 0xc8, 0x67, 0x31, - 0x0b, 0x8f, 0xf4, 0x1e, 0x39, 0x07, 0xf2, 0x5c, 0xfc, 0xfd, 0x5c, 0x9c, 0xa1, 0x61, 0x4d, 0x0e, - 0xe0, 0x05, 0x73, 0xa0, 0x4c, 0x82, 0x2d, 0xa1, 0x48, 0x2f, 0x1f, 0xad, 0xf7, 0x2e, 0x05, 0x5b, - 0x7c, 0xe0, 0xb3, 0xad, 0xfe, 0xa5, 0x60, 0x0b, 0x53, 0xda, 0xe8, 0xcb, 0x56, 0xc6, 0x1a, 0xe2, - 0xbe, 0xb3, 0x8f, 0x1c, 0x8b, 0xf9, 0x3c, 0xb0, 0x81, 0x64, 0xff, 0x8b, 0x12, 0x5c, 0xd8, 0x8f, - 0xc8, 0x00, 0xdd, 0xf7, 0x14, 0x0c, 0x25, 0xec, 0x08, 0x54, 0x68, 0xa6, 0x11, 0xaa, 0x95, 0xf8, - 0xa1, 0xe8, 0xeb, 0x58, 0x80, 0x90, 0x0f, 0xe5, 0xb6, 0x13, 0x09, 0x97, 0xca, 0xfc, 0x51, 0xb3, - 0x0a, 0xe8, 0x7f, 0xc7, 0x5f, 0x74, 0x22, 0xbe, 0x51, 0x37, 0x1a, 0x30, 0x65, 0x83, 0x52, 0xa8, - 0x3a, 0x71, 0xec, 0xc8, 0xf3, 0xb6, 0x6b, 0xc5, 0xf0, 0x9b, 0xa6, 0x24, 0x1b, 0x63, 0x7b, 0xbb, - 0x13, 0x27, 0x32, 0x4d, 0x98, 0x33, 0xb3, 0x3f, 0x37, 0x9c, 0x89, 0xac, 0x67, 0x87, 0xa8, 0x09, - 
0x0c, 0x09, 0x4f, 0x8a, 0x55, 0x74, 0x32, 0x07, 0x4f, 0x8d, 0x62, 0x9b, 0x25, 0x91, 0x60, 0x2a, - 0x58, 0xa1, 0xcf, 0x5a, 0x2c, 0x8d, 0x53, 0x66, 0x1b, 0x88, 0x2d, 0xca, 0xf1, 0x64, 0x95, 0x9a, - 0xc9, 0xa1, 0xb2, 0x11, 0x9b, 0xdc, 0xe9, 0xd2, 0x15, 0xf1, 0x84, 0xa4, 0xfc, 0x46, 0x45, 0x26, - 0x7a, 0x4a, 0x38, 0xda, 0xee, 0x71, 0x58, 0x5a, 0x40, 0x2a, 0xe0, 0x00, 0xc7, 0xa3, 0x5f, 0xb7, - 0x60, 0x8c, 0x9b, 0xa3, 0xb3, 0xde, 0xfa, 0x3a, 0x89, 0x49, 0xe0, 0x12, 0x69, 0xd0, 0x1f, 0xf1, - 0x38, 0x5e, 0xba, 0xaf, 0xe6, 0xf3, 0xe4, 0xf5, 0x9a, 0xd6, 0x05, 0xc2, 0xdd, 0xc2, 0xa0, 0x26, - 0x54, 0xbc, 0x60, 0x3d, 0x14, 0x2b, 0x79, 0xe3, 0x68, 0x42, 0xcd, 0x07, 0xeb, 0xa1, 0x9e, 0xcd, - 0xf4, 0x1f, 0x66, 0xd4, 0xd1, 0x02, 0x9c, 0x8d, 0x85, 0xcb, 0xe5, 0x8a, 0x97, 0xd0, 0x8d, 0xf1, - 0x82, 0xd7, 0xf6, 0x52, 0xb6, 0x0a, 0x97, 0x1b, 0xe3, 0x7b, 0xbb, 0x13, 0x67, 0x71, 0x0f, 0x38, - 0xee, 0xf9, 0x14, 0x7a, 0x13, 0x86, 0x65, 0xde, 0x69, 0xad, 0x88, 0xcd, 0x51, 0xf7, 0xf8, 0x57, - 0x83, 0x69, 0x45, 0xa4, 0x98, 0x4a, 0x86, 0xf6, 0x17, 0x47, 0xa0, 0xfb, 0x6c, 0x10, 0x7d, 0x1c, - 0xea, 0xb1, 0xca, 0x85, 0xb5, 0x8a, 0x88, 0xef, 0x93, 0xdf, 0x57, 0x9c, 0x4b, 0x2a, 0x7b, 0x40, - 0x67, 0xbd, 0x6a, 0x8e, 0xd4, 0x6a, 0x4f, 0xf4, 0x11, 0x62, 0x01, 0x63, 0x5b, 0x70, 0xd5, 0xc7, - 0x43, 0x3b, 0x81, 0x8b, 0x19, 0x0f, 0x14, 0xc3, 0xd0, 0x06, 0x71, 0xfc, 0x74, 0xa3, 0x18, 0x4f, - 0xf6, 0x15, 0x46, 0x2b, 0x9f, 0x35, 0xc1, 0x5b, 0xb1, 0xe0, 0x84, 0xb6, 0x61, 0x78, 0x83, 0x0f, - 0x00, 0x61, 0x48, 0x2f, 0x1e, 0xb5, 0x73, 0x33, 0xa3, 0x4a, 0x7f, 0x6e, 0xd1, 0x80, 0x25, 0x3b, - 0x16, 0x69, 0x61, 0x1c, 0x8b, 0xf3, 0xa9, 0x5b, 0x5c, 0xc2, 0xc8, 0xe0, 0x67, 0xe2, 0x1f, 0x85, - 0xd1, 0x98, 0xb8, 0x61, 0xe0, 0x7a, 0x3e, 0x69, 0x4e, 0x4b, 0x2f, 0xf5, 0x41, 0xd2, 0x0c, 0xd8, - 0x66, 0x14, 0x1b, 0x34, 0x70, 0x86, 0x22, 0xfa, 0x8c, 0x05, 0x27, 0x55, 0x02, 0x1d, 0xfd, 0x20, - 0x44, 0x78, 0x45, 0x17, 0x0a, 0x4a, 0xd7, 0x63, 0x34, 0x1b, 0x68, 0x6f, 0x77, 0xe2, 0x64, 0xb6, - 0x0d, 0xe7, 0xf8, 0xa2, 0x57, 0x01, 0xc2, 0x35, 0x1e, 0x4e, 0x31, 0x9d, 0x0a, 0x17, 0xe9, 0x41, - 0x5e, 0xf5, 0x24, 0xcf, 0x37, 0x92, 0x14, 0xb0, 0x41, 0x0d, 0x5d, 0x03, 0xe0, 0xd3, 0x66, 0x75, - 0x27, 0x92, 0xd6, 0xb6, 0xcc, 0x13, 0x81, 0x15, 0x05, 0xb9, 0xbb, 0x3b, 0xd1, 0xed, 0xb2, 0x62, - 0xa7, 0xf7, 0xc6, 0xe3, 0xe8, 0x27, 0x60, 0x38, 0xe9, 0xb4, 0xdb, 0x8e, 0x72, 0xa0, 0x16, 0x98, - 0xc1, 0xc4, 0xe9, 0x1a, 0xaa, 0x88, 0x37, 0x60, 0xc9, 0x11, 0xdd, 0xa6, 0x4a, 0x35, 0x11, 0xbe, - 0x34, 0x36, 0x8b, 0xb8, 0x4d, 0x30, 0xc2, 0xde, 0xe9, 0x7d, 0x32, 0x3a, 0x04, 0xf7, 0xc0, 0xb9, - 0xbb, 0x3b, 0xf1, 0x48, 0xb6, 0x7d, 0x21, 0x14, 0x39, 0x45, 0x3d, 0x69, 0xa2, 0xab, 0xb2, 0x0c, - 0x05, 0x7d, 0x6d, 0x99, 0x1d, 0xfd, 0x8c, 0x2e, 0x43, 0xc1, 0x9a, 0xfb, 0xf7, 0x99, 0xf9, 0x30, - 0x5a, 0x84, 0x33, 0x6e, 0x18, 0xa4, 0x71, 0xe8, 0xfb, 0xbc, 0xb6, 0x0a, 0xdf, 0xf8, 0x70, 0x07, - 0xeb, 0x3b, 0x85, 0xd8, 0x67, 0x66, 0xba, 0x51, 0x70, 0xaf, 0xe7, 0xec, 0x20, 0x1b, 0x67, 0x26, - 0x3a, 0xe7, 0x05, 0x18, 0x25, 0xdb, 0x29, 0x89, 0x03, 0xc7, 0xbf, 0x81, 0x17, 0xa4, 0x6b, 0x91, - 0xcd, 0x81, 0x4b, 0x46, 0x3b, 0xce, 0x60, 0x21, 0x5b, 0xed, 0xf6, 0x4b, 0x3a, 0xf1, 0x8e, 0xef, - 0xf6, 0xe5, 0xde, 0xde, 0xfe, 0x9f, 0xa5, 0x8c, 0x41, 0xb6, 0x1a, 0x13, 0x82, 0x42, 0xa8, 0x06, - 0x61, 0x53, 0xe9, 0xfe, 0xab, 0xc5, 0xe8, 0xfe, 0xeb, 0x61, 0xd3, 0xa8, 0x55, 0x41, 0xff, 0x25, - 0x98, 0xf3, 0x61, 0xc9, 0xfc, 0xb2, 0xea, 0x01, 0x03, 0x88, 0x8d, 0x46, 0x91, 0x9c, 0x55, 0x32, - 0xff, 0x92, 0xc9, 0x08, 0x67, 0xf9, 0xa2, 0x4d, 0xa8, 0x6e, 0x84, 0x49, 0x2a, 0xb7, 0x1f, 0x47, - 0xdc, 0xe9, 0x5c, 0x09, 
0x93, 0x94, 0x59, 0x11, 0xea, 0xb5, 0x69, 0x4b, 0x82, 0x39, 0x0f, 0xfb, - 0x3f, 0x5a, 0x19, 0x47, 0xf2, 0x2d, 0x16, 0x73, 0xb9, 0x45, 0x02, 0x3a, 0xad, 0xcd, 0x78, 0x9b, - 0x3f, 0x93, 0x4b, 0xfc, 0x7a, 0x57, 0xbf, 0xca, 0x41, 0x77, 0x28, 0x85, 0x49, 0x46, 0xc2, 0x08, - 0xcd, 0xf9, 0xa4, 0x95, 0x4d, 0xc1, 0x2b, 0x15, 0xb1, 0xc1, 0x30, 0x53, 0x4c, 0xf7, 0xcd, 0xe6, - 0xb3, 0xbf, 0x6c, 0xc1, 0x70, 0xc3, 0x71, 0x37, 0xc3, 0xf5, 0x75, 0xf4, 0x2c, 0xd4, 0x9a, 0x9d, - 0xd8, 0xcc, 0x06, 0x54, 0xbb, 0xe7, 0x59, 0xd1, 0x8e, 0x15, 0x06, 0x1d, 0xc3, 0xeb, 0x8e, 0x2b, - 0x13, 0x4d, 0xcb, 0x7c, 0x0c, 0x5f, 0x66, 0x2d, 0x58, 0x40, 0xd0, 0x8b, 0x30, 0xd2, 0x76, 0xb6, - 0xe5, 0xc3, 0x79, 0x2f, 0xf6, 0xa2, 0x06, 0x61, 0x13, 0xcf, 0xfe, 0xa7, 0x16, 0x8c, 0x37, 0x9c, - 0xc4, 0x73, 0xa7, 0x3b, 0xe9, 0x46, 0xc3, 0x4b, 0xd7, 0x3a, 0xee, 0x26, 0x49, 0x79, 0x76, 0x31, - 0x95, 0xb2, 0x93, 0xd0, 0xa9, 0xa4, 0xf6, 0x75, 0x4a, 0xca, 0x1b, 0xa2, 0x1d, 0x2b, 0x0c, 0xf4, - 0x26, 0x8c, 0x44, 0x4e, 0x92, 0xdc, 0x09, 0xe3, 0x26, 0x26, 0xeb, 0xc5, 0xe4, 0xf6, 0xaf, 0x10, - 0x37, 0x26, 0x29, 0x26, 0xeb, 0xe2, 0xa4, 0x55, 0xd3, 0xc7, 0x26, 0x33, 0xfb, 0x0b, 0x16, 0x3c, - 0xd6, 0x20, 0x4e, 0x4c, 0x62, 0x56, 0x0a, 0x40, 0xbd, 0xc8, 0x8c, 0x1f, 0x76, 0x9a, 0xe8, 0x0d, - 0xa8, 0xa5, 0xb4, 0x99, 0x8a, 0x65, 0x15, 0x2b, 0x16, 0x3b, 0x28, 0x5d, 0x15, 0xc4, 0xb1, 0x62, - 0x63, 0xff, 0x65, 0x0b, 0x46, 0xd9, 0x99, 0xd3, 0x2c, 0x49, 0x1d, 0xcf, 0xef, 0xaa, 0x98, 0x63, - 0x0d, 0x58, 0x31, 0xe7, 0x02, 0x54, 0x36, 0xc2, 0x36, 0xc9, 0x9f, 0x97, 0x5e, 0x09, 0xe9, 0xb6, - 0x9a, 0x42, 0xd0, 0xf3, 0xf4, 0xc3, 0x7b, 0x41, 0xea, 0xd0, 0x29, 0x20, 0x7d, 0x9a, 0xa7, 0xf8, - 0x47, 0x57, 0xcd, 0xd8, 0xc4, 0xb1, 0x7f, 0xab, 0x0e, 0xc3, 0xe2, 0x50, 0x7d, 0xe0, 0x0c, 0x73, - 0xb9, 0xbf, 0x2f, 0xf5, 0xdd, 0xdf, 0x27, 0x30, 0xe4, 0xb2, 0x7a, 0x5c, 0xc2, 0x8c, 0xbc, 0x56, - 0x48, 0x14, 0x06, 0x2f, 0xf1, 0xa5, 0xc5, 0xe2, 0xff, 0xb1, 0x60, 0x85, 0xbe, 0x64, 0xc1, 0x29, - 0x37, 0x0c, 0x02, 0xe2, 0x6a, 0x1b, 0xa7, 0x52, 0xc4, 0x61, 0xfb, 0x4c, 0x96, 0xa8, 0x3e, 0xf0, - 0xc8, 0x01, 0x70, 0x9e, 0x3d, 0x7a, 0x19, 0x4e, 0xf0, 0x3e, 0xbb, 0x99, 0x71, 0xc4, 0xea, 0x42, - 0x2a, 0x26, 0x10, 0x67, 0x71, 0xd1, 0x24, 0x77, 0x68, 0x8b, 0x92, 0x25, 0x43, 0xfa, 0xf4, 0xcc, - 0x28, 0x56, 0x62, 0x60, 0xa0, 0x18, 0x50, 0x4c, 0xd6, 0x63, 0x92, 0x6c, 0x88, 0xa0, 0x03, 0x66, - 0x5f, 0x0d, 0x1f, 0x2e, 0x63, 0x15, 0x77, 0x51, 0xc2, 0x3d, 0xa8, 0xa3, 0x4d, 0xb1, 0xc1, 0xac, - 0x15, 0xa1, 0x43, 0xc5, 0x67, 0xee, 0xbb, 0xcf, 0x9c, 0x80, 0x6a, 0xb2, 0xe1, 0xc4, 0x4d, 0x66, - 0xd7, 0x95, 0x79, 0x96, 0xc4, 0x0a, 0x6d, 0xc0, 0xbc, 0x1d, 0xcd, 0xc2, 0xe9, 0x5c, 0x19, 0x98, - 0x44, 0x38, 0x4c, 0x55, 0x68, 0x7f, 0xae, 0x80, 0x4c, 0x82, 0xbb, 0x9e, 0x30, 0x9d, 0x0f, 0x23, - 0xfb, 0x38, 0x1f, 0x76, 0x54, 0x68, 0xdb, 0x28, 0x5b, 0x1f, 0x5f, 0x29, 0xa4, 0x03, 0x06, 0x8a, - 0x63, 0xfb, 0x7c, 0x2e, 0x8e, 0xed, 0x04, 0x13, 0xe0, 0x66, 0x31, 0x02, 0x1c, 0x3c, 0x68, 0xed, - 0x41, 0x06, 0xa1, 0xfd, 0x77, 0x0b, 0xe4, 0x77, 0x9d, 0x71, 0xdc, 0x0d, 0x42, 0x87, 0x0c, 0x7a, - 0x3f, 0x9c, 0x54, 0x5b, 0xe8, 0x99, 0xb0, 0x13, 0xf0, 0xf8, 0xb3, 0xb2, 0x3e, 0x19, 0xc5, 0x19, - 0x28, 0xce, 0x61, 0xa3, 0x29, 0xa8, 0xd3, 0x7e, 0xe2, 0x8f, 0xf2, 0xb5, 0x56, 0x6d, 0xd3, 0xa7, - 0x97, 0xe7, 0xc5, 0x53, 0x1a, 0x07, 0x85, 0x30, 0xe6, 0x3b, 0x49, 0xca, 0x24, 0xa0, 0x3b, 0xea, - 0x43, 0xe6, 0x8b, 0xb3, 0xf8, 0xf1, 0x85, 0x3c, 0x21, 0xdc, 0x4d, 0xdb, 0xfe, 0x76, 0x05, 0x4e, - 0x64, 0x34, 0xe3, 0x01, 0x17, 0xe9, 0x67, 0xa1, 0x26, 0xd7, 0xcd, 0x7c, 0xd5, 0x0a, 0xb5, 0xb8, - 0x2a, 0x0c, 0xba, 0x68, 0xad, 0xe9, 0x55, 0x35, 
0x6f, 0x54, 0x18, 0x0b, 0x2e, 0x36, 0xf1, 0x98, - 0x52, 0x4e, 0xfd, 0x64, 0xc6, 0xf7, 0x48, 0x90, 0x72, 0x31, 0x8b, 0x51, 0xca, 0xab, 0x0b, 0x2b, - 0x26, 0x51, 0xad, 0x94, 0x73, 0x00, 0x9c, 0x67, 0x8f, 0x7e, 0xda, 0x82, 0x13, 0xce, 0x9d, 0x44, - 0x17, 0x8d, 0x14, 0x11, 0x6b, 0x47, 0x5c, 0xa4, 0x32, 0x75, 0x28, 0xb9, 0xcb, 0x37, 0xd3, 0x84, - 0xb3, 0x4c, 0xd1, 0x57, 0x2d, 0x40, 0x64, 0x9b, 0xb8, 0x32, 0xa6, 0x4e, 0xc8, 0x32, 0x54, 0xc4, - 0x4e, 0xf3, 0x52, 0x17, 0x5d, 0xae, 0xd5, 0xbb, 0xdb, 0x71, 0x0f, 0x19, 0xec, 0x7f, 0x54, 0x56, - 0x13, 0x4a, 0x87, 0x71, 0x3a, 0x46, 0x38, 0x99, 0x75, 0xf8, 0x70, 0x32, 0x7d, 0x2c, 0xdf, 0x9d, - 0x86, 0x96, 0x49, 0xbf, 0x29, 0x3d, 0xa0, 0xf4, 0x9b, 0x9f, 0xb2, 0x32, 0xf5, 0x59, 0x46, 0x2e, - 0xbe, 0x5a, 0x6c, 0x08, 0xe9, 0x24, 0x0f, 0x19, 0xc8, 0x69, 0xf7, 0x6c, 0xa4, 0x08, 0xd5, 0xa6, - 0x06, 0xda, 0x81, 0xb4, 0xe1, 0xbf, 0x29, 0xc3, 0x88, 0xb1, 0x92, 0xf6, 0x34, 0x8b, 0xac, 0x87, - 0xcc, 0x2c, 0x2a, 0x1d, 0xc0, 0x2c, 0xfa, 0x49, 0xa8, 0xbb, 0x52, 0xcb, 0x17, 0x53, 0xa1, 0x34, - 0xbf, 0x76, 0x68, 0x45, 0xaf, 0x9a, 0xb0, 0xe6, 0x89, 0xe6, 0x32, 0xf9, 0x2b, 0x62, 0x85, 0xa8, - 0xb0, 0x15, 0xa2, 0x57, 0x82, 0x89, 0x58, 0x29, 0xba, 0x9f, 0x61, 0x65, 0x7c, 0x22, 0x4f, 0xbc, - 0x97, 0x0c, 0xf4, 0xe6, 0x65, 0x7c, 0x96, 0xe7, 0x65, 0x33, 0x36, 0x71, 0xec, 0x6f, 0x5b, 0xea, - 0xe3, 0xde, 0x87, 0xa4, 0xf6, 0xdb, 0xd9, 0xa4, 0xf6, 0x4b, 0x85, 0x74, 0x73, 0x9f, 0x6c, 0xf6, - 0xeb, 0x30, 0x3c, 0x13, 0xb6, 0xdb, 0x4e, 0xd0, 0x44, 0x3f, 0x08, 0xc3, 0x2e, 0xff, 0x29, 0x1c, - 0x3b, 0xec, 0x78, 0x50, 0x40, 0xb1, 0x84, 0xa1, 0xc7, 0xa1, 0xe2, 0xc4, 0x2d, 0xe9, 0xcc, 0x61, - 0x11, 0x26, 0xd3, 0x71, 0x2b, 0xc1, 0xac, 0xd5, 0xfe, 0x7b, 0x15, 0x80, 0x99, 0xb0, 0x1d, 0x39, - 0x31, 0x69, 0xae, 0x86, 0xac, 0x42, 0xda, 0xb1, 0x1e, 0xaa, 0xe9, 0xcd, 0xd2, 0xc3, 0x7c, 0xb0, - 0x66, 0x1c, 0xae, 0x94, 0xef, 0xf3, 0xe1, 0x4a, 0x9f, 0xf3, 0xb2, 0xca, 0x43, 0x74, 0x5e, 0x66, - 0x7f, 0xce, 0x02, 0x44, 0x07, 0x4d, 0x18, 0x90, 0x20, 0xd5, 0x07, 0xda, 0x53, 0x50, 0x77, 0x65, - 0xab, 0x30, 0xac, 0xb4, 0x8a, 0x90, 0x00, 0xac, 0x71, 0x06, 0xd8, 0x21, 0x3f, 0x25, 0xf5, 0x77, - 0x39, 0x1b, 0x9c, 0xca, 0xb4, 0xbe, 0x50, 0xe7, 0xf6, 0x6f, 0x97, 0xe0, 0x11, 0xbe, 0x24, 0x2f, - 0x3a, 0x81, 0xd3, 0x22, 0x6d, 0x2a, 0xd5, 0xa0, 0x21, 0x0a, 0x2e, 0xdd, 0x9a, 0x79, 0x32, 0xd8, - 0xf4, 0xa8, 0x73, 0x97, 0xcf, 0x39, 0x3e, 0xcb, 0xe6, 0x03, 0x2f, 0xc5, 0x8c, 0x38, 0x4a, 0xa0, - 0x26, 0x4b, 0x72, 0x0b, 0x5d, 0x5c, 0x10, 0x23, 0xa5, 0x96, 0xc4, 0xba, 0x49, 0xb0, 0x62, 0x44, - 0x0d, 0x57, 0x3f, 0x74, 0x37, 0x31, 0x89, 0x42, 0xa6, 0x77, 0x8d, 0x58, 0xbf, 0x05, 0xd1, 0x8e, - 0x15, 0x86, 0xfd, 0xdb, 0x16, 0xe4, 0x57, 0x24, 0xa3, 0x5c, 0x95, 0x75, 0xcf, 0x72, 0x55, 0x07, - 0xa8, 0x17, 0xf5, 0xe3, 0x30, 0xe2, 0xa4, 0xd4, 0x88, 0xe0, 0xdb, 0xee, 0xf2, 0xe1, 0x8e, 0x35, - 0x16, 0xc3, 0xa6, 0xb7, 0xee, 0xb1, 0xed, 0xb6, 0x49, 0xce, 0xfe, 0xaf, 0x15, 0x18, 0xeb, 0x4a, - 0x89, 0x40, 0x2f, 0xc1, 0xa8, 0x2b, 0x86, 0x47, 0x24, 0x1d, 0x5a, 0x75, 0x33, 0x36, 0x4c, 0xc3, - 0x70, 0x06, 0x73, 0x80, 0x01, 0x3a, 0x0f, 0x67, 0x62, 0xba, 0xd1, 0xef, 0x90, 0xe9, 0xf5, 0x94, - 0xc4, 0x2b, 0xc4, 0x0d, 0x83, 0x26, 0x2f, 0xaa, 0x56, 0x6e, 0x3c, 0xba, 0xb7, 0x3b, 0x71, 0x06, - 0x77, 0x83, 0x71, 0xaf, 0x67, 0x50, 0x04, 0x27, 0x7c, 0xd3, 0x06, 0x14, 0x1b, 0x80, 0x43, 0x99, - 0x8f, 0xca, 0x46, 0xc8, 0x34, 0xe3, 0x2c, 0x83, 0xac, 0x21, 0x59, 0x7d, 0x40, 0x86, 0xe4, 0xa7, - 0xb5, 0x21, 0xc9, 0xcf, 0xdf, 0x3f, 0x5c, 0x70, 0x4a, 0xcc, 0x71, 0x5b, 0x92, 0xaf, 0x40, 0x4d, - 0xc6, 0x26, 0x0d, 0x14, 0xd3, 0x63, 0xd2, 0xe9, 0xa3, 0xd1, 0x9e, 0x86, 
0x3f, 0x75, 0x29, 0x8e, - 0x8d, 0xce, 0xbc, 0x1e, 0xa6, 0xd3, 0xbe, 0x1f, 0xde, 0xa1, 0x8b, 0xf4, 0x8d, 0x84, 0x08, 0x0f, - 0x8b, 0x7d, 0xb7, 0x04, 0x3d, 0x36, 0x2b, 0x74, 0x3e, 0x6a, 0xcb, 0x20, 0x33, 0x1f, 0x0f, 0x66, - 0x1d, 0xa0, 0x6d, 0x1e, 0xbf, 0xc5, 0xd7, 0xc0, 0x0f, 0x15, 0xbd, 0xd9, 0xd2, 0x21, 0x5d, 0x2a, - 0xa3, 0x40, 0x85, 0x75, 0x5d, 0x04, 0xd0, 0x06, 0x9d, 0x88, 0x17, 0x57, 0xc7, 0xc3, 0xda, 0xee, - 0xc3, 0x06, 0x16, 0xdd, 0x7b, 0x7b, 0x41, 0x92, 0x3a, 0xbe, 0x7f, 0xc5, 0x0b, 0x52, 0xe1, 0x44, - 0x54, 0x8b, 0xfd, 0xbc, 0x06, 0x61, 0x13, 0xef, 0xfc, 0xfb, 0x8c, 0xef, 0x77, 0x90, 0xef, 0xbe, - 0x01, 0x8f, 0xcd, 0x79, 0xa9, 0xca, 0x72, 0x50, 0xe3, 0x8d, 0xda, 0x6b, 0x2a, 0x6b, 0xc7, 0xea, - 0x9b, 0xb5, 0x63, 0x64, 0x19, 0x94, 0xb2, 0x49, 0x11, 0xf9, 0x2c, 0x03, 0xfb, 0x25, 0x38, 0x3b, - 0xe7, 0xa5, 0x97, 0x3d, 0x9f, 0x1c, 0x90, 0x89, 0xfd, 0x9b, 0x43, 0x30, 0x6a, 0xe6, 0xc9, 0x1d, - 0x24, 0xf1, 0xe8, 0x0b, 0xd4, 0x24, 0x13, 0x6f, 0xe7, 0xa9, 0xc3, 0xb5, 0x5b, 0x47, 0x4e, 0xda, - 0xeb, 0xdd, 0x63, 0x86, 0x55, 0xa6, 0x79, 0x62, 0x53, 0x00, 0x74, 0x07, 0xaa, 0xeb, 0x2c, 0x0a, - 0xbe, 0x5c, 0x44, 0x04, 0x42, 0xaf, 0x1e, 0xd5, 0xd3, 0x91, 0xc7, 0xd1, 0x73, 0x7e, 0x74, 0x25, - 0x8d, 0xb3, 0xa9, 0x55, 0x46, 0xe4, 0xa6, 0x48, 0xaa, 0x52, 0x18, 0xfd, 0x96, 0x84, 0xea, 0x21, - 0x96, 0x84, 0x8c, 0x82, 0x1e, 0x7a, 0x40, 0x0a, 0x9a, 0x65, 0x34, 0xa4, 0x1b, 0xcc, 0xce, 0x13, - 0xa1, 0xe6, 0xc3, 0xac, 0x13, 0x8c, 0x8c, 0x86, 0x0c, 0x18, 0xe7, 0xf1, 0xd1, 0x27, 0x94, 0x8a, - 0xaf, 0x15, 0xe1, 0x7f, 0x35, 0x47, 0xf4, 0x71, 0x6b, 0xf7, 0xcf, 0x95, 0xe0, 0xe4, 0x5c, 0xd0, - 0x59, 0x9e, 0x5b, 0xee, 0xac, 0xf9, 0x9e, 0x7b, 0x8d, 0xec, 0x50, 0x15, 0xbe, 0x49, 0x76, 0xe6, - 0x67, 0xc5, 0x0c, 0x52, 0x63, 0xe6, 0x1a, 0x6d, 0xc4, 0x1c, 0x46, 0x95, 0xd1, 0xba, 0x17, 0xb4, - 0x48, 0x1c, 0xc5, 0x9e, 0x70, 0x8d, 0x1a, 0xca, 0xe8, 0xb2, 0x06, 0x61, 0x13, 0x8f, 0xd2, 0x0e, - 0xef, 0x04, 0x24, 0xce, 0x1b, 0xbc, 0x4b, 0xb4, 0x11, 0x73, 0x18, 0x45, 0x4a, 0xe3, 0x4e, 0x92, - 0x8a, 0xc1, 0xa8, 0x90, 0x56, 0x69, 0x23, 0xe6, 0x30, 0x3a, 0xd3, 0x93, 0xce, 0x1a, 0x0b, 0xf0, - 0xc8, 0xc5, 0xb5, 0xaf, 0xf0, 0x66, 0x2c, 0xe1, 0x14, 0x75, 0x93, 0xec, 0xcc, 0xd2, 0xdd, 0x71, - 0x2e, 0xbd, 0xe5, 0x1a, 0x6f, 0xc6, 0x12, 0xce, 0xaa, 0xc6, 0x65, 0xbb, 0xe3, 0x7b, 0xae, 0x6a, - 0x5c, 0x56, 0xfc, 0x3e, 0xfb, 0xec, 0x5f, 0xb6, 0x60, 0xd4, 0x0c, 0xcb, 0x42, 0xad, 0x9c, 0x2d, - 0xbc, 0xd4, 0x55, 0x74, 0xf4, 0x47, 0x7b, 0xdd, 0xb0, 0xd4, 0xf2, 0xd2, 0x30, 0x4a, 0x9e, 0x23, - 0x41, 0xcb, 0x0b, 0x08, 0x3b, 0x6d, 0xe7, 0xe1, 0x5c, 0x99, 0x98, 0xaf, 0x99, 0xb0, 0x49, 0x0e, - 0x61, 0x4c, 0xdb, 0xb7, 0x60, 0xac, 0x2b, 0xa7, 0x69, 0x00, 0x13, 0x64, 0xdf, 0x8c, 0x52, 0x1b, - 0xc3, 0x08, 0x25, 0x2c, 0x4b, 0xb0, 0xcc, 0xc0, 0x18, 0x9f, 0x48, 0x94, 0xd3, 0x8a, 0xbb, 0x41, - 0xda, 0x2a, 0x4f, 0x8d, 0xf9, 0xe1, 0x6f, 0xe6, 0x81, 0xb8, 0x1b, 0xdf, 0xfe, 0xbc, 0x05, 0x27, - 0x32, 0x69, 0x66, 0x05, 0x19, 0x4b, 0x6c, 0xa6, 0x85, 0x2c, 0x4a, 0x90, 0x85, 0x4a, 0x97, 0xd9, - 0x62, 0xaa, 0x67, 0x9a, 0x06, 0x61, 0x13, 0xcf, 0xfe, 0x72, 0x09, 0x6a, 0x32, 0xd2, 0x62, 0x00, - 0x51, 0x3e, 0x6b, 0xc1, 0x09, 0x75, 0xf6, 0xc1, 0x9c, 0x6a, 0xa5, 0x22, 0x72, 0x02, 0xa8, 0x04, - 0x6a, 0x5b, 0x1e, 0xac, 0x87, 0xda, 0x72, 0xc7, 0x26, 0x33, 0x9c, 0xe5, 0x8d, 0x6e, 0x02, 0x24, - 0x3b, 0x49, 0x4a, 0xda, 0x86, 0x7b, 0xcf, 0x36, 0x66, 0xdc, 0xa4, 0x1b, 0xc6, 0x84, 0xce, 0xaf, - 0xeb, 0x61, 0x93, 0xac, 0x28, 0x4c, 0x6d, 0x42, 0xe9, 0x36, 0x6c, 0x50, 0xb2, 0xff, 0x4e, 0x09, - 0x4e, 0xe7, 0x45, 0x42, 0x1f, 0x86, 0x51, 0xc9, 0xdd, 0xb8, 0x2d, 0x4a, 0x86, 0x97, 0x8c, 0x62, - 
0x03, 0x76, 0x77, 0x77, 0x62, 0xa2, 0xfb, 0xb6, 0xae, 0x49, 0x13, 0x05, 0x67, 0x88, 0xf1, 0x03, - 0x28, 0x71, 0x52, 0xda, 0xd8, 0x99, 0x8e, 0x22, 0x71, 0x8a, 0x64, 0x1c, 0x40, 0x99, 0x50, 0x9c, - 0xc3, 0x46, 0xcb, 0x70, 0xd6, 0x68, 0xb9, 0x4e, 0xbc, 0xd6, 0xc6, 0x5a, 0x18, 0xcb, 0x1d, 0xd8, - 0xe3, 0x3a, 0x00, 0xac, 0x1b, 0x07, 0xf7, 0x7c, 0x92, 0xae, 0xf6, 0xae, 0x13, 0x39, 0xae, 0x97, - 0xee, 0x08, 0x7f, 0xa5, 0xd2, 0x4d, 0x33, 0xa2, 0x1d, 0x2b, 0x0c, 0x7b, 0x11, 0x2a, 0x03, 0x8e, - 0xa0, 0x81, 0x2c, 0xff, 0x57, 0xa0, 0x46, 0xc9, 0x49, 0xf3, 0xae, 0x08, 0x92, 0x21, 0xd4, 0xe4, - 0x85, 0x0f, 0xc8, 0x86, 0xb2, 0xe7, 0xc8, 0x33, 0x3e, 0xf5, 0x5a, 0xf3, 0x49, 0xd2, 0x61, 0x9b, - 0x69, 0x0a, 0x44, 0x4f, 0x41, 0x99, 0x6c, 0x47, 0xf9, 0xc3, 0xbc, 0x4b, 0xdb, 0x91, 0x17, 0x93, - 0x84, 0x22, 0x91, 0xed, 0x08, 0x9d, 0x87, 0x92, 0xd7, 0x14, 0x8b, 0x14, 0x08, 0x9c, 0xd2, 0xfc, - 0x2c, 0x2e, 0x79, 0x4d, 0x7b, 0x1b, 0xea, 0xea, 0x86, 0x09, 0xb4, 0x29, 0x75, 0xb7, 0x55, 0x44, - 0x68, 0x94, 0xa4, 0xdb, 0x47, 0x6b, 0x77, 0x00, 0x74, 0xbe, 0x5d, 0x51, 0xfa, 0xe5, 0x02, 0x54, - 0xdc, 0x50, 0xe4, 0x02, 0xd7, 0x34, 0x19, 0xa6, 0xb4, 0x19, 0xc4, 0xbe, 0x05, 0x27, 0xaf, 0x05, - 0xe1, 0x1d, 0x56, 0x42, 0x9b, 0x95, 0xbe, 0xa2, 0x84, 0xd7, 0xe9, 0x8f, 0xbc, 0x89, 0xc0, 0xa0, - 0x98, 0xc3, 0x54, 0x79, 0xa4, 0x52, 0xbf, 0xf2, 0x48, 0xf6, 0x27, 0x2d, 0x38, 0xad, 0xb2, 0x86, - 0xa4, 0x36, 0x7e, 0x09, 0x46, 0xd7, 0x3a, 0x9e, 0xdf, 0x94, 0x05, 0xb5, 0x72, 0xee, 0x8c, 0x86, - 0x01, 0xc3, 0x19, 0x4c, 0xba, 0xa9, 0x5a, 0xf3, 0x02, 0x27, 0xde, 0x59, 0xd6, 0xea, 0x5f, 0x69, - 0x84, 0x86, 0x82, 0x60, 0x03, 0xcb, 0xfe, 0xac, 0x29, 0x82, 0xc8, 0x53, 0x1a, 0xa0, 0x67, 0x6f, - 0x40, 0xd5, 0x55, 0x67, 0xc2, 0x87, 0x2a, 0xfa, 0xa7, 0xf2, 0xd0, 0xd9, 0xb9, 0x00, 0xa7, 0x66, - 0xff, 0xe3, 0x12, 0x9c, 0xc8, 0xd4, 0x36, 0x41, 0x3e, 0xd4, 0x88, 0xcf, 0x5c, 0x7e, 0x72, 0x88, - 0x1d, 0xb5, 0xac, 0xa4, 0x9a, 0x16, 0x97, 0x04, 0x5d, 0xac, 0x38, 0x3c, 0x1c, 0x47, 0x6f, 0x2f, - 0xc1, 0xa8, 0x14, 0xe8, 0x43, 0x4e, 0xdb, 0x17, 0xb3, 0x50, 0x0d, 0x80, 0x4b, 0x06, 0x0c, 0x67, - 0x30, 0xed, 0xdf, 0x29, 0xc3, 0x38, 0xf7, 0x91, 0x36, 0x55, 0x74, 0xcc, 0xa2, 0xb4, 0xb2, 0xfe, - 0xbc, 0xae, 0x40, 0xc4, 0x3b, 0x72, 0xed, 0xa8, 0x55, 0x9c, 0x7b, 0x33, 0x1a, 0x28, 0x6e, 0xe3, - 0x17, 0x73, 0x71, 0x1b, 0x7c, 0xb1, 0x6d, 0x1d, 0x93, 0x44, 0xdf, 0x5b, 0x81, 0x1c, 0x7f, 0xb3, - 0x04, 0xa7, 0x72, 0x25, 0xb2, 0xd1, 0x17, 0xb3, 0xe5, 0x21, 0xad, 0x22, 0x3c, 0x69, 0xf7, 0xac, - 0x9a, 0x7c, 0xb0, 0x22, 0x91, 0x0f, 0x68, 0xaa, 0xd8, 0xbf, 0x57, 0x82, 0x93, 0xd9, 0xda, 0xde, - 0x0f, 0x61, 0x4f, 0xbd, 0x07, 0xea, 0xac, 0x7c, 0x2d, 0xbb, 0x8f, 0x8c, 0x3b, 0xe2, 0x78, 0xc9, - 0x53, 0xd9, 0x88, 0x35, 0xfc, 0xa1, 0xa8, 0xbd, 0x69, 0xff, 0x2d, 0x0b, 0xce, 0xf1, 0xb7, 0xcc, - 0x8f, 0xc3, 0xbf, 0xd0, 0xab, 0x77, 0x5f, 0x2b, 0x56, 0xc0, 0x5c, 0xe5, 0xac, 0xfd, 0xfa, 0x97, - 0xdd, 0x83, 0x24, 0xa4, 0xcd, 0x0e, 0x85, 0x87, 0x50, 0xd8, 0x03, 0x0d, 0x06, 0xfb, 0xf7, 0xca, - 0xa0, 0xaf, 0x7e, 0x42, 0x9e, 0xc8, 0x80, 0x2a, 0xa4, 0x82, 0xd8, 0xca, 0x4e, 0xe0, 0xea, 0x4b, - 0xa6, 0x6a, 0xb9, 0x04, 0xa8, 0x9f, 0xb3, 0x60, 0xc4, 0x0b, 0xbc, 0xd4, 0x73, 0x98, 0xf1, 0x5c, - 0xcc, 0xd5, 0x35, 0x8a, 0xdd, 0x3c, 0xa7, 0x1c, 0xc6, 0xa6, 0xf7, 0x56, 0x31, 0xc3, 0x26, 0x67, - 0xf4, 0x51, 0x11, 0x5a, 0x59, 0x2e, 0x2c, 0x77, 0xaf, 0x96, 0x8b, 0xa7, 0x8c, 0xa0, 0x1a, 0x93, - 0x34, 0x2e, 0x28, 0xe5, 0x15, 0x53, 0x52, 0xaa, 0x18, 0xa5, 0xbe, 0x84, 0x93, 0x36, 0x63, 0xce, - 0xc8, 0x4e, 0x00, 0x75, 0xf7, 0xc5, 0x01, 0xc3, 0xd6, 0xa6, 0xa0, 0xee, 0x74, 0xd2, 0xb0, 0x4d, - 0xbb, 0x49, 0x38, 0x98, 
0x75, 0x60, 0x9e, 0x04, 0x60, 0x8d, 0x63, 0x7f, 0xb1, 0x0a, 0xb9, 0x94, - 0x24, 0xb4, 0x6d, 0x5e, 0x5b, 0x66, 0x15, 0x7b, 0x6d, 0x99, 0x12, 0xa6, 0xd7, 0xd5, 0x65, 0xa8, - 0x05, 0xd5, 0x68, 0xc3, 0x49, 0xa4, 0x6d, 0xfc, 0x8a, 0xec, 0xa6, 0x65, 0xda, 0x78, 0x77, 0x77, - 0xe2, 0xc7, 0x06, 0xf3, 0xb5, 0xd0, 0xb1, 0x3a, 0xc5, 0x33, 0xfc, 0x35, 0x6b, 0x46, 0x03, 0x73, - 0xfa, 0x07, 0xb9, 0xbc, 0xe7, 0x53, 0xa2, 0xe0, 0x30, 0x26, 0x49, 0xc7, 0x4f, 0xc5, 0x68, 0x78, - 0xa5, 0xc0, 0x59, 0xc6, 0x09, 0xeb, 0x64, 0x5a, 0xfe, 0x1f, 0x1b, 0x4c, 0xd1, 0x87, 0xa1, 0x9e, - 0xa4, 0x4e, 0x9c, 0x1e, 0x32, 0xfd, 0x4d, 0x75, 0xfa, 0x8a, 0x24, 0x82, 0x35, 0x3d, 0xf4, 0x2a, - 0x2b, 0xa8, 0xe8, 0x25, 0x1b, 0x87, 0x8c, 0x88, 0x96, 0xc5, 0x17, 0x05, 0x05, 0x6c, 0x50, 0xa3, - 0x5b, 0x0f, 0x36, 0xb6, 0x79, 0x18, 0x50, 0x8d, 0xed, 0x2d, 0x95, 0x2a, 0xc4, 0x0a, 0x82, 0x0d, - 0x2c, 0xfb, 0x87, 0x20, 0x9b, 0x0d, 0x8e, 0x26, 0x64, 0xf2, 0x39, 0xf7, 0x3d, 0xb1, 0xc8, 0xe6, - 0x4c, 0x9e, 0xf8, 0xaf, 0x5b, 0x60, 0xa6, 0xac, 0xa3, 0x37, 0x78, 0x6e, 0xbc, 0x55, 0xc4, 0x79, - 0x81, 0x41, 0x77, 0x72, 0xd1, 0x89, 0x72, 0x07, 0x57, 0x32, 0x41, 0xfe, 0xfc, 0xfb, 0xa0, 0x26, - 0xa1, 0x07, 0x32, 0xea, 0x3e, 0x01, 0x67, 0xf2, 0x97, 0xba, 0x0a, 0x5f, 0x73, 0x2b, 0x0e, 0x3b, - 0x51, 0x7e, 0x23, 0xc9, 0x2e, 0xfd, 0xc4, 0x1c, 0x46, 0xb7, 0x63, 0x9b, 0x5e, 0xd0, 0xcc, 0x6f, - 0x24, 0xaf, 0x79, 0x41, 0x13, 0x33, 0xc8, 0x00, 0x97, 0xd7, 0xfd, 0x86, 0x05, 0x17, 0xf6, 0xbb, - 0x7b, 0x16, 0x3d, 0x0e, 0x95, 0x3b, 0x4e, 0x2c, 0x2b, 0xdd, 0x32, 0x45, 0x79, 0xcb, 0x89, 0x03, - 0xcc, 0x5a, 0xd1, 0x0e, 0x0c, 0xf1, 0x58, 0x11, 0x61, 0xad, 0xbf, 0x52, 0xec, 0x4d, 0xb8, 0xd7, - 0x88, 0xb1, 0x5d, 0xe0, 0x71, 0x2a, 0x58, 0x30, 0xb4, 0xbf, 0x63, 0x01, 0x5a, 0xda, 0x22, 0x71, - 0xec, 0x35, 0x8d, 0xe8, 0x16, 0xf4, 0x02, 0x8c, 0xde, 0x5e, 0x59, 0xba, 0xbe, 0x1c, 0x7a, 0x01, - 0xab, 0x0e, 0x61, 0x24, 0xc0, 0x5d, 0x35, 0xda, 0x71, 0x06, 0x0b, 0xcd, 0xc0, 0xd8, 0xed, 0x37, - 0xe8, 0xe6, 0xd7, 0xac, 0xaa, 0x5f, 0xd2, 0xee, 0xce, 0xab, 0xaf, 0xe4, 0x80, 0xb8, 0x1b, 0x1f, - 0x2d, 0xc1, 0xb9, 0x36, 0xdf, 0x6e, 0xf0, 0x62, 0xd8, 0x7c, 0xef, 0xa1, 0xd2, 0x4d, 0x1e, 0xdb, - 0xdb, 0x9d, 0x38, 0xb7, 0xd8, 0x0b, 0x01, 0xf7, 0x7e, 0xce, 0x7e, 0x1f, 0x20, 0x1e, 0xd4, 0x32, - 0xd3, 0x2b, 0x42, 0xa1, 0xef, 0x4e, 0xdc, 0xfe, 0x5a, 0x15, 0x4e, 0xe5, 0xea, 0x20, 0xd2, 0xad, - 0x5e, 0x77, 0x48, 0xc4, 0x91, 0xd7, 0xef, 0x6e, 0xf1, 0x06, 0x0a, 0xb2, 0x08, 0xa0, 0xea, 0x05, - 0x51, 0x27, 0x2d, 0x26, 0xc3, 0x8c, 0x0b, 0x31, 0x4f, 0x09, 0x1a, 0x4e, 0x22, 0xfa, 0x17, 0x73, - 0x36, 0x45, 0x86, 0x6c, 0x64, 0x8c, 0xf1, 0xca, 0x03, 0x72, 0x07, 0x7c, 0x4a, 0x07, 0x50, 0x54, - 0x8b, 0x38, 0xa8, 0xcf, 0x0d, 0x96, 0xe3, 0x3e, 0x60, 0xfb, 0xb5, 0x12, 0x8c, 0x18, 0x1f, 0x0d, - 0xfd, 0x52, 0xb6, 0xa0, 0x8b, 0x55, 0xdc, 0x2b, 0x31, 0xfa, 0x93, 0xba, 0x64, 0x0b, 0x7f, 0xa5, - 0xa7, 0xbb, 0x6b, 0xb9, 0xdc, 0xdd, 0x9d, 0x38, 0x9d, 0xab, 0xd6, 0x92, 0xa9, 0xef, 0x72, 0xfe, - 0xe3, 0x70, 0x2a, 0x47, 0xa6, 0xc7, 0x2b, 0xaf, 0x66, 0xef, 0xec, 0x3d, 0xa2, 0x5b, 0xca, 0xec, - 0xb2, 0xb7, 0x68, 0x97, 0xe9, 0xab, 0xdc, 0x07, 0x70, 0xc7, 0xe5, 0x72, 0xe9, 0x4a, 0x03, 0xe6, - 0xd2, 0x3d, 0x03, 0xb5, 0x28, 0xf4, 0x3d, 0xd7, 0x53, 0xa5, 0xbf, 0x58, 0xf6, 0xde, 0xb2, 0x68, - 0xc3, 0x0a, 0x8a, 0xee, 0x40, 0x5d, 0x5d, 0x6f, 0x2c, 0x82, 0x15, 0x8b, 0x72, 0xf5, 0x2a, 0xa3, - 0x45, 0x5f, 0x5b, 0xac, 0x79, 0x21, 0x1b, 0x86, 0xd8, 0x22, 0x28, 0x03, 0x83, 0x59, 0xa6, 0x27, - 0x5b, 0x1d, 0x13, 0x2c, 0x20, 0xf6, 0x37, 0xea, 0x70, 0xb6, 0x57, 0x31, 0x5a, 0xf4, 0x31, 0x18, - 0xe2, 0x32, 0x16, 0x53, 0xef, 0xbc, 0x17, 0x8f, 
0x39, 0x46, 0x50, 0x88, 0xc5, 0x7e, 0x63, 0xc1, - 0x53, 0x70, 0xf7, 0x9d, 0x35, 0x31, 0x42, 0x8e, 0x87, 0xfb, 0x82, 0xa3, 0xb9, 0x2f, 0x38, 0x9c, - 0xbb, 0xef, 0xac, 0xa1, 0x6d, 0xa8, 0xb6, 0xbc, 0x94, 0x38, 0xc2, 0x89, 0x70, 0xeb, 0x58, 0x98, - 0x13, 0x87, 0x5b, 0x69, 0xec, 0x27, 0xe6, 0x0c, 0xd1, 0xd7, 0x2d, 0x38, 0xb5, 0x96, 0x4d, 0x9c, - 0x15, 0xca, 0xd3, 0x39, 0x86, 0x82, 0xc3, 0x59, 0x46, 0xfc, 0xe6, 0x8a, 0x5c, 0x23, 0xce, 0x8b, - 0x83, 0x3e, 0x6d, 0xc1, 0xf0, 0xba, 0xe7, 0x1b, 0xb5, 0x27, 0x8f, 0xe1, 0xe3, 0x5c, 0x66, 0x0c, - 0xf4, 0x8e, 0x83, 0xff, 0x4f, 0xb0, 0xe4, 0xdc, 0x6f, 0xa5, 0x1a, 0x3a, 0xea, 0x4a, 0x35, 0xfc, - 0x80, 0x56, 0xaa, 0xcf, 0x58, 0x50, 0x57, 0x3d, 0x2d, 0x92, 0x21, 0x3f, 0x7c, 0x8c, 0x9f, 0x9c, - 0x7b, 0x4e, 0xd4, 0x5f, 0xac, 0x99, 0xa3, 0x2f, 0x59, 0x30, 0xe2, 0xbc, 0xd9, 0x89, 0x49, 0x93, - 0x6c, 0x85, 0x51, 0x22, 0x6e, 0x8b, 0x7a, 0xad, 0x78, 0x61, 0xa6, 0x29, 0x93, 0x59, 0xb2, 0xb5, - 0x14, 0x25, 0x22, 0x69, 0x41, 0x37, 0x60, 0x53, 0x04, 0x7b, 0xb7, 0x04, 0x13, 0xfb, 0x50, 0x40, - 0x2f, 0xc1, 0x68, 0x18, 0xb7, 0x9c, 0xc0, 0x7b, 0xd3, 0xcc, 0x84, 0x57, 0x56, 0xd6, 0x92, 0x01, - 0xc3, 0x19, 0x4c, 0x33, 0x5d, 0xb3, 0xb4, 0x4f, 0xba, 0xe6, 0x05, 0xa8, 0xc4, 0x24, 0x0a, 0xf3, - 0x9b, 0x05, 0x16, 0x30, 0xcc, 0x20, 0xe8, 0x09, 0x28, 0x3b, 0x91, 0x27, 0xc2, 0x4f, 0xd4, 0x1e, - 0x68, 0x7a, 0x79, 0x1e, 0xd3, 0xf6, 0x4c, 0xf6, 0x78, 0xf5, 0xbe, 0x64, 0x8f, 0xd3, 0x65, 0x40, - 0x9c, 0x5d, 0x0c, 0xe9, 0x65, 0x20, 0x7b, 0xa6, 0x60, 0x7f, 0xb5, 0x0c, 0x4f, 0xdc, 0x73, 0xbc, - 0xe8, 0xe8, 0x1b, 0xeb, 0x1e, 0xd1, 0x37, 0xb2, 0x7b, 0x4a, 0xfb, 0x75, 0x4f, 0xb9, 0x4f, 0xf7, - 0x7c, 0x9a, 0x4e, 0x03, 0x59, 0x41, 0xa0, 0x98, 0x8b, 0x8b, 0xfa, 0x15, 0x24, 0x10, 0x33, 0x40, - 0x42, 0xb1, 0xe6, 0x4b, 0xf7, 0x00, 0x99, 0x54, 0xc5, 0x6a, 0x11, 0xcb, 0x40, 0xdf, 0x8a, 0x02, - 0x7c, 0xec, 0xf7, 0xcb, 0x7f, 0xb4, 0x7f, 0xbe, 0x04, 0x4f, 0x0d, 0xa0, 0xbd, 0xcd, 0x51, 0x6c, - 0x0d, 0x38, 0x8a, 0xbf, 0xb7, 0x3f, 0x93, 0xfd, 0x17, 0x2d, 0x38, 0xdf, 0x7f, 0xf1, 0x40, 0xcf, - 0xc3, 0xc8, 0x5a, 0xec, 0x04, 0xee, 0x06, 0xbb, 0x8c, 0x4d, 0x76, 0x0a, 0xeb, 0x6b, 0xdd, 0x8c, - 0x4d, 0x1c, 0xba, 0xbd, 0xe5, 0x05, 0xe0, 0x0d, 0x0c, 0x99, 0x5a, 0x46, 0xb7, 0xb7, 0xab, 0x79, - 0x20, 0xee, 0xc6, 0xb7, 0xff, 0xa4, 0xd4, 0x5b, 0x2c, 0x6e, 0x64, 0x1c, 0xe4, 0x3b, 0x89, 0xaf, - 0x50, 0x1a, 0x40, 0x97, 0x94, 0xef, 0xb7, 0x2e, 0xa9, 0xf4, 0xd3, 0x25, 0x68, 0x16, 0x4e, 0x1b, - 0xf7, 0x16, 0xf0, 0x74, 0x41, 0x1e, 0x66, 0xa7, 0x72, 0xe8, 0x97, 0x73, 0x70, 0xdc, 0xf5, 0x04, - 0x7a, 0x16, 0x6a, 0x5e, 0x90, 0x10, 0xb7, 0x13, 0xf3, 0xf0, 0x4e, 0x23, 0x45, 0x63, 0x5e, 0xb4, - 0x63, 0x85, 0x61, 0xff, 0x72, 0x09, 0x1e, 0xeb, 0x6b, 0x67, 0xdd, 0x27, 0xdd, 0x65, 0x7e, 0x8e, - 0xca, 0xfd, 0xf9, 0x1c, 0x66, 0x27, 0x55, 0xf7, 0xed, 0xa4, 0xdf, 0xef, 0x3f, 0x30, 0xa9, 0xcd, - 0xfd, 0x7d, 0xdb, 0x4b, 0x2f, 0xc3, 0x09, 0x27, 0x8a, 0x38, 0x1e, 0x8b, 0xd2, 0xca, 0xd5, 0xd0, - 0x98, 0x36, 0x81, 0x38, 0x8b, 0x3b, 0xd0, 0xea, 0xf9, 0x87, 0x16, 0xd4, 0x31, 0x59, 0xe7, 0xda, - 0x01, 0xdd, 0x16, 0x5d, 0x64, 0x15, 0x51, 0x6d, 0x8f, 0x76, 0x6c, 0xe2, 0xb1, 0x2a, 0x74, 0xbd, - 0x3a, 0xbb, 0xfb, 0x7e, 0x8b, 0xd2, 0x81, 0xee, 0xb7, 0x50, 0x37, 0x1c, 0x94, 0xfb, 0xdf, 0x70, - 0x60, 0xbf, 0x35, 0x4c, 0x5f, 0x2f, 0x0a, 0x67, 0x62, 0xd2, 0x4c, 0xe8, 0xf7, 0xed, 0xc4, 0xbe, - 0x18, 0x24, 0xea, 0xfb, 0xde, 0xc0, 0x0b, 0x98, 0xb6, 0x67, 0x8e, 0x62, 0x4a, 0x07, 0xaa, 0x20, - 0x50, 0xde, 0xb7, 0x82, 0xc0, 0xcb, 0x70, 0x22, 0x49, 0x36, 0x96, 0x63, 0x6f, 0xcb, 0x49, 0xc9, - 0x35, 0xb2, 0x23, 0xac, 0x2c, 0x9d, 0xf5, 0xbb, 0x72, 0x45, 0x03, 0x71, 
0x16, 0x17, 0xcd, 0xc1, - 0x98, 0xce, 0xe3, 0x27, 0x71, 0xca, 0x62, 0x7a, 0xf9, 0x48, 0x50, 0x29, 0x7e, 0x3a, 0xf3, 0x5f, - 0x20, 0xe0, 0xee, 0x67, 0xa8, 0x7e, 0xcb, 0x34, 0x52, 0x41, 0x86, 0xb2, 0xfa, 0x2d, 0x43, 0x87, - 0xca, 0xd2, 0xf5, 0x04, 0x5a, 0x84, 0x33, 0x7c, 0x60, 0x4c, 0x47, 0x91, 0xf1, 0x46, 0xc3, 0xd9, - 0x2a, 0x67, 0x73, 0xdd, 0x28, 0xb8, 0xd7, 0x73, 0xe8, 0x45, 0x18, 0x51, 0xcd, 0xf3, 0xb3, 0xe2, - 0x14, 0x41, 0x79, 0x31, 0x14, 0x99, 0xf9, 0x26, 0x36, 0xf1, 0xd0, 0x87, 0xe0, 0x51, 0xfd, 0x97, - 0x27, 0x7e, 0xf0, 0xa3, 0xb5, 0x59, 0x51, 0x22, 0x45, 0xd5, 0xd3, 0x9f, 0xeb, 0x89, 0xd6, 0xc4, - 0xfd, 0x9e, 0x47, 0x6b, 0x70, 0x5e, 0x81, 0x2e, 0x05, 0x29, 0x8b, 0xe2, 0x4e, 0x48, 0xc3, 0x49, - 0xc8, 0x8d, 0xd8, 0x67, 0x45, 0x55, 0xea, 0xfa, 0xaa, 0xb3, 0x39, 0x2f, 0xbd, 0xd2, 0x0b, 0x13, - 0x2f, 0xe0, 0x7b, 0x50, 0x41, 0x53, 0x50, 0x27, 0x81, 0xb3, 0xe6, 0x93, 0xa5, 0x99, 0x79, 0x56, - 0x6a, 0xc5, 0x38, 0xc9, 0xbb, 0x24, 0x01, 0x58, 0xe3, 0xa8, 0xb8, 0xb2, 0xd1, 0xbe, 0xd7, 0xee, - 0x2d, 0xc3, 0xd9, 0x96, 0x1b, 0x51, 0xdb, 0xc3, 0x73, 0xc9, 0xb4, 0xcb, 0x62, 0xab, 0xe8, 0x87, - 0xe1, 0xe5, 0xe7, 0x54, 0xd0, 0xe4, 0xdc, 0xcc, 0x72, 0x17, 0x0e, 0xee, 0xf9, 0x24, 0x9d, 0x63, - 0x51, 0x1c, 0x6e, 0xef, 0x8c, 0x9f, 0xc9, 0xce, 0xb1, 0x65, 0xda, 0x88, 0x39, 0x0c, 0x5d, 0x05, - 0xc4, 0x22, 0x70, 0xaf, 0xa4, 0x69, 0xa4, 0x8c, 0x9d, 0xf1, 0xb3, 0xec, 0x95, 0xce, 0x8b, 0x27, - 0xd0, 0xe5, 0x2e, 0x0c, 0xdc, 0xe3, 0x29, 0xfb, 0xdf, 0x5a, 0x70, 0x42, 0xcd, 0xd7, 0xfb, 0x10, - 0x83, 0xee, 0x67, 0x63, 0xd0, 0xe7, 0x8e, 0xae, 0xf1, 0x98, 0xe4, 0x7d, 0x02, 0x19, 0x7f, 0x66, - 0x04, 0x40, 0x6b, 0x45, 0xb5, 0x20, 0x59, 0x7d, 0x17, 0xa4, 0x87, 0x56, 0x23, 0xf5, 0xaa, 0xab, - 0x50, 0x7d, 0xb0, 0x75, 0x15, 0x56, 0xe0, 0x9c, 0x34, 0x17, 0xf8, 0x59, 0xd1, 0x95, 0x30, 0x51, - 0x0a, 0xae, 0xd6, 0x78, 0x42, 0x10, 0x3a, 0x37, 0xdf, 0x0b, 0x09, 0xf7, 0x7e, 0x36, 0x63, 0xa5, - 0x0c, 0xef, 0x67, 0xa5, 0xe8, 0x39, 0xbd, 0xb0, 0x2e, 0x0b, 0xe7, 0xe7, 0xe6, 0xf4, 0xc2, 0xe5, - 0x15, 0xac, 0x71, 0x7a, 0x2b, 0xf6, 0x7a, 0x41, 0x8a, 0x1d, 0x0e, 0xac, 0xd8, 0xa5, 0x8a, 0x19, - 0xe9, 0xab, 0x62, 0xa4, 0x4f, 0x7a, 0xb4, 0xaf, 0x4f, 0xfa, 0xfd, 0x70, 0xd2, 0x0b, 0x36, 0x48, - 0xec, 0xa5, 0xa4, 0xc9, 0xe6, 0x02, 0x53, 0x3f, 0x35, 0xbd, 0xac, 0xcf, 0x67, 0xa0, 0x38, 0x87, - 0x9d, 0xd5, 0x8b, 0x27, 0x07, 0xd0, 0x8b, 0x7d, 0x56, 0xa3, 0x53, 0xc5, 0xac, 0x46, 0xa7, 0x8f, - 0xbe, 0x1a, 0x8d, 0x1d, 0xeb, 0x6a, 0x84, 0x0a, 0x59, 0x8d, 0x06, 0x52, 0xf4, 0xc6, 0xf6, 0xef, - 0xec, 0x3e, 0xdb, 0xbf, 0x7e, 0x4b, 0xd1, 0xb9, 0x43, 0x2f, 0x45, 0xbd, 0x57, 0x99, 0x47, 0x0e, - 0xb5, 0xca, 0x7c, 0xa6, 0x04, 0xe7, 0xb4, 0x1e, 0xa6, 0xa3, 0xdf, 0x5b, 0xa7, 0x9a, 0x88, 0xdd, - 0xbd, 0xc2, 0xcf, 0x6d, 0x8c, 0x94, 0x08, 0x9d, 0x5d, 0xa1, 0x20, 0xd8, 0xc0, 0x62, 0x99, 0x05, - 0x24, 0x66, 0x45, 0x36, 0xf3, 0x4a, 0x7a, 0x46, 0xb4, 0x63, 0x85, 0x41, 0xc7, 0x17, 0xfd, 0x2d, - 0xb2, 0xb5, 0xf2, 0xa5, 0xa4, 0x66, 0x34, 0x08, 0x9b, 0x78, 0xe8, 0x19, 0xce, 0x84, 0x29, 0x08, - 0xaa, 0xa8, 0x47, 0xc5, 0x65, 0x8c, 0x52, 0x27, 0x28, 0xa8, 0x14, 0x87, 0xa5, 0x90, 0x54, 0xbb, - 0xc5, 0x61, 0x21, 0x50, 0x0a, 0xc3, 0xfe, 0x6f, 0x16, 0x3c, 0xd6, 0xb3, 0x2b, 0xee, 0xc3, 0xe2, - 0xbb, 0x9d, 0x5d, 0x7c, 0x57, 0x8a, 0xda, 0x6e, 0x18, 0x6f, 0xd1, 0x67, 0x21, 0xfe, 0xd7, 0x16, - 0x9c, 0xd4, 0xf8, 0xf7, 0xe1, 0x55, 0xbd, 0xec, 0xab, 0x16, 0xb7, 0xb3, 0xaa, 0x77, 0xbd, 0xdb, - 0xef, 0x94, 0x40, 0x95, 0x77, 0x9b, 0x76, 0x65, 0xf1, 0xcc, 0x7d, 0x4e, 0x12, 0x77, 0x60, 0x88, - 0x1d, 0x84, 0x26, 0xc5, 0x04, 0x79, 0x64, 0xf9, 0xb3, 0x43, 0x55, 0x7d, 0xc8, 0xcc, 0xfe, 0x26, - 
0x58, 0x30, 0x64, 0x25, 0x60, 0xbd, 0x84, 0x6a, 0xf3, 0xa6, 0x48, 0xc6, 0xd0, 0x25, 0x60, 0x45, - 0x3b, 0x56, 0x18, 0x74, 0x79, 0xf0, 0xdc, 0x30, 0x98, 0xf1, 0x9d, 0x44, 0x5e, 0x38, 0xa6, 0x96, - 0x87, 0x79, 0x09, 0xc0, 0x1a, 0x87, 0x9d, 0x91, 0x7a, 0x49, 0xe4, 0x3b, 0x3b, 0xc6, 0xfe, 0xd9, - 0xc8, 0x4a, 0x56, 0x20, 0x6c, 0xe2, 0xd9, 0x6d, 0x18, 0xcf, 0xbe, 0xc4, 0x2c, 0x59, 0x67, 0x01, - 0x8a, 0x03, 0x75, 0xe7, 0x14, 0xd4, 0x1d, 0xf6, 0xd4, 0x42, 0xc7, 0xc9, 0xdf, 0x13, 0x3c, 0x2d, - 0x01, 0x58, 0xe3, 0xd8, 0xbf, 0x6a, 0xc1, 0x99, 0x1e, 0x9d, 0x56, 0x60, 0xb2, 0x4b, 0xaa, 0xb5, - 0x4d, 0xaf, 0x85, 0xfd, 0xdd, 0x30, 0xdc, 0x24, 0xeb, 0x8e, 0x0c, 0x81, 0x33, 0x74, 0xfb, 0x2c, - 0x6f, 0xc6, 0x12, 0x6e, 0xff, 0x17, 0x0b, 0x4e, 0x65, 0x65, 0x4d, 0xa8, 0x76, 0xe6, 0x2f, 0x33, - 0xeb, 0x25, 0x6e, 0xb8, 0x45, 0xe2, 0x1d, 0xfa, 0xe6, 0x5c, 0x6a, 0xa5, 0x9d, 0xa7, 0xbb, 0x30, - 0x70, 0x8f, 0xa7, 0x58, 0x71, 0xc7, 0xa6, 0xea, 0x6d, 0x39, 0x22, 0x6f, 0x16, 0x39, 0x22, 0xf5, - 0xc7, 0x34, 0x8f, 0xcb, 0x15, 0x4b, 0x6c, 0xf2, 0xb7, 0xbf, 0x53, 0x01, 0x95, 0x0d, 0xc7, 0xe2, - 0x8f, 0x0a, 0x8a, 0xde, 0xca, 0xdc, 0x8d, 0x54, 0x1e, 0xe0, 0x6e, 0x24, 0x39, 0x18, 0x2a, 0xf7, - 0x0a, 0x08, 0xe0, 0x5e, 0x12, 0xd3, 0x75, 0xa9, 0xde, 0x70, 0x55, 0x83, 0xb0, 0x89, 0x47, 0x25, - 0xf1, 0xbd, 0x2d, 0xc2, 0x1f, 0x1a, 0xca, 0x4a, 0xb2, 0x20, 0x01, 0x58, 0xe3, 0x50, 0x49, 0x9a, - 0xde, 0xfa, 0xba, 0xd8, 0xf2, 0x2b, 0x49, 0x68, 0xef, 0x60, 0x06, 0xe1, 0xf5, 0x7a, 0xc3, 0x4d, - 0x61, 0x05, 0x1b, 0xf5, 0x7a, 0xc3, 0x4d, 0xcc, 0x20, 0xd4, 0x6e, 0x0b, 0xc2, 0xb8, 0xcd, 0xee, - 0x71, 0x6e, 0x2a, 0x2e, 0xc2, 0xfa, 0x55, 0x76, 0xdb, 0xf5, 0x6e, 0x14, 0xdc, 0xeb, 0x39, 0x3a, - 0x02, 0xa3, 0x98, 0x34, 0x3d, 0x37, 0x35, 0xa9, 0x41, 0x76, 0x04, 0x2e, 0x77, 0x61, 0xe0, 0x1e, - 0x4f, 0xa1, 0x69, 0x38, 0x25, 0xb3, 0x19, 0x65, 0xad, 0x8a, 0x91, 0x6c, 0x6e, 0x3c, 0xce, 0x82, - 0x71, 0x1e, 0x9f, 0x6a, 0xb5, 0xb6, 0x28, 0x67, 0xc3, 0x8c, 0x65, 0x43, 0xab, 0xc9, 0x32, 0x37, - 0x58, 0x61, 0xd8, 0x9f, 0x2a, 0xd3, 0x55, 0xb8, 0x4f, 0x19, 0xa7, 0xfb, 0x16, 0x2d, 0x98, 0x1d, - 0x91, 0x95, 0x01, 0x46, 0xe4, 0x0b, 0x30, 0x7a, 0x3b, 0x09, 0x03, 0x15, 0x89, 0x57, 0xed, 0x1b, - 0x89, 0x67, 0x60, 0xf5, 0x8e, 0xc4, 0x1b, 0x2a, 0x2a, 0x12, 0x6f, 0xf8, 0x90, 0x91, 0x78, 0xdf, - 0xaa, 0x82, 0xba, 0x38, 0xe0, 0x3a, 0x49, 0xef, 0x84, 0xf1, 0xa6, 0x17, 0xb4, 0x58, 0x16, 0xe8, - 0xd7, 0x2d, 0x18, 0xe5, 0xf3, 0x65, 0xc1, 0xcc, 0xa4, 0x5a, 0x2f, 0xa8, 0x22, 0x7d, 0x86, 0xd9, - 0xe4, 0xaa, 0xc1, 0x28, 0x77, 0xdf, 0x9d, 0x09, 0xc2, 0x19, 0x89, 0xd0, 0xc7, 0x01, 0xa4, 0x7f, - 0x74, 0x5d, 0xaa, 0xcc, 0xf9, 0x62, 0xe4, 0xc3, 0x64, 0x5d, 0xdb, 0xc0, 0xab, 0x8a, 0x09, 0x36, - 0x18, 0xa2, 0xcf, 0xe4, 0xef, 0xb9, 0xff, 0xe8, 0xb1, 0xf4, 0xcd, 0x20, 0x39, 0x66, 0x18, 0x86, - 0xbd, 0xa0, 0x45, 0xc7, 0x89, 0x88, 0x58, 0x7a, 0x57, 0xaf, 0x0c, 0xea, 0x85, 0xd0, 0x69, 0x36, - 0x1c, 0xdf, 0x09, 0x5c, 0x12, 0xcf, 0x73, 0x74, 0xf3, 0x96, 0x57, 0xd6, 0x80, 0x25, 0xa1, 0xae, - 0x2b, 0x17, 0xaa, 0x83, 0x5c, 0xb9, 0x70, 0xfe, 0x03, 0x30, 0xd6, 0xf5, 0x31, 0x0f, 0x94, 0x52, - 0x76, 0xf8, 0x6c, 0x34, 0xfb, 0x9f, 0x0c, 0xe9, 0x45, 0xeb, 0x7a, 0xd8, 0xe4, 0x85, 0xff, 0x63, - 0xfd, 0x45, 0x85, 0x8d, 0x5b, 0xe0, 0x10, 0x31, 0x6e, 0x8a, 0x55, 0x8d, 0xd8, 0x64, 0x49, 0xc7, - 0x68, 0xe4, 0xc4, 0x24, 0x38, 0xee, 0x31, 0xba, 0xac, 0x98, 0x60, 0x83, 0x21, 0xda, 0xc8, 0xe4, - 0x94, 0x5c, 0x3e, 0x7a, 0x4e, 0x09, 0xab, 0x2d, 0xd3, 0xab, 0x56, 0xf7, 0x97, 0x2c, 0x38, 0x19, - 0x64, 0x46, 0x6e, 0x31, 0x61, 0xa4, 0xbd, 0x67, 0x05, 0xbf, 0x77, 0x26, 0xdb, 0x86, 0x73, 0xfc, - 0x7b, 0x2d, 0x69, 0xd5, 
0x03, 0x2e, 0x69, 0xfa, 0x06, 0x91, 0xa1, 0x7e, 0x37, 0x88, 0xa0, 0x40, - 0x5d, 0xa1, 0x34, 0x5c, 0xf8, 0x15, 0x4a, 0xd0, 0xe3, 0xfa, 0xa4, 0x5b, 0x50, 0x77, 0x63, 0xe2, - 0xa4, 0x87, 0xbc, 0x4d, 0x87, 0x1d, 0xd0, 0xcf, 0x48, 0x02, 0x58, 0xd3, 0xb2, 0xff, 0x57, 0x05, - 0x4e, 0xcb, 0x1e, 0x91, 0x21, 0xe8, 0x74, 0x7d, 0xe4, 0x7c, 0xb5, 0x71, 0xab, 0xd6, 0xc7, 0x2b, - 0x12, 0x80, 0x35, 0x0e, 0xb5, 0xc7, 0x3a, 0x09, 0x59, 0x8a, 0x48, 0xb0, 0xe0, 0xad, 0x25, 0xe2, - 0x9c, 0x53, 0x4d, 0x94, 0x1b, 0x1a, 0x84, 0x4d, 0x3c, 0x6a, 0x8c, 0x73, 0xbb, 0x38, 0xc9, 0xa7, - 0xaf, 0x08, 0x7b, 0x1b, 0x4b, 0x38, 0xfa, 0x85, 0x9e, 0x75, 0x25, 0x8b, 0x49, 0xdc, 0xea, 0x8a, - 0xbc, 0x3f, 0xe0, 0x05, 0x6c, 0x7f, 0xdd, 0x82, 0x73, 0xbc, 0x55, 0xf6, 0xe4, 0x8d, 0xa8, 0xe9, - 0xa4, 0x24, 0x29, 0xa6, 0xce, 0x73, 0x0f, 0xf9, 0xb4, 0x93, 0xb7, 0x17, 0x5b, 0xdc, 0x5b, 0x1a, - 0xf4, 0x45, 0x0b, 0x4e, 0x6d, 0x66, 0x32, 0xfd, 0xe5, 0xd2, 0x71, 0xc4, 0x9a, 0x34, 0xd9, 0xf2, - 0x01, 0x7a, 0xaa, 0x65, 0xdb, 0x13, 0x9c, 0xe7, 0x6e, 0xff, 0x89, 0x05, 0xa6, 0x1a, 0x1d, 0xcc, - 0x02, 0x34, 0xae, 0xbc, 0x2d, 0xed, 0x73, 0xe5, 0xad, 0x34, 0x16, 0xcb, 0x83, 0x6d, 0x4e, 0x2a, - 0x07, 0xd8, 0x9c, 0x54, 0xfb, 0x5a, 0x97, 0x4f, 0x40, 0xb9, 0xe3, 0x35, 0xc5, 0xfe, 0x42, 0x9f, - 0xbe, 0xce, 0xcf, 0x62, 0xda, 0x6e, 0xff, 0xc3, 0xaa, 0xf6, 0x5b, 0x88, 0xbc, 0xa8, 0xef, 0x8b, - 0xd7, 0x5e, 0x57, 0x25, 0x86, 0xf8, 0x9b, 0x5f, 0xef, 0x2a, 0x31, 0xf4, 0x23, 0x07, 0x4f, 0x7b, - 0xe3, 0x1d, 0xd4, 0xaf, 0xc2, 0xd0, 0xf0, 0x3e, 0x39, 0x6f, 0xb7, 0xa1, 0x46, 0xb7, 0x60, 0xcc, - 0x01, 0x59, 0xcb, 0x08, 0x55, 0xbb, 0x22, 0xda, 0xef, 0xee, 0x4e, 0xfc, 0xf0, 0xc1, 0xc5, 0x92, - 0x4f, 0x63, 0x45, 0x1f, 0x25, 0x50, 0xa7, 0xbf, 0x59, 0x7a, 0x9e, 0xd8, 0xdc, 0xdd, 0x50, 0x3a, - 0x53, 0x02, 0x0a, 0xc9, 0xfd, 0xd3, 0x7c, 0x50, 0x00, 0x75, 0x76, 0x57, 0x25, 0x63, 0xca, 0xf7, - 0x80, 0xcb, 0x2a, 0x49, 0x4e, 0x02, 0xee, 0xee, 0x4e, 0xbc, 0x7c, 0x70, 0xa6, 0xea, 0x71, 0xac, - 0x59, 0xd8, 0x5f, 0xae, 0xe8, 0xb1, 0x2b, 0x2a, 0x4b, 0x7d, 0x5f, 0x8c, 0xdd, 0x97, 0x72, 0x63, - 0xf7, 0x42, 0xd7, 0xd8, 0x3d, 0xa9, 0xef, 0x54, 0xcc, 0x8c, 0xc6, 0xfb, 0x6d, 0x08, 0xec, 0xef, - 0x6f, 0x60, 0x16, 0xd0, 0x1b, 0x1d, 0x2f, 0x26, 0xc9, 0x72, 0xdc, 0x09, 0xbc, 0xa0, 0x25, 0xee, - 0xca, 0x37, 0x2c, 0xa0, 0x0c, 0x18, 0xe7, 0xf1, 0xd9, 0x3d, 0xfb, 0x3b, 0x81, 0x7b, 0xcb, 0xd9, - 0xe2, 0xa3, 0xca, 0x28, 0xb6, 0xb3, 0x22, 0xda, 0xb1, 0xc2, 0xb0, 0xdf, 0x62, 0x67, 0xd9, 0x46, - 0x5e, 0x30, 0x1d, 0x13, 0x3e, 0xbb, 0x1c, 0x94, 0x57, 0xea, 0x51, 0x63, 0x82, 0xdf, 0x08, 0xca, - 0x61, 0xe8, 0x0e, 0x0c, 0xaf, 0xf1, 0xdb, 0xb1, 0x8a, 0xa9, 0x5e, 0x2c, 0xae, 0xda, 0x62, 0x77, - 0x20, 0xc8, 0x7b, 0xb7, 0xee, 0xea, 0x9f, 0x58, 0x72, 0xb3, 0xbf, 0x59, 0x81, 0x53, 0xb9, 0xeb, - 0x23, 0x33, 0x35, 0x12, 0x4b, 0xfb, 0xd6, 0x48, 0xfc, 0x08, 0x40, 0x93, 0x44, 0x7e, 0xb8, 0xc3, - 0xcc, 0xb1, 0xca, 0x81, 0xcd, 0x31, 0x65, 0xc1, 0xcf, 0x2a, 0x2a, 0xd8, 0xa0, 0x28, 0xca, 0x13, - 0xf1, 0x92, 0x8b, 0xb9, 0xf2, 0x44, 0x46, 0x8d, 0xf3, 0xa1, 0xfb, 0x5b, 0xe3, 0xdc, 0x83, 0x53, - 0x5c, 0x44, 0x95, 0x7d, 0x7b, 0x88, 0x24, 0x5b, 0x96, 0xbf, 0x30, 0x9b, 0x25, 0x83, 0xf3, 0x74, - 0x1f, 0xe4, 0xed, 0xb0, 0xe8, 0x3d, 0x50, 0x97, 0xdf, 0x39, 0x19, 0xaf, 0xeb, 0x0a, 0x06, 0x72, - 0x18, 0xb0, 0x5b, 0x5b, 0xc5, 0x4f, 0xfb, 0x0b, 0x25, 0x6a, 0x3d, 0xf3, 0x7f, 0xaa, 0x12, 0xcd, - 0xd3, 0x30, 0xe4, 0x74, 0xd2, 0x8d, 0xb0, 0xeb, 0x86, 0xad, 0x69, 0xd6, 0x8a, 0x05, 0x14, 0x2d, - 0x40, 0xa5, 0xa9, 0xab, 0x8b, 0x1c, 0xa4, 0x17, 0xb5, 0x23, 0xd2, 0x49, 0x09, 0x66, 0x54, 0xd0, - 0xe3, 0x50, 0x49, 0x9d, 0x96, 0x4c, 0x74, 0x62, 
0xc9, 0xad, 0xab, 0x4e, 0x2b, 0xc1, 0xac, 0xd5, - 0x5c, 0x34, 0x2b, 0xfb, 0x2c, 0x9a, 0x2f, 0xc3, 0x89, 0xc4, 0x6b, 0x05, 0x4e, 0xda, 0x89, 0x89, - 0x71, 0xb8, 0xa6, 0xe3, 0x25, 0x4c, 0x20, 0xce, 0xe2, 0xda, 0xbf, 0x39, 0x0a, 0x67, 0x57, 0x66, - 0x16, 0x65, 0xa5, 0xdc, 0x63, 0xcb, 0x55, 0xea, 0xc5, 0xe3, 0xfe, 0xe5, 0x2a, 0xf5, 0xe1, 0xee, - 0x1b, 0xb9, 0x4a, 0xbe, 0x91, 0xab, 0x94, 0x4d, 0x1c, 0x29, 0x17, 0x91, 0x38, 0xd2, 0x4b, 0x82, - 0x41, 0x12, 0x47, 0x8e, 0x2d, 0x79, 0xe9, 0x9e, 0x02, 0x1d, 0x28, 0x79, 0x49, 0x65, 0x76, 0x15, - 0x12, 0xd2, 0xdf, 0xe7, 0x53, 0xf5, 0xcc, 0xec, 0x52, 0x59, 0x35, 0x3c, 0x5d, 0x45, 0x28, 0xd8, - 0xd7, 0x8a, 0x17, 0x60, 0x80, 0xac, 0x1a, 0x91, 0x31, 0x63, 0x66, 0x72, 0x0d, 0x17, 0x91, 0xc9, - 0xd5, 0x4b, 0x9c, 0x7d, 0x33, 0xb9, 0x5e, 0x86, 0x13, 0xae, 0x1f, 0x06, 0x64, 0x39, 0x0e, 0xd3, - 0xd0, 0x0d, 0x7d, 0x61, 0x4c, 0x2b, 0x95, 0x30, 0x63, 0x02, 0x71, 0x16, 0xb7, 0x5f, 0x1a, 0x58, - 0xfd, 0xa8, 0x69, 0x60, 0xf0, 0x80, 0xd2, 0xc0, 0x7e, 0x56, 0x27, 0x2c, 0x8f, 0xb0, 0x2f, 0xf2, - 0x91, 0xe2, 0xbf, 0xc8, 0x20, 0x59, 0xcb, 0xe8, 0xab, 0xfc, 0x8a, 0x2b, 0x6a, 0x8e, 0xce, 0x84, - 0x6d, 0x6a, 0x6e, 0x8d, 0xb2, 0x2e, 0x79, 0xfd, 0x18, 0x06, 0xec, 0xad, 0x15, 0xcd, 0x46, 0x5d, - 0x7b, 0xa5, 0x9b, 0x70, 0x56, 0x90, 0xa3, 0x24, 0x54, 0x7f, 0xad, 0x04, 0x3f, 0xb0, 0xaf, 0x08, - 0xe8, 0x0e, 0x40, 0xea, 0xb4, 0xc4, 0x40, 0x15, 0xc7, 0x14, 0x47, 0x0c, 0x6a, 0x5c, 0x95, 0xf4, - 0x78, 0x25, 0x10, 0xf5, 0x97, 0x1d, 0x00, 0xc8, 0xdf, 0x2c, 0x96, 0x31, 0xf4, 0xbb, 0xaa, 0x1e, - 0xe2, 0xd0, 0x27, 0x98, 0x41, 0xe8, 0xf2, 0x1f, 0x93, 0x96, 0xbe, 0x93, 0x55, 0x7d, 0x3e, 0xcc, - 0x5a, 0xb1, 0x80, 0xa2, 0x17, 0x61, 0xc4, 0xf1, 0x7d, 0x9e, 0x95, 0x42, 0x12, 0x71, 0xc7, 0x85, - 0xae, 0xdc, 0xa6, 0x41, 0xd8, 0xc4, 0xb3, 0xff, 0xb8, 0x04, 0x13, 0xfb, 0xe8, 0x94, 0xae, 0x3c, - 0xbb, 0xea, 0xc0, 0x79, 0x76, 0x22, 0x33, 0x60, 0xa8, 0x4f, 0x66, 0xc0, 0x8b, 0x30, 0x92, 0x12, - 0xa7, 0x2d, 0xc2, 0xa0, 0xc4, 0xfe, 0x5b, 0x9f, 0xbb, 0x6a, 0x10, 0x36, 0xf1, 0xa8, 0x16, 0x3b, - 0xe9, 0xb8, 0x2e, 0x49, 0x12, 0x19, 0xfa, 0x2f, 0x7c, 0x98, 0x85, 0xe5, 0x15, 0x30, 0xd7, 0xf0, - 0x74, 0x86, 0x05, 0xce, 0xb1, 0xcc, 0x77, 0x78, 0x7d, 0xc0, 0x0e, 0xff, 0x46, 0x09, 0x9e, 0xb8, - 0xe7, 0xea, 0x36, 0x70, 0x56, 0x46, 0x27, 0x21, 0x71, 0x7e, 0xe0, 0xdc, 0x48, 0x48, 0x8c, 0x19, - 0x84, 0xf7, 0x52, 0x14, 0x19, 0x77, 0xde, 0x16, 0x9d, 0x32, 0xc4, 0x7b, 0x29, 0xc3, 0x02, 0xe7, - 0x58, 0x1e, 0x76, 0x58, 0xfe, 0xed, 0x12, 0x3c, 0x35, 0x80, 0x0d, 0x50, 0x60, 0x6a, 0x55, 0x36, - 0xc1, 0xad, 0xfc, 0x80, 0xf2, 0x10, 0x0f, 0xd9, 0x5d, 0x6f, 0x95, 0xe0, 0x7c, 0xff, 0xa5, 0x18, - 0xfd, 0x28, 0xdd, 0xc3, 0xcb, 0xd8, 0x27, 0x33, 0x37, 0xee, 0x0c, 0xdf, 0xbf, 0x67, 0x40, 0x38, - 0x8f, 0x8b, 0x26, 0x01, 0x22, 0x27, 0xdd, 0x48, 0x2e, 0x6d, 0x7b, 0x49, 0x2a, 0x6a, 0xbf, 0x9c, - 0xe4, 0x27, 0x46, 0xb2, 0x15, 0x1b, 0x18, 0x94, 0x1d, 0xfb, 0x37, 0x1b, 0x5e, 0x0f, 0x53, 0xfe, - 0x10, 0xdf, 0x46, 0x9c, 0x91, 0xf5, 0xf1, 0x0d, 0x10, 0xce, 0xe3, 0x52, 0x76, 0xec, 0x4c, 0x92, - 0x0b, 0xca, 0xf7, 0x17, 0x8c, 0xdd, 0x82, 0x6a, 0xc5, 0x06, 0x46, 0x3e, 0xeb, 0xaf, 0xba, 0x7f, - 0xd6, 0x9f, 0xfd, 0x0f, 0x4a, 0xf0, 0x58, 0x5f, 0x53, 0x6e, 0xb0, 0x09, 0xf8, 0xf0, 0x65, 0xea, - 0x1d, 0x6e, 0xec, 0x1c, 0x30, 0xa3, 0xec, 0x0f, 0xfb, 0x8c, 0x34, 0x91, 0x51, 0x76, 0xf8, 0x94, - 0xec, 0x87, 0xaf, 0x3f, 0xbb, 0x92, 0xc8, 0x2a, 0x07, 0x48, 0x22, 0xcb, 0x7d, 0x8c, 0xea, 0x80, - 0x13, 0xf9, 0xff, 0xf4, 0xef, 0x5e, 0xba, 0xf5, 0x1b, 0xc8, 0x3b, 0x3a, 0x0b, 0xa7, 0xbd, 0x80, - 0xdd, 0x95, 0xb2, 0xd2, 0x59, 0x13, 0xe5, 0x40, 0x4a, 0xd9, 0x1b, 0x8d, 
0xe7, 0x73, 0x70, 0xdc, - 0xf5, 0xc4, 0x43, 0x98, 0xd4, 0x77, 0xb8, 0x2e, 0x3d, 0x60, 0x5a, 0xe9, 0x47, 0xa0, 0xae, 0x24, - 0xe1, 0x61, 0xcd, 0xea, 0xf3, 0x77, 0x85, 0x35, 0xab, 0x6f, 0x6f, 0x60, 0xd1, 0x7e, 0xa3, 0xc6, - 0x69, 0x6e, 0x1c, 0x5f, 0x23, 0x3b, 0xcc, 0x52, 0xb5, 0xdf, 0x0b, 0xa3, 0xca, 0xe3, 0x31, 0xe8, - 0xf5, 0x19, 0xf6, 0x97, 0x87, 0xe0, 0x44, 0xa6, 0x38, 0x5e, 0xc6, 0xc1, 0x68, 0xed, 0xeb, 0x60, - 0x64, 0x61, 0xea, 0x9d, 0x40, 0xde, 0xad, 0x63, 0x84, 0xa9, 0x77, 0x02, 0x82, 0x39, 0x8c, 0x1a, - 0x9a, 0xcd, 0x78, 0x07, 0x77, 0x02, 0x11, 0x4e, 0xaa, 0x0c, 0xcd, 0x59, 0xd6, 0x8a, 0x05, 0x14, - 0x7d, 0xd2, 0x82, 0xd1, 0x84, 0x79, 0xaf, 0xb9, 0x7b, 0x56, 0x7c, 0xfe, 0xab, 0x47, 0xaf, 0xfd, - 0xa7, 0x0a, 0x41, 0xb2, 0x08, 0x11, 0xb3, 0x05, 0x67, 0x38, 0xa2, 0x9f, 0xb6, 0xa0, 0xae, 0xae, - 0x00, 0x10, 0x17, 0x65, 0xad, 0x14, 0x5b, 0x7b, 0x90, 0xfb, 0xf5, 0xd4, 0x41, 0x80, 0xbe, 0xfb, - 0x5b, 0x33, 0x46, 0x89, 0xf2, 0x9d, 0x0e, 0x1f, 0x8f, 0xef, 0x14, 0x7a, 0xf8, 0x4d, 0xdf, 0x03, - 0xf5, 0xb6, 0x13, 0x78, 0xeb, 0x24, 0x49, 0xb9, 0x3b, 0x53, 0x96, 0x44, 0x95, 0x8d, 0x58, 0xc3, - 0xe9, 0xd2, 0x98, 0xb0, 0x17, 0x4b, 0x0d, 0xff, 0x23, 0x5b, 0x1a, 0x57, 0x74, 0x33, 0x36, 0x71, - 0x4c, 0x67, 0x29, 0x3c, 0x50, 0x67, 0xe9, 0xc8, 0x3e, 0xce, 0xd2, 0xbf, 0x6b, 0xc1, 0xb9, 0x9e, - 0x5f, 0xed, 0xe1, 0x0d, 0xfc, 0xb3, 0xbf, 0x52, 0x85, 0x33, 0x3d, 0xaa, 0x5c, 0xa2, 0x1d, 0x73, - 0x3c, 0x5b, 0x45, 0x9c, 0xa1, 0x67, 0x8f, 0x84, 0x65, 0x37, 0xf6, 0x18, 0xc4, 0x07, 0x3b, 0xaa, - 0xd0, 0xc7, 0x05, 0xe5, 0xfb, 0x7b, 0x5c, 0x60, 0x0c, 0xcb, 0xca, 0x03, 0x1d, 0x96, 0xd5, 0x7b, - 0x0f, 0x4b, 0xf4, 0x6b, 0x16, 0x8c, 0xb7, 0xfb, 0x94, 0x56, 0x17, 0x2e, 0xc0, 0x9b, 0xc7, 0x53, - 0xb8, 0xbd, 0xf1, 0xf8, 0xde, 0xee, 0x44, 0xdf, 0x8a, 0xf6, 0xb8, 0xaf, 0x54, 0xf6, 0x77, 0xca, - 0xc0, 0x4a, 0xac, 0xb2, 0x4a, 0x66, 0x3b, 0xe8, 0x13, 0x66, 0xb1, 0x5c, 0xab, 0xa8, 0xc2, 0xae, - 0x9c, 0xb8, 0x2a, 0xb6, 0xcb, 0x7b, 0xb0, 0x57, 0xed, 0xdd, 0xbc, 0xd2, 0x2a, 0x0d, 0xa0, 0xb4, - 0x7c, 0x59, 0x95, 0xb8, 0x5c, 0x7c, 0x55, 0xe2, 0x7a, 0xbe, 0x22, 0xf1, 0xbd, 0x3f, 0x71, 0xe5, - 0xa1, 0xfc, 0xc4, 0x7f, 0xd5, 0xe2, 0x8a, 0x27, 0xf7, 0x15, 0xb4, 0x65, 0x60, 0xdd, 0xc3, 0x32, - 0x78, 0x96, 0xdd, 0xe2, 0xbe, 0x7e, 0x85, 0x38, 0xbe, 0xb0, 0x20, 0xcc, 0x0b, 0xd9, 0x59, 0x3b, - 0x56, 0x18, 0xec, 0xb2, 0x42, 0xdf, 0x0f, 0xef, 0x5c, 0x6a, 0x47, 0xe9, 0x8e, 0xb0, 0x25, 0xf4, - 0x65, 0x85, 0x0a, 0x82, 0x0d, 0x2c, 0xfb, 0xaf, 0x95, 0xf8, 0x08, 0x14, 0x41, 0x00, 0x2f, 0xe5, - 0xae, 0x97, 0x1a, 0xfc, 0xfc, 0xfc, 0x63, 0x00, 0xae, 0xba, 0xc0, 0x59, 0x9c, 0xce, 0x5c, 0x39, - 0xf2, 0xed, 0xb2, 0x82, 0x9e, 0x7e, 0x0d, 0xdd, 0x86, 0x0d, 0x7e, 0x19, 0x5d, 0x5a, 0xde, 0x57, - 0x97, 0x66, 0xd4, 0x4a, 0x65, 0x9f, 0xd5, 0xee, 0x8f, 0x2d, 0xc8, 0x58, 0x44, 0x28, 0x82, 0x2a, - 0x15, 0x77, 0xa7, 0x98, 0xbb, 0xa9, 0x4d, 0xd2, 0x54, 0x35, 0x8a, 0x61, 0xcf, 0x7e, 0x62, 0xce, - 0x08, 0xf9, 0x22, 0x56, 0xa0, 0x54, 0xc4, 0xfd, 0xe9, 0x26, 0xc3, 0x2b, 0x61, 0xb8, 0xc9, 0x8f, - 0x18, 0x75, 0xdc, 0x81, 0xfd, 0x12, 0x8c, 0x75, 0x09, 0xc5, 0x6e, 0x92, 0x09, 0xe5, 0x85, 0xdc, - 0xc6, 0x70, 0x65, 0x09, 0x8c, 0x98, 0xc3, 0xec, 0xb7, 0x2c, 0x38, 0x9d, 0x27, 0x8f, 0xbe, 0x6a, - 0xc1, 0x58, 0x92, 0xa7, 0x77, 0x5c, 0x7d, 0xa7, 0xe2, 0xfd, 0xba, 0x40, 0xb8, 0x5b, 0x08, 0xfb, - 0x7f, 0x8b, 0xc1, 0x7f, 0xcb, 0x0b, 0x9a, 0xe1, 0x1d, 0x65, 0x98, 0x58, 0x7d, 0x0d, 0x13, 0x3a, - 0x1f, 0xdd, 0x0d, 0xd2, 0xec, 0xf8, 0x5d, 0x99, 0x93, 0x2b, 0xa2, 0x1d, 0x2b, 0x0c, 0x96, 0x28, - 0xd6, 0x11, 0x65, 0xcb, 0x73, 0x83, 0x72, 0x56, 0xb4, 0x63, 0x85, 0x81, 0x5e, 0x80, 0x51, 0xf3, - 
0xd2, 0x79, 0x31, 0x2e, 0x99, 0x41, 0x6e, 0xde, 0x4f, 0x8f, 0x33, 0x58, 0x68, 0x12, 0x40, 0x19, - 0x39, 0x72, 0x89, 0x64, 0x2e, 0x1b, 0xa5, 0x89, 0x12, 0x6c, 0x60, 0xb0, 0xb4, 0x4c, 0x7e, 0xb3, - 0xbb, 0x8c, 0x8a, 0xe5, 0x69, 0x99, 0xa2, 0x0d, 0x2b, 0x28, 0xd5, 0x26, 0x6d, 0x27, 0xe8, 0x38, - 0x3e, 0xed, 0x21, 0x91, 0x4b, 0xae, 0xa6, 0xe1, 0xa2, 0x82, 0x60, 0x03, 0x8b, 0xbe, 0x71, 0xea, - 0xb5, 0xc9, 0xab, 0x61, 0x20, 0xe3, 0xb4, 0xf4, 0x01, 0x8c, 0x68, 0xc7, 0x0a, 0xc3, 0xfe, 0x4f, - 0x16, 0x9c, 0xd2, 0x49, 0xde, 0xfc, 0xce, 0x58, 0x73, 0xcf, 0x68, 0xed, 0x9b, 0xbf, 0x9e, 0xcd, - 0x7e, 0x2d, 0x0d, 0x94, 0xfd, 0x6a, 0x26, 0xa6, 0x96, 0xef, 0x99, 0x98, 0xfa, 0x83, 0xfa, 0x3e, - 0x42, 0x9e, 0xc1, 0x3a, 0xd2, 0xeb, 0x2e, 0x42, 0x64, 0xc3, 0x90, 0xeb, 0xa8, 0x0a, 0x27, 0xa3, - 0x7c, 0xef, 0x30, 0x33, 0xcd, 0x90, 0x04, 0xc4, 0x5e, 0x82, 0xba, 0x3a, 0x87, 0x90, 0x1b, 0x55, - 0xab, 0xf7, 0x46, 0x75, 0xa0, 0x04, 0xb9, 0xc6, 0xda, 0x37, 0xbf, 0xfb, 0xe4, 0x3b, 0x7e, 0xf7, - 0xbb, 0x4f, 0xbe, 0xe3, 0x0f, 0xbe, 0xfb, 0xe4, 0x3b, 0x3e, 0xb9, 0xf7, 0xa4, 0xf5, 0xcd, 0xbd, - 0x27, 0xad, 0xdf, 0xdd, 0x7b, 0xd2, 0xfa, 0x83, 0xbd, 0x27, 0xad, 0xef, 0xec, 0x3d, 0x69, 0x7d, - 0xe9, 0xdf, 0x3f, 0xf9, 0x8e, 0x57, 0x7b, 0x06, 0xea, 0xd1, 0x1f, 0xcf, 0xb9, 0xcd, 0xa9, 0xad, - 0x8b, 0x2c, 0x56, 0x8c, 0x4e, 0xaf, 0x29, 0x63, 0x4c, 0x4d, 0xc9, 0xe9, 0xf5, 0xff, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x36, 0x27, 0x29, 0x3c, 0xac, 0xd9, 0x00, 0x00, + // 10965 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6d, 0x70, 0x1c, 0xc9, + 0x75, 0x98, 0x66, 0x3f, 0x80, 0xdd, 0x07, 0x10, 0x24, 0x9b, 0xe4, 0x1d, 0x48, 0xdd, 0x1d, 0xe8, + 0x39, 0xfb, 0x74, 0x8e, 0xee, 0x00, 0x1f, 0x7d, 0xa7, 0x5c, 0x74, 0xb6, 0x64, 0x7c, 0x90, 0x20, + 0x48, 0x80, 0xc0, 0x35, 0x40, 0x52, 0x3a, 0xf9, 0x74, 0x1a, 0xec, 0x36, 0x16, 0x43, 0xcc, 0xce, + 0xcc, 0xcd, 0xcc, 0x82, 0xc0, 0x59, 0x92, 0x25, 0x4b, 0xb6, 0x95, 0xe8, 0xe3, 0x14, 0x29, 0x29, + 0x9f, 0x93, 0xc8, 0x91, 0x2d, 0x27, 0x15, 0x57, 0xa2, 0x8a, 0x93, 0xfc, 0x88, 0x13, 0x27, 0xe5, + 0xb2, 0x9d, 0x4a, 0x29, 0xa5, 0xa4, 0xec, 0x4a, 0xb9, 0x2c, 0x27, 0xb1, 0x11, 0x09, 0x29, 0x57, + 0x52, 0xa9, 0x8a, 0xab, 0x9c, 0xf8, 0x87, 0xc3, 0xe4, 0x47, 0xaa, 0xbf, 0x7b, 0x66, 0x67, 0x81, + 0x05, 0x30, 0x20, 0x29, 0xe5, 0xfe, 0xed, 0xf6, 0x7b, 0xf3, 0x5e, 0x4f, 0x4f, 0xf7, 0x7b, 0xaf, + 0x5f, 0xbf, 0xf7, 0x1a, 0xe6, 0x5b, 0x6e, 0xb2, 0xde, 0x59, 0x1d, 0x6f, 0x04, 0xed, 0x09, 0x27, + 0x6a, 0x05, 0x61, 0x14, 0xdc, 0x61, 0x3f, 0x9e, 0x6d, 0x34, 0x27, 0x36, 0x2f, 0x4d, 0x84, 0x1b, + 0xad, 0x09, 0x27, 0x74, 0xe3, 0x09, 0x27, 0x0c, 0x3d, 0xb7, 0xe1, 0x24, 0x6e, 0xe0, 0x4f, 0x6c, + 0x3e, 0xe7, 0x78, 0xe1, 0xba, 0xf3, 0xdc, 0x44, 0x8b, 0xf8, 0x24, 0x72, 0x12, 0xd2, 0x1c, 0x0f, + 0xa3, 0x20, 0x09, 0xd0, 0x8f, 0x68, 0x6a, 0xe3, 0x92, 0x1a, 0xfb, 0xf1, 0x5a, 0xa3, 0x39, 0xbe, + 0x79, 0x69, 0x3c, 0xdc, 0x68, 0x8d, 0x53, 0x6a, 0xe3, 0x06, 0xb5, 0x71, 0x49, 0xed, 0xc2, 0xb3, + 0x46, 0x5f, 0x5a, 0x41, 0x2b, 0x98, 0x60, 0x44, 0x57, 0x3b, 0x6b, 0xec, 0x1f, 0xfb, 0xc3, 0x7e, + 0x71, 0x66, 0x17, 0xec, 0x8d, 0x17, 0xe3, 0x71, 0x37, 0xa0, 0xdd, 0x9b, 0x68, 0x04, 0x11, 0x99, + 0xd8, 0xec, 0xea, 0xd0, 0x85, 0xab, 0x1a, 0x87, 0x6c, 0x25, 0xc4, 0x8f, 0xdd, 0xc0, 0x8f, 0x9f, + 0xa5, 0x5d, 0x20, 0xd1, 0x26, 0x89, 0xcc, 0xd7, 0x33, 0x10, 0xf2, 0x28, 0x3d, 0xaf, 0x29, 0xb5, + 0x9d, 0xc6, 0xba, 0xeb, 0x93, 0x68, 0x5b, 0x3f, 0xde, 0x26, 0x89, 0x93, 0xf7, 0xd4, 0x44, 0xaf, + 0xa7, 0xa2, 0x8e, 0x9f, 0xb8, 0x6d, 0xd2, 0xf5, 0xc0, 0x7b, 0xf6, 0x7b, 0x20, 0x6e, 0xac, 0x93, + 0xb6, 
0xd3, 0xf5, 0xdc, 0x0f, 0xf7, 0x7a, 0xae, 0x93, 0xb8, 0xde, 0x84, 0xeb, 0x27, 0x71, 0x12, + 0x65, 0x1f, 0xb2, 0x5f, 0x87, 0x13, 0x93, 0xb7, 0x97, 0x27, 0x3b, 0xc9, 0xfa, 0x74, 0xe0, 0xaf, + 0xb9, 0x2d, 0xf4, 0x02, 0x0c, 0x35, 0xbc, 0x4e, 0x9c, 0x90, 0xe8, 0x86, 0xd3, 0x26, 0xa3, 0xd6, + 0x45, 0xeb, 0xe9, 0xfa, 0xd4, 0x99, 0x6f, 0xec, 0x8c, 0xbd, 0x63, 0x77, 0x67, 0x6c, 0x68, 0x5a, + 0x83, 0xb0, 0x89, 0x87, 0x7e, 0x10, 0x06, 0xa3, 0xc0, 0x23, 0x93, 0xf8, 0xc6, 0x68, 0x89, 0x3d, + 0x72, 0x52, 0x3c, 0x32, 0x88, 0x79, 0x33, 0x96, 0x70, 0xfb, 0xf7, 0x4b, 0x00, 0x93, 0x61, 0xb8, + 0x14, 0x05, 0x77, 0x48, 0x23, 0x41, 0x1f, 0x81, 0x1a, 0x1d, 0xba, 0xa6, 0x93, 0x38, 0x8c, 0xdb, + 0xd0, 0xa5, 0x1f, 0x1a, 0xe7, 0x6f, 0x32, 0x6e, 0xbe, 0x89, 0x9e, 0x38, 0x14, 0x7b, 0x7c, 0xf3, + 0xb9, 0xf1, 0xc5, 0x55, 0xfa, 0xfc, 0x02, 0x49, 0x9c, 0x29, 0x24, 0x98, 0x81, 0x6e, 0xc3, 0x8a, + 0x2a, 0xf2, 0xa1, 0x12, 0x87, 0xa4, 0xc1, 0x3a, 0x36, 0x74, 0x69, 0x7e, 0xfc, 0x28, 0x33, 0x74, + 0x5c, 0xf7, 0x7c, 0x39, 0x24, 0x8d, 0xa9, 0x61, 0xc1, 0xb9, 0x42, 0xff, 0x61, 0xc6, 0x07, 0x6d, + 0xc2, 0x40, 0x9c, 0x38, 0x49, 0x27, 0x1e, 0x2d, 0x33, 0x8e, 0x37, 0x0a, 0xe3, 0xc8, 0xa8, 0x4e, + 0x8d, 0x08, 0x9e, 0x03, 0xfc, 0x3f, 0x16, 0xdc, 0xec, 0x3f, 0xb2, 0x60, 0x44, 0x23, 0xcf, 0xbb, + 0x71, 0x82, 0x7e, 0xbc, 0x6b, 0x70, 0xc7, 0xfb, 0x1b, 0x5c, 0xfa, 0x34, 0x1b, 0xda, 0x53, 0x82, + 0x59, 0x4d, 0xb6, 0x18, 0x03, 0xdb, 0x86, 0xaa, 0x9b, 0x90, 0x76, 0x3c, 0x5a, 0xba, 0x58, 0x7e, + 0x7a, 0xe8, 0xd2, 0xd5, 0xa2, 0xde, 0x73, 0xea, 0x84, 0x60, 0x5a, 0x9d, 0xa3, 0xe4, 0x31, 0xe7, + 0x62, 0xff, 0xca, 0xb0, 0xf9, 0x7e, 0x74, 0xc0, 0xd1, 0x73, 0x30, 0x14, 0x07, 0x9d, 0xa8, 0x41, + 0x30, 0x09, 0x83, 0x78, 0xd4, 0xba, 0x58, 0xa6, 0x53, 0x8f, 0xce, 0xd4, 0x65, 0xdd, 0x8c, 0x4d, + 0x1c, 0xf4, 0x05, 0x0b, 0x86, 0x9b, 0x24, 0x4e, 0x5c, 0x9f, 0xf1, 0x97, 0x9d, 0x5f, 0x39, 0x72, + 0xe7, 0x65, 0xe3, 0x8c, 0x26, 0x3e, 0x75, 0x56, 0xbc, 0xc8, 0xb0, 0xd1, 0x18, 0xe3, 0x14, 0x7f, + 0xba, 0xe2, 0x9a, 0x24, 0x6e, 0x44, 0x6e, 0x48, 0xff, 0xb3, 0x39, 0x63, 0xac, 0xb8, 0x19, 0x0d, + 0xc2, 0x26, 0x1e, 0xf2, 0xa1, 0x4a, 0x57, 0x54, 0x3c, 0x5a, 0x61, 0xfd, 0x9f, 0x3b, 0x5a, 0xff, + 0xc5, 0xa0, 0xd2, 0xc5, 0xaa, 0x47, 0x9f, 0xfe, 0x8b, 0x31, 0x67, 0x83, 0x3e, 0x6f, 0xc1, 0xa8, + 0x58, 0xf1, 0x98, 0xf0, 0x01, 0xbd, 0xbd, 0xee, 0x26, 0xc4, 0x73, 0xe3, 0x64, 0xb4, 0xca, 0xfa, + 0x30, 0xd1, 0xdf, 0xdc, 0x9a, 0x8d, 0x82, 0x4e, 0x78, 0xdd, 0xf5, 0x9b, 0x53, 0x17, 0x05, 0xa7, + 0xd1, 0xe9, 0x1e, 0x84, 0x71, 0x4f, 0x96, 0xe8, 0xcb, 0x16, 0x5c, 0xf0, 0x9d, 0x36, 0x89, 0x43, + 0x87, 0x7e, 0x5a, 0x0e, 0x9e, 0xf2, 0x9c, 0xc6, 0x06, 0xeb, 0xd1, 0xc0, 0xe1, 0x7a, 0x64, 0x8b, + 0x1e, 0x5d, 0xb8, 0xd1, 0x93, 0x34, 0xde, 0x83, 0x2d, 0xfa, 0x9a, 0x05, 0xa7, 0x83, 0x28, 0x5c, + 0x77, 0x7c, 0xd2, 0x94, 0xd0, 0x78, 0x74, 0x90, 0x2d, 0xbd, 0x0f, 0x1f, 0xed, 0x13, 0x2d, 0x66, + 0xc9, 0x2e, 0x04, 0xbe, 0x9b, 0x04, 0xd1, 0x32, 0x49, 0x12, 0xd7, 0x6f, 0xc5, 0x53, 0xe7, 0x76, + 0x77, 0xc6, 0x4e, 0x77, 0x61, 0xe1, 0xee, 0xfe, 0xa0, 0x9f, 0x80, 0xa1, 0x78, 0xdb, 0x6f, 0xdc, + 0x76, 0xfd, 0x66, 0x70, 0x37, 0x1e, 0xad, 0x15, 0xb1, 0x7c, 0x97, 0x15, 0x41, 0xb1, 0x00, 0x35, + 0x03, 0x6c, 0x72, 0xcb, 0xff, 0x70, 0x7a, 0x2a, 0xd5, 0x8b, 0xfe, 0x70, 0x7a, 0x32, 0xed, 0xc1, + 0x16, 0xfd, 0xac, 0x05, 0x27, 0x62, 0xb7, 0xe5, 0x3b, 0x49, 0x27, 0x22, 0xd7, 0xc9, 0x76, 0x3c, + 0x0a, 0xac, 0x23, 0xd7, 0x8e, 0x38, 0x2a, 0x06, 0xc9, 0xa9, 0x73, 0xa2, 0x8f, 0x27, 0xcc, 0xd6, + 0x18, 0xa7, 0xf9, 0xe6, 0x2d, 0x34, 0x3d, 0xad, 0x87, 0x8a, 0x5d, 0x68, 0x7a, 0x52, 0xf7, 0x64, + 0x89, 0x7e, 0x0c, 0x4e, 0xf1, 
0x26, 0x35, 0xb2, 0xf1, 0xe8, 0x30, 0x13, 0xb4, 0x67, 0x77, 0x77, + 0xc6, 0x4e, 0x2d, 0x67, 0x60, 0xb8, 0x0b, 0x1b, 0xbd, 0x0e, 0x63, 0x21, 0x89, 0xda, 0x6e, 0xb2, + 0xe8, 0x7b, 0xdb, 0x52, 0x7c, 0x37, 0x82, 0x90, 0x34, 0x45, 0x77, 0xe2, 0xd1, 0x13, 0x17, 0xad, + 0xa7, 0x6b, 0x53, 0xef, 0x12, 0xdd, 0x1c, 0x5b, 0xda, 0x1b, 0x1d, 0xef, 0x47, 0xcf, 0xfe, 0x37, + 0x25, 0x38, 0x95, 0x55, 0x9c, 0xe8, 0xef, 0x5a, 0x70, 0xf2, 0xce, 0xdd, 0x64, 0x25, 0xd8, 0x20, + 0x7e, 0x3c, 0xb5, 0x4d, 0xc5, 0x1b, 0x53, 0x19, 0x43, 0x97, 0x1a, 0xc5, 0xaa, 0xe8, 0xf1, 0x6b, + 0x69, 0x2e, 0x97, 0xfd, 0x24, 0xda, 0x9e, 0x7a, 0x54, 0xbc, 0xdd, 0xc9, 0x6b, 0xb7, 0x57, 0x4c, + 0x28, 0xce, 0x76, 0xea, 0xc2, 0x67, 0x2d, 0x38, 0x9b, 0x47, 0x02, 0x9d, 0x82, 0xf2, 0x06, 0xd9, + 0xe6, 0x56, 0x19, 0xa6, 0x3f, 0xd1, 0xab, 0x50, 0xdd, 0x74, 0xbc, 0x0e, 0x11, 0xd6, 0xcd, 0xec, + 0xd1, 0x5e, 0x44, 0xf5, 0x0c, 0x73, 0xaa, 0xef, 0x2d, 0xbd, 0x68, 0xd9, 0xbf, 0x53, 0x86, 0x21, + 0x43, 0xbf, 0xdd, 0x07, 0x8b, 0x2d, 0x48, 0x59, 0x6c, 0x0b, 0x85, 0xa9, 0xe6, 0x9e, 0x26, 0xdb, + 0xdd, 0x8c, 0xc9, 0xb6, 0x58, 0x1c, 0xcb, 0x3d, 0x6d, 0x36, 0x94, 0x40, 0x3d, 0x08, 0xa9, 0x45, + 0x4e, 0x55, 0x7f, 0xa5, 0x88, 0x4f, 0xb8, 0x28, 0xc9, 0x4d, 0x9d, 0xd8, 0xdd, 0x19, 0xab, 0xab, + 0xbf, 0x58, 0x33, 0xb2, 0xbf, 0x65, 0xc1, 0x59, 0xa3, 0x8f, 0xd3, 0x81, 0xdf, 0x74, 0xd9, 0xa7, + 0xbd, 0x08, 0x95, 0x64, 0x3b, 0x94, 0x66, 0xbf, 0x1a, 0xa9, 0x95, 0xed, 0x90, 0x60, 0x06, 0xa1, + 0x86, 0x7e, 0x9b, 0xc4, 0xb1, 0xd3, 0x22, 0x59, 0x43, 0x7f, 0x81, 0x37, 0x63, 0x09, 0x47, 0x11, + 0x20, 0xcf, 0x89, 0x93, 0x95, 0xc8, 0xf1, 0x63, 0x46, 0x7e, 0xc5, 0x6d, 0x13, 0x31, 0xc0, 0x7f, + 0xa1, 0xbf, 0x19, 0x43, 0x9f, 0x98, 0x7a, 0x64, 0x77, 0x67, 0x0c, 0xcd, 0x77, 0x51, 0xc2, 0x39, + 0xd4, 0xed, 0x2f, 0x5b, 0xf0, 0x48, 0xbe, 0x2d, 0x86, 0x9e, 0x82, 0x01, 0xbe, 0xe5, 0x13, 0x6f, + 0xa7, 0x3f, 0x09, 0x6b, 0xc5, 0x02, 0x8a, 0x26, 0xa0, 0xae, 0xf4, 0x84, 0x78, 0xc7, 0xd3, 0x02, + 0xb5, 0xae, 0x95, 0x8b, 0xc6, 0xa1, 0x83, 0x46, 0xff, 0x08, 0xcb, 0x4d, 0x0d, 0x1a, 0xdb, 0x24, + 0x31, 0x88, 0xfd, 0x9f, 0x2d, 0x38, 0x69, 0xf4, 0xea, 0x3e, 0x98, 0xe6, 0x7e, 0xda, 0x34, 0x9f, + 0x2b, 0x6c, 0x3e, 0xf7, 0xb0, 0xcd, 0x3f, 0x6f, 0xc1, 0x05, 0x03, 0x6b, 0xc1, 0x49, 0x1a, 0xeb, + 0x97, 0xb7, 0xc2, 0x88, 0xc4, 0x74, 0x3b, 0x8d, 0x1e, 0x37, 0xe4, 0xd6, 0xd4, 0x90, 0xa0, 0x50, + 0xbe, 0x4e, 0xb6, 0xb9, 0x10, 0x7b, 0x06, 0x6a, 0x7c, 0x72, 0x06, 0x91, 0x18, 0x71, 0xf5, 0x6e, + 0x8b, 0xa2, 0x1d, 0x2b, 0x0c, 0x64, 0xc3, 0x00, 0x13, 0x4e, 0x74, 0xb1, 0x52, 0x35, 0x04, 0xf4, + 0x23, 0xde, 0x62, 0x2d, 0x58, 0x40, 0xec, 0x38, 0xd5, 0x9d, 0xa5, 0x88, 0xb0, 0x8f, 0xdb, 0xbc, + 0xe2, 0x12, 0xaf, 0x19, 0xd3, 0x6d, 0x83, 0xe3, 0xfb, 0x41, 0x22, 0x76, 0x00, 0xc6, 0xb6, 0x61, + 0x52, 0x37, 0x63, 0x13, 0x87, 0x32, 0xf5, 0x9c, 0x55, 0xe2, 0xf1, 0x11, 0x15, 0x4c, 0xe7, 0x59, + 0x0b, 0x16, 0x10, 0x7b, 0xb7, 0xc4, 0x36, 0x28, 0x6a, 0xe9, 0x93, 0xfb, 0xb1, 0xbb, 0x8d, 0x52, + 0xb2, 0x72, 0xa9, 0x38, 0xc1, 0x45, 0x7a, 0xef, 0x70, 0xdf, 0xc8, 0x88, 0x4b, 0x5c, 0x28, 0xd7, + 0xbd, 0x77, 0xb9, 0xbf, 0x59, 0x82, 0xb1, 0xf4, 0x03, 0x5d, 0xd2, 0x96, 0x6e, 0xa9, 0x0c, 0x46, + 0x59, 0x27, 0x86, 0x81, 0x8f, 0x4d, 0xbc, 0x1e, 0x02, 0xab, 0x74, 0x9c, 0x02, 0xcb, 0x94, 0xa7, + 0xe5, 0x7d, 0xe4, 0xe9, 0x53, 0x6a, 0xd4, 0x2b, 0x19, 0x01, 0x96, 0xd6, 0x29, 0x17, 0xa1, 0x12, + 0x27, 0x24, 0x1c, 0xad, 0xa6, 0xe5, 0xd1, 0x72, 0x42, 0x42, 0xcc, 0x20, 0xf6, 0x7f, 0x2f, 0xc1, + 0xa3, 0xe9, 0x31, 0xd4, 0x2a, 0xe0, 0xfd, 0x29, 0x15, 0xf0, 0x6e, 0x53, 0x05, 0xdc, 0xdb, 0x19, + 0x7b, 0x67, 0x8f, 0xc7, 0xbe, 0x6b, 0x34, 0x04, 0x9a, 
0xcd, 0x8c, 0xe2, 0x44, 0x7a, 0x14, 0xef, + 0xed, 0x8c, 0x3d, 0xde, 0xe3, 0x1d, 0x33, 0xc3, 0xfc, 0x14, 0x0c, 0x44, 0xc4, 0x89, 0x03, 0x5f, + 0x0c, 0xb4, 0xfa, 0x1c, 0x98, 0xb5, 0x62, 0x01, 0xb5, 0xff, 0x7d, 0x3d, 0x3b, 0xd8, 0xb3, 0xdc, + 0x09, 0x17, 0x44, 0xc8, 0x85, 0x0a, 0x33, 0xeb, 0xb9, 0x68, 0xb8, 0x7e, 0xb4, 0x65, 0x44, 0xd5, + 0x80, 0x22, 0x3d, 0x55, 0xa3, 0x5f, 0x8d, 0x36, 0x61, 0xc6, 0x02, 0x6d, 0x41, 0xad, 0x21, 0xad, + 0xed, 0x52, 0x11, 0x7e, 0x29, 0x61, 0x6b, 0x6b, 0x8e, 0xc3, 0x54, 0x5e, 0x2b, 0x13, 0x5d, 0x71, + 0x43, 0x04, 0xca, 0x2d, 0x37, 0x11, 0x9f, 0xf5, 0x88, 0xfb, 0xa9, 0x59, 0xd7, 0x78, 0xc5, 0x41, + 0xaa, 0x44, 0x66, 0xdd, 0x04, 0x53, 0xfa, 0xe8, 0xa7, 0x2d, 0x18, 0x8a, 0x1b, 0xed, 0xa5, 0x28, + 0xd8, 0x74, 0x9b, 0x24, 0x12, 0xd6, 0xd4, 0x11, 0x45, 0xd3, 0xf2, 0xf4, 0x82, 0x24, 0xa8, 0xf9, + 0xf2, 0xfd, 0xad, 0x86, 0x60, 0x93, 0x2f, 0xdd, 0x65, 0x3c, 0x2a, 0xde, 0x7d, 0x86, 0x34, 0x5c, + 0xaa, 0xff, 0xe4, 0xa6, 0x8a, 0xcd, 0x94, 0x23, 0x5b, 0x97, 0x33, 0x9d, 0xc6, 0x06, 0x5d, 0x6f, + 0xba, 0x43, 0xef, 0xdc, 0xdd, 0x19, 0x7b, 0x74, 0x3a, 0x9f, 0x27, 0xee, 0xd5, 0x19, 0x36, 0x60, + 0x61, 0xc7, 0xf3, 0x30, 0x79, 0xbd, 0x43, 0x98, 0xcb, 0xa4, 0x80, 0x01, 0x5b, 0xd2, 0x04, 0x33, + 0x03, 0x66, 0x40, 0xb0, 0xc9, 0x17, 0xbd, 0x0e, 0x03, 0x6d, 0x27, 0x89, 0xdc, 0x2d, 0xe1, 0x27, + 0x39, 0xa2, 0xbd, 0xbf, 0xc0, 0x68, 0x69, 0xe6, 0x4c, 0x53, 0xf3, 0x46, 0x2c, 0x18, 0xa1, 0x36, + 0x54, 0xdb, 0x24, 0x6a, 0x91, 0xd1, 0x5a, 0x11, 0x3e, 0xe1, 0x05, 0x4a, 0x4a, 0x33, 0xac, 0x53, + 0xeb, 0x88, 0xb5, 0x61, 0xce, 0x05, 0xbd, 0x0a, 0xb5, 0x98, 0x78, 0xa4, 0x41, 0xed, 0x9b, 0x3a, + 0xe3, 0xf8, 0xc3, 0x7d, 0xda, 0x7a, 0xd4, 0xb0, 0x58, 0x16, 0x8f, 0xf2, 0x05, 0x26, 0xff, 0x61, + 0x45, 0x92, 0x0e, 0x60, 0xe8, 0x75, 0x5a, 0xae, 0x3f, 0x0a, 0x45, 0x0c, 0xe0, 0x12, 0xa3, 0x95, + 0x19, 0x40, 0xde, 0x88, 0x05, 0x23, 0xfb, 0x8f, 0x2d, 0x40, 0x69, 0xa1, 0x76, 0x1f, 0x8c, 0xda, + 0xd7, 0xd3, 0x46, 0xed, 0x7c, 0x91, 0x56, 0x47, 0x0f, 0xbb, 0xf6, 0xd7, 0xeb, 0x90, 0x51, 0x07, + 0x37, 0x48, 0x9c, 0x90, 0xe6, 0xdb, 0x22, 0xfc, 0x6d, 0x11, 0xfe, 0xb6, 0x08, 0x57, 0x22, 0x7c, + 0x35, 0x23, 0xc2, 0xdf, 0x67, 0xac, 0x7a, 0x7d, 0xa8, 0xfa, 0x9a, 0x3a, 0x75, 0x35, 0x7b, 0x60, + 0x20, 0x50, 0x49, 0x70, 0x6d, 0x79, 0xf1, 0x46, 0xae, 0xcc, 0x7e, 0x2d, 0x2d, 0xb3, 0x8f, 0xca, + 0xe2, 0xff, 0x07, 0x29, 0xfd, 0xaf, 0x2d, 0x78, 0x57, 0x5a, 0x7a, 0xc9, 0x99, 0x33, 0xd7, 0xf2, + 0x83, 0x88, 0xcc, 0xb8, 0x6b, 0x6b, 0x24, 0x22, 0x7e, 0x83, 0xc4, 0xca, 0x8b, 0x61, 0xf5, 0xf2, + 0x62, 0xa0, 0xe7, 0x61, 0xf8, 0x4e, 0x1c, 0xf8, 0x4b, 0x81, 0xeb, 0x0b, 0x11, 0x44, 0x37, 0xc2, + 0xa7, 0x76, 0x77, 0xc6, 0x86, 0xe9, 0x88, 0xca, 0x76, 0x9c, 0xc2, 0x42, 0xd3, 0x70, 0xfa, 0xce, + 0xeb, 0x4b, 0x4e, 0x62, 0xb8, 0x03, 0xe4, 0xc6, 0x9d, 0x1d, 0x58, 0x5c, 0x7b, 0x39, 0x03, 0xc4, + 0xdd, 0xf8, 0xf6, 0xdf, 0x2c, 0xc1, 0xf9, 0xcc, 0x8b, 0x04, 0x9e, 0x17, 0x74, 0x12, 0xba, 0xa9, + 0x41, 0xbf, 0x60, 0xc1, 0xa9, 0x76, 0xda, 0xe3, 0x10, 0x0b, 0xc7, 0xee, 0x07, 0x0a, 0xd3, 0x11, + 0x19, 0x97, 0xc6, 0xd4, 0xa8, 0x18, 0xa1, 0x53, 0x19, 0x40, 0x8c, 0xbb, 0xfa, 0x82, 0x5e, 0x85, + 0x7a, 0xdb, 0xd9, 0xba, 0x19, 0x36, 0x9d, 0x44, 0xee, 0x27, 0x7b, 0xbb, 0x01, 0x3a, 0x89, 0xeb, + 0x8d, 0xf3, 0xe3, 0xfa, 0xf1, 0x39, 0x3f, 0x59, 0x8c, 0x96, 0x93, 0xc8, 0xf5, 0x5b, 0xdc, 0x9d, + 0xb7, 0x20, 0xc9, 0x60, 0x4d, 0xd1, 0xfe, 0x8a, 0x95, 0x55, 0x52, 0x6a, 0x74, 0x22, 0x27, 0x21, + 0xad, 0x6d, 0xf4, 0x51, 0xa8, 0xd2, 0x8d, 0x9f, 0x1c, 0x95, 0xdb, 0x45, 0x6a, 0x4e, 0xe3, 0x4b, + 0x68, 0x25, 0x4a, 0xff, 0xc5, 0x98, 0x33, 0xb5, 0xff, 0xb8, 0x96, 0x35, 0x16, 
0xd8, 0xe1, 0xed, + 0x25, 0x80, 0x56, 0xb0, 0x42, 0xda, 0xa1, 0x47, 0x87, 0xc5, 0x62, 0x27, 0x00, 0xca, 0xd7, 0x31, + 0xab, 0x20, 0xd8, 0xc0, 0x42, 0x7f, 0xd9, 0x02, 0x68, 0xc9, 0x39, 0x2f, 0x0d, 0x81, 0x9b, 0x45, + 0xbe, 0x8e, 0x5e, 0x51, 0xba, 0x2f, 0x8a, 0x21, 0x36, 0x98, 0xa3, 0x9f, 0xb2, 0xa0, 0x96, 0xc8, + 0xee, 0x73, 0xd5, 0xb8, 0x52, 0x64, 0x4f, 0xe4, 0x4b, 0x6b, 0x9b, 0x48, 0x0d, 0x89, 0xe2, 0x8b, + 0x7e, 0xc6, 0x02, 0x88, 0xb7, 0xfd, 0xc6, 0x52, 0xe0, 0xb9, 0x8d, 0x6d, 0xa1, 0x31, 0x6f, 0x15, + 0xea, 0x8f, 0x51, 0xd4, 0xa7, 0x46, 0xe8, 0x68, 0xe8, 0xff, 0xd8, 0xe0, 0x8c, 0x3e, 0x0e, 0xb5, + 0x58, 0x4c, 0x37, 0xa1, 0x23, 0x57, 0x8a, 0xf5, 0x0a, 0x71, 0xda, 0x42, 0xbc, 0x8a, 0x7f, 0x58, + 0xf1, 0x44, 0x3f, 0x67, 0xc1, 0xc9, 0x30, 0xed, 0xe7, 0x13, 0xea, 0xb0, 0x38, 0x19, 0x90, 0xf1, + 0x23, 0x4e, 0x9d, 0xd9, 0xdd, 0x19, 0x3b, 0x99, 0x69, 0xc4, 0xd9, 0x5e, 0x50, 0x09, 0xa8, 0x67, + 0xf0, 0x62, 0xc8, 0x7d, 0x8e, 0x83, 0x5a, 0x02, 0xce, 0x66, 0x81, 0xb8, 0x1b, 0x1f, 0x2d, 0xc1, + 0x59, 0xda, 0xbb, 0x6d, 0x6e, 0x7e, 0x4a, 0xf5, 0x12, 0x33, 0x65, 0x58, 0x9b, 0x7a, 0x4c, 0xcc, + 0x10, 0xe6, 0xd5, 0xcf, 0xe2, 0xe0, 0xdc, 0x27, 0xd1, 0xef, 0x58, 0xf0, 0x98, 0xcb, 0xd4, 0x80, + 0xe9, 0x30, 0xd7, 0x1a, 0x41, 0x9c, 0xc4, 0x92, 0x42, 0x65, 0x45, 0x2f, 0xf5, 0x33, 0xf5, 0xfd, + 0xe2, 0x0d, 0x1e, 0x9b, 0xdb, 0xa3, 0x4b, 0x78, 0xcf, 0x0e, 0xdb, 0xdf, 0x2c, 0xa5, 0x8e, 0x35, + 0x94, 0x2f, 0x91, 0x49, 0x8d, 0x86, 0x74, 0xe3, 0x48, 0x21, 0x58, 0xa8, 0xd4, 0x50, 0x4e, 0x22, + 0x2d, 0x35, 0x54, 0x53, 0x8c, 0x0d, 0xe6, 0xd4, 0xb6, 0x3c, 0xed, 0x64, 0x3d, 0x96, 0x42, 0x90, + 0xbd, 0x5a, 0x64, 0x97, 0xba, 0x0f, 0xa1, 0xce, 0x8b, 0xae, 0x9d, 0xee, 0x02, 0xe1, 0xee, 0x2e, + 0xd9, 0xdf, 0x4c, 0x1f, 0xa5, 0x18, 0x6b, 0xb0, 0x8f, 0x63, 0xa2, 0x2f, 0x58, 0x30, 0x14, 0x05, + 0x9e, 0xe7, 0xfa, 0x2d, 0x2a, 0x2f, 0x84, 0xd2, 0xfb, 0xd0, 0xb1, 0xe8, 0x1d, 0x21, 0x18, 0x98, + 0x85, 0x8a, 0x35, 0x4f, 0x6c, 0x76, 0xc0, 0xfe, 0x23, 0x0b, 0x46, 0x7b, 0xc9, 0x35, 0x44, 0xe0, + 0x9d, 0x72, 0xd1, 0xaa, 0x20, 0x89, 0x45, 0x7f, 0x86, 0x78, 0x44, 0xf9, 0x8f, 0x6b, 0x53, 0x4f, + 0x8a, 0xd7, 0x7c, 0xe7, 0x52, 0x6f, 0x54, 0xbc, 0x17, 0x1d, 0xf4, 0x0a, 0x9c, 0x32, 0xde, 0x2b, + 0x56, 0x03, 0x53, 0x9f, 0x1a, 0xa7, 0x86, 0xc4, 0x64, 0x06, 0x76, 0x6f, 0x67, 0xec, 0x91, 0x6c, + 0x9b, 0x10, 0xbc, 0x5d, 0x74, 0xec, 0x5f, 0x2e, 0x65, 0xbf, 0x96, 0xd2, 0x99, 0x6f, 0x59, 0x5d, + 0xbb, 0xf2, 0x0f, 0x1c, 0x87, 0x9e, 0x62, 0xfb, 0x77, 0x15, 0x87, 0xd1, 0x1b, 0xe7, 0x01, 0x1e, + 0xf4, 0xda, 0xff, 0xb6, 0x02, 0x7b, 0xf4, 0xac, 0x0f, 0x23, 0xf8, 0xc0, 0xa7, 0x83, 0x9f, 0xb3, + 0xd4, 0xc9, 0x51, 0x99, 0x2d, 0xf2, 0xe6, 0x71, 0x8d, 0x3d, 0xdf, 0x87, 0xc4, 0x3c, 0xd8, 0x40, + 0x79, 0xa3, 0xd3, 0x67, 0x54, 0xe8, 0xab, 0x56, 0xfa, 0xec, 0x8b, 0x47, 0x8f, 0xb9, 0xc7, 0xd6, + 0x27, 0xe3, 0x40, 0x8d, 0x77, 0x4c, 0x1f, 0xc3, 0xf4, 0x3a, 0x6a, 0x1b, 0x07, 0x58, 0x73, 0x7d, + 0xc7, 0x73, 0xdf, 0xa0, 0xbb, 0x8c, 0x2a, 0x53, 0x94, 0xcc, 0xf2, 0xb8, 0xa2, 0x5a, 0xb1, 0x81, + 0x71, 0xe1, 0x2f, 0xc1, 0x90, 0xf1, 0xe6, 0x39, 0x31, 0x12, 0x67, 0xcd, 0x18, 0x89, 0xba, 0x11, + 0xda, 0x70, 0xe1, 0x7d, 0x70, 0x2a, 0xdb, 0xc1, 0x83, 0x3c, 0x6f, 0xff, 0xf9, 0x60, 0xf6, 0x30, + 0x6a, 0x85, 0x44, 0x6d, 0xda, 0xb5, 0xb7, 0x1d, 0x44, 0x6f, 0x3b, 0x88, 0xde, 0x76, 0x10, 0x99, + 0x3e, 0x7e, 0xe1, 0xfc, 0x18, 0xbc, 0x4f, 0xce, 0x8f, 0x94, 0x3b, 0xa7, 0x56, 0xb8, 0x3b, 0xc7, + 0xde, 0xad, 0x42, 0xca, 0x8e, 0xe2, 0xe3, 0xfd, 0x83, 0x30, 0x18, 0x91, 0x30, 0xb8, 0x89, 0xe7, + 0x85, 0x0e, 0xd1, 0x71, 0xf0, 0xbc, 0x19, 0x4b, 0x38, 0xd5, 0x35, 0xa1, 0x93, 0xac, 0x0b, 0x25, + 0xa2, 
0x74, 0xcd, 0x92, 0x93, 0xac, 0x63, 0x06, 0x41, 0xef, 0x83, 0x91, 0xc4, 0x89, 0x5a, 0xd4, + 0x6c, 0xde, 0x64, 0x9f, 0x55, 0x1c, 0x59, 0x3e, 0x22, 0x70, 0x47, 0x56, 0x52, 0x50, 0x9c, 0xc1, + 0x46, 0xaf, 0x43, 0x65, 0x9d, 0x78, 0x6d, 0x31, 0xe4, 0xcb, 0xc5, 0xc9, 0x78, 0xf6, 0xae, 0x57, + 0x89, 0xd7, 0xe6, 0x12, 0x88, 0xfe, 0xc2, 0x8c, 0x15, 0x9d, 0x6f, 0xf5, 0x8d, 0x4e, 0x9c, 0x04, + 0x6d, 0xf7, 0x0d, 0xe9, 0xa9, 0xfb, 0x40, 0xc1, 0x8c, 0xaf, 0x4b, 0xfa, 0xdc, 0x25, 0xa2, 0xfe, + 0x62, 0xcd, 0x99, 0xf5, 0xa3, 0xe9, 0x46, 0xec, 0x53, 0x6d, 0x0b, 0x87, 0x5b, 0xd1, 0xfd, 0x98, + 0x91, 0xf4, 0x79, 0x3f, 0xd4, 0x5f, 0xac, 0x39, 0xa3, 0x6d, 0x35, 0xef, 0x87, 0x58, 0x1f, 0x6e, + 0x16, 0xdc, 0x07, 0x3e, 0xe7, 0x73, 0xe7, 0xff, 0x93, 0x50, 0x6d, 0xac, 0x3b, 0x51, 0x32, 0x3a, + 0xcc, 0x26, 0x8d, 0x72, 0xcd, 0x4c, 0xd3, 0x46, 0xcc, 0x61, 0xe8, 0x71, 0x28, 0x47, 0x64, 0x8d, + 0x85, 0x5f, 0x1a, 0x81, 0x39, 0x98, 0xac, 0x61, 0xda, 0x6e, 0xff, 0x62, 0x29, 0x6d, 0x2e, 0xa5, + 0xdf, 0x9b, 0xcf, 0xf6, 0x46, 0x27, 0x8a, 0xa5, 0xfb, 0xc6, 0x98, 0xed, 0xac, 0x19, 0x4b, 0x38, + 0xfa, 0xa4, 0x05, 0x83, 0x77, 0xe2, 0xc0, 0xf7, 0x49, 0x22, 0x54, 0xd3, 0xad, 0x82, 0x87, 0xe2, + 0x1a, 0xa7, 0xae, 0xfb, 0x20, 0x1a, 0xb0, 0xe4, 0x4b, 0xbb, 0x4b, 0xb6, 0x1a, 0x5e, 0xa7, 0xd9, + 0x15, 0x6b, 0x71, 0x99, 0x37, 0x63, 0x09, 0xa7, 0xa8, 0xae, 0xcf, 0x51, 0x2b, 0x69, 0xd4, 0x39, + 0x5f, 0xa0, 0x0a, 0xb8, 0xfd, 0xd7, 0x07, 0xe0, 0x5c, 0xee, 0xe2, 0xa0, 0x86, 0x0c, 0x33, 0x15, + 0xae, 0xb8, 0x1e, 0x91, 0x51, 0x46, 0xcc, 0x90, 0xb9, 0xa5, 0x5a, 0xb1, 0x81, 0x81, 0x7e, 0x12, + 0x20, 0x74, 0x22, 0xa7, 0x4d, 0x94, 0x7b, 0xf5, 0xc8, 0xf6, 0x02, 0xed, 0xc7, 0x92, 0xa4, 0xa9, + 0xf7, 0xa6, 0xaa, 0x29, 0xc6, 0x06, 0x4b, 0xf4, 0x02, 0x0c, 0x45, 0xc4, 0x23, 0x4e, 0xcc, 0xa2, + 0x77, 0xb3, 0xa9, 0x08, 0x58, 0x83, 0xb0, 0x89, 0x87, 0x9e, 0x52, 0x01, 0x59, 0x99, 0xc0, 0x94, + 0x74, 0x50, 0x16, 0x7a, 0xd3, 0x82, 0x91, 0x35, 0xd7, 0x23, 0x9a, 0xbb, 0x48, 0x1c, 0x58, 0x3c, + 0xfa, 0x4b, 0x5e, 0x31, 0xe9, 0x6a, 0x09, 0x99, 0x6a, 0x8e, 0x71, 0x86, 0x3d, 0xfd, 0xcc, 0x9b, + 0x24, 0x62, 0xa2, 0x75, 0x20, 0xfd, 0x99, 0x6f, 0xf1, 0x66, 0x2c, 0xe1, 0x68, 0x12, 0x4e, 0x86, + 0x4e, 0x1c, 0x4f, 0x47, 0xa4, 0x49, 0xfc, 0xc4, 0x75, 0x3c, 0x1e, 0xd6, 0x5f, 0xd3, 0x61, 0xbd, + 0x4b, 0x69, 0x30, 0xce, 0xe2, 0xa3, 0x0f, 0xc2, 0xa3, 0xdc, 0x7f, 0xb1, 0xe0, 0xc6, 0xb1, 0xeb, + 0xb7, 0xf4, 0x34, 0x10, 0x6e, 0x9c, 0x31, 0x41, 0xea, 0xd1, 0xb9, 0x7c, 0x34, 0xdc, 0xeb, 0x79, + 0xf4, 0x0c, 0xd4, 0xe2, 0x0d, 0x37, 0x9c, 0x8e, 0x9a, 0x31, 0x3b, 0xbb, 0xa8, 0x69, 0xa7, 0xe1, + 0xb2, 0x68, 0xc7, 0x0a, 0x03, 0x35, 0x60, 0x98, 0x7f, 0x12, 0x1e, 0x51, 0x26, 0xe4, 0xe3, 0xb3, + 0x3d, 0xd5, 0xa3, 0xc8, 0x3c, 0x1b, 0xc7, 0xce, 0xdd, 0xcb, 0xf2, 0x24, 0x85, 0x3b, 0xfe, 0x6f, + 0x19, 0x64, 0x70, 0x8a, 0xa8, 0xfd, 0xf3, 0xa5, 0xf4, 0x8e, 0xdb, 0x5c, 0xa4, 0x28, 0xa6, 0x4b, + 0x31, 0xb9, 0xe5, 0x44, 0xd2, 0x1b, 0x73, 0xc4, 0xec, 0x03, 0x41, 0xf7, 0x96, 0x13, 0x99, 0x8b, + 0x9a, 0x31, 0xc0, 0x92, 0x13, 0xba, 0x03, 0x95, 0xc4, 0x73, 0x0a, 0x4a, 0x57, 0x32, 0x38, 0x6a, + 0x07, 0xc8, 0xfc, 0x64, 0x8c, 0x19, 0x0f, 0xf4, 0x18, 0xb5, 0xfa, 0x57, 0xe5, 0x49, 0x87, 0x30, + 0xd4, 0x57, 0x63, 0xcc, 0x5a, 0xed, 0x3f, 0xaf, 0xe7, 0xc8, 0x55, 0xa5, 0xc8, 0xd0, 0x25, 0x00, + 0xba, 0x81, 0x5c, 0x8a, 0xc8, 0x9a, 0xbb, 0x25, 0x0c, 0x09, 0xb5, 0x76, 0x6f, 0x28, 0x08, 0x36, + 0xb0, 0xe4, 0x33, 0xcb, 0x9d, 0x35, 0xfa, 0x4c, 0xa9, 0xfb, 0x19, 0x0e, 0xc1, 0x06, 0x16, 0x7a, + 0x1e, 0x06, 0xdc, 0xb6, 0xd3, 0x52, 0x91, 0x94, 0x8f, 0xd1, 0x45, 0x3b, 0xc7, 0x5a, 0xee, 0xed, + 0x8c, 0x8d, 0xa8, 0x0e, 0xb1, 
0x26, 0x2c, 0x70, 0xd1, 0x2f, 0x5b, 0x30, 0xdc, 0x08, 0xda, 0xed, + 0xc0, 0xe7, 0xdb, 0x2e, 0xb1, 0x87, 0xbc, 0x73, 0x5c, 0x6a, 0x7e, 0x7c, 0xda, 0x60, 0xc6, 0x37, + 0x91, 0x2a, 0xaf, 0xca, 0x04, 0xe1, 0x54, 0xaf, 0xcc, 0xb5, 0x5d, 0xdd, 0x67, 0x6d, 0xff, 0x9a, + 0x05, 0xa7, 0xf9, 0xb3, 0xc6, 0x6e, 0x50, 0xa4, 0x10, 0x05, 0xc7, 0xfc, 0x5a, 0x5d, 0x1b, 0x64, + 0xe5, 0xa5, 0xeb, 0x82, 0xe3, 0xee, 0x4e, 0xa2, 0x59, 0x38, 0xbd, 0x16, 0x44, 0x0d, 0x62, 0x0e, + 0x84, 0x10, 0x4c, 0x8a, 0xd0, 0x95, 0x2c, 0x02, 0xee, 0x7e, 0x06, 0xdd, 0x82, 0x47, 0x8c, 0x46, + 0x73, 0x1c, 0xb8, 0x6c, 0x7a, 0x42, 0x50, 0x7b, 0xe4, 0x4a, 0x2e, 0x16, 0xee, 0xf1, 0x74, 0xda, + 0x61, 0x52, 0xef, 0xc3, 0x61, 0xf2, 0x1a, 0x9c, 0x6f, 0x74, 0x8f, 0xcc, 0x66, 0xdc, 0x59, 0x8d, + 0xb9, 0xa4, 0xaa, 0x4d, 0x7d, 0x9f, 0x20, 0x70, 0x7e, 0xba, 0x17, 0x22, 0xee, 0x4d, 0x03, 0x7d, + 0x14, 0x6a, 0x11, 0x61, 0x5f, 0x25, 0x16, 0xf9, 0x34, 0x47, 0xdc, 0x25, 0x6b, 0x0b, 0x94, 0x93, + 0xd5, 0xb2, 0x57, 0x34, 0xc4, 0x58, 0x71, 0x44, 0x77, 0x61, 0x30, 0x74, 0x92, 0xc6, 0xba, 0xc8, + 0xa2, 0x39, 0x72, 0x18, 0x8b, 0x62, 0xbe, 0x44, 0xa9, 0xea, 0x49, 0xbe, 0xc4, 0x99, 0x60, 0xc9, + 0xed, 0xc2, 0xfb, 0xe1, 0x74, 0xd7, 0x42, 0x3a, 0x90, 0xb3, 0x64, 0x06, 0x1e, 0xc9, 0x9f, 0xb2, + 0x07, 0x72, 0x99, 0xfc, 0x93, 0x4c, 0xec, 0xa9, 0x61, 0xc6, 0xf6, 0xe1, 0x7e, 0x73, 0xa0, 0x4c, + 0xfc, 0x4d, 0x21, 0xc1, 0xaf, 0x1c, 0x6d, 0xe4, 0x2e, 0xfb, 0x9b, 0x7c, 0xc5, 0x31, 0x1f, 0xc3, + 0x65, 0x7f, 0x13, 0x53, 0xda, 0xe8, 0x4b, 0x56, 0xca, 0x0c, 0xe3, 0x4e, 0xbb, 0x0f, 0x1f, 0x8b, + 0xdd, 0xde, 0xb7, 0x65, 0x66, 0xff, 0xbb, 0x12, 0x5c, 0xdc, 0x8f, 0x48, 0x1f, 0xc3, 0xf7, 0x24, + 0x0c, 0xc4, 0xec, 0x34, 0x59, 0x88, 0xc4, 0x21, 0x3a, 0x53, 0xf8, 0xf9, 0xf2, 0x6b, 0x58, 0x80, + 0x90, 0x07, 0xe5, 0xb6, 0x13, 0x0a, 0x5f, 0xce, 0xdc, 0x51, 0xb3, 0x51, 0xe8, 0x7f, 0xc7, 0x5b, + 0x70, 0x42, 0xee, 0x21, 0x30, 0x1a, 0x30, 0x65, 0x83, 0x12, 0xa8, 0x3a, 0x51, 0xe4, 0xc8, 0xa3, + 0xcb, 0xeb, 0xc5, 0xf0, 0x9b, 0xa4, 0x24, 0xa7, 0x4e, 0xef, 0xee, 0x8c, 0x9d, 0x48, 0x35, 0x61, + 0xce, 0xcc, 0xfe, 0xdc, 0x60, 0x2a, 0x23, 0x83, 0x9d, 0x47, 0xc7, 0x30, 0x20, 0x5c, 0x38, 0x56, + 0xd1, 0x49, 0x40, 0x3c, 0xa5, 0x8e, 0xed, 0xd2, 0x44, 0x62, 0xb2, 0x60, 0x85, 0x3e, 0x6b, 0xb1, + 0xf4, 0x5f, 0x99, 0xa5, 0x22, 0xf6, 0x46, 0xc7, 0x93, 0x8d, 0x6c, 0x26, 0x15, 0xcb, 0x46, 0x6c, + 0x72, 0xa7, 0x3a, 0x33, 0xe4, 0x89, 0x6c, 0xd9, 0x1d, 0x92, 0x4c, 0x10, 0x96, 0x70, 0xb4, 0x95, + 0x73, 0xee, 0x5c, 0x40, 0x0a, 0x69, 0x1f, 0x27, 0xcd, 0x5f, 0xb5, 0xe0, 0xb4, 0x9b, 0x3d, 0x40, + 0x14, 0x3b, 0x89, 0x23, 0x46, 0x36, 0xf4, 0x3e, 0x9f, 0x54, 0xca, 0xb4, 0x0b, 0x84, 0xbb, 0x3b, + 0x83, 0x9a, 0x50, 0x71, 0xfd, 0xb5, 0x40, 0x98, 0x10, 0x53, 0x47, 0xeb, 0xd4, 0x9c, 0xbf, 0x16, + 0xe8, 0xd5, 0x4c, 0xff, 0x61, 0x46, 0x1d, 0xcd, 0xc3, 0xd9, 0x48, 0xf8, 0x7a, 0xae, 0xba, 0x31, + 0xdd, 0x91, 0xcf, 0xbb, 0x6d, 0x37, 0x61, 0xea, 0xbf, 0x3c, 0x35, 0xba, 0xbb, 0x33, 0x76, 0x16, + 0xe7, 0xc0, 0x71, 0xee, 0x53, 0xe8, 0x0d, 0x18, 0x94, 0xf9, 0xca, 0xb5, 0x22, 0x76, 0x65, 0xdd, + 0xf3, 0x5f, 0x4d, 0xa6, 0x65, 0x91, 0x9a, 0x2c, 0x19, 0xda, 0x6f, 0x0e, 0x41, 0xf7, 0xa1, 0x24, + 0xfa, 0x18, 0xd4, 0x23, 0x95, 0x43, 0x6d, 0x15, 0xa1, 0x2c, 0xe5, 0xf7, 0x15, 0x07, 0xa2, 0xca, + 0x10, 0xd1, 0xd9, 0xd2, 0x9a, 0x23, 0xdd, 0x2e, 0xc4, 0xfa, 0xec, 0xb2, 0x80, 0xb9, 0x2d, 0xb8, + 0xea, 0x73, 0xa9, 0x6d, 0xbf, 0x81, 0x19, 0x0f, 0x14, 0xc1, 0xc0, 0x3a, 0x71, 0xbc, 0x64, 0xbd, + 0x18, 0x17, 0xfa, 0x55, 0x46, 0x2b, 0x9b, 0x49, 0xc3, 0x5b, 0xb1, 0xe0, 0x84, 0xb6, 0x60, 0x70, + 0x9d, 0x4f, 0x00, 0x61, 0xc1, 0x2f, 0x1c, 0x75, 0x70, 
0x53, 0xb3, 0x4a, 0x7f, 0x6e, 0xd1, 0x80, + 0x25, 0x3b, 0x16, 0xb4, 0x62, 0x9c, 0xc7, 0xf3, 0xa5, 0x5b, 0x5c, 0x12, 0x51, 0xff, 0x87, 0xf1, + 0x1f, 0x81, 0xe1, 0x88, 0x34, 0x02, 0xbf, 0xe1, 0x7a, 0xa4, 0x39, 0x29, 0xdd, 0xe3, 0x07, 0x49, + 0x3d, 0x61, 0xbb, 0x60, 0x6c, 0xd0, 0xc0, 0x29, 0x8a, 0xe8, 0x33, 0x16, 0x8c, 0xa8, 0xc4, 0x4b, + 0xfa, 0x41, 0x88, 0x70, 0xc7, 0xce, 0x17, 0x94, 0xe6, 0xc9, 0x68, 0x4e, 0xa1, 0xdd, 0x9d, 0xb1, + 0x91, 0x74, 0x1b, 0xce, 0xf0, 0x45, 0xaf, 0x00, 0x04, 0xab, 0x3c, 0x32, 0x65, 0x32, 0x11, 0xbe, + 0xd9, 0x83, 0xbc, 0xea, 0x08, 0xcf, 0x41, 0x93, 0x14, 0xb0, 0x41, 0x0d, 0x5d, 0x07, 0xe0, 0xcb, + 0x66, 0x65, 0x3b, 0x94, 0x66, 0xbe, 0xcc, 0x1d, 0x82, 0x65, 0x05, 0xb9, 0xb7, 0x33, 0xd6, 0xed, + 0x2b, 0x63, 0x61, 0x03, 0xc6, 0xe3, 0xe8, 0x27, 0x60, 0x30, 0xee, 0xb4, 0xdb, 0x8e, 0xf2, 0xdc, + 0x16, 0x98, 0xd5, 0xc6, 0xe9, 0x1a, 0xa2, 0x88, 0x37, 0x60, 0xc9, 0x11, 0xdd, 0xa1, 0x42, 0x35, + 0x16, 0x4e, 0x3c, 0xb6, 0x8a, 0xb8, 0x4d, 0x30, 0xc4, 0xde, 0xe9, 0x3d, 0x32, 0xd0, 0x06, 0xe7, + 0xe0, 0xdc, 0xdb, 0x19, 0x7b, 0x24, 0xdd, 0x3e, 0x1f, 0x88, 0x3c, 0xb3, 0x5c, 0x9a, 0xe8, 0x9a, + 0x2c, 0x5f, 0x42, 0x5f, 0x5b, 0x66, 0xd5, 0x3f, 0xad, 0xcb, 0x97, 0xb0, 0xe6, 0xde, 0x63, 0x66, + 0x3e, 0x8c, 0x16, 0xe0, 0x4c, 0x23, 0xf0, 0x93, 0x28, 0xf0, 0x3c, 0x5e, 0x93, 0x87, 0xef, 0xb8, + 0xb8, 0x67, 0xf7, 0x9d, 0xa2, 0xdb, 0x67, 0xa6, 0xbb, 0x51, 0x70, 0xde, 0x73, 0xb6, 0x9f, 0x0e, + 0xd9, 0x13, 0x83, 0xf3, 0x3c, 0x0c, 0x93, 0xad, 0x84, 0x44, 0xbe, 0xe3, 0xdd, 0xc4, 0xf3, 0xd2, + 0xa7, 0xc9, 0xd6, 0xc0, 0x65, 0xa3, 0x1d, 0xa7, 0xb0, 0x90, 0xad, 0xdc, 0x0c, 0x46, 0xee, 0x24, + 0x77, 0x33, 0x48, 0xa7, 0x82, 0xfd, 0xbf, 0x4b, 0x29, 0x83, 0x6c, 0x25, 0x22, 0x04, 0x05, 0x50, + 0xf5, 0x83, 0xa6, 0x92, 0xfd, 0xd7, 0x8a, 0x91, 0xfd, 0x37, 0x82, 0xa6, 0x51, 0xe3, 0x84, 0xfe, + 0x8b, 0x31, 0xe7, 0xc3, 0x8a, 0x40, 0xc8, 0x6a, 0x19, 0x0c, 0x20, 0x36, 0x1a, 0x45, 0x72, 0x56, + 0x45, 0x20, 0x16, 0x4d, 0x46, 0x38, 0xcd, 0x17, 0x6d, 0x40, 0x75, 0x3d, 0x88, 0x13, 0xb9, 0xfd, + 0x38, 0xe2, 0x4e, 0xe7, 0x6a, 0x10, 0x27, 0xcc, 0x8a, 0x50, 0xaf, 0x4d, 0x5b, 0x62, 0xcc, 0x79, + 0xd8, 0xff, 0xd5, 0x4a, 0x79, 0xb0, 0x6f, 0xb3, 0xf0, 0xd5, 0x4d, 0xe2, 0xd3, 0x65, 0x6d, 0x06, + 0xfa, 0xfc, 0xc5, 0x4c, 0x32, 0xe0, 0xbb, 0x7a, 0x55, 0x9c, 0xba, 0x4b, 0x29, 0x8c, 0x33, 0x12, + 0x46, 0x4c, 0xd0, 0x27, 0xac, 0x74, 0x5a, 0x66, 0xa9, 0x88, 0x0d, 0x86, 0x99, 0x9a, 0xbc, 0x6f, + 0x86, 0xa7, 0xfd, 0x25, 0x0b, 0x06, 0xa7, 0x9c, 0xc6, 0x46, 0xb0, 0xb6, 0x86, 0x9e, 0x81, 0x5a, + 0xb3, 0x13, 0x99, 0x19, 0xa2, 0x6a, 0xdb, 0x3e, 0x23, 0xda, 0xb1, 0xc2, 0xa0, 0x73, 0x78, 0xcd, + 0x69, 0xc8, 0x04, 0xe5, 0x32, 0x9f, 0xc3, 0x57, 0x58, 0x0b, 0x16, 0x10, 0xf4, 0x02, 0x0c, 0xb5, + 0x9d, 0x2d, 0xf9, 0x70, 0xd6, 0x7d, 0xbe, 0xa0, 0x41, 0xd8, 0xc4, 0xb3, 0xff, 0x95, 0x05, 0xa3, + 0x53, 0x4e, 0xec, 0x36, 0x26, 0x3b, 0xc9, 0xfa, 0x94, 0x9b, 0xac, 0x76, 0x1a, 0x1b, 0x24, 0xe1, + 0x59, 0xe9, 0xb4, 0x97, 0x9d, 0x98, 0x2e, 0x25, 0xb5, 0xaf, 0x53, 0xbd, 0xbc, 0x29, 0xda, 0xb1, + 0xc2, 0x40, 0x6f, 0xc0, 0x50, 0xe8, 0xc4, 0xf1, 0xdd, 0x20, 0x6a, 0x62, 0xb2, 0x56, 0x4c, 0x4d, + 0x88, 0x65, 0xd2, 0x88, 0x48, 0x82, 0xc9, 0x9a, 0x38, 0xe2, 0xd5, 0xf4, 0xb1, 0xc9, 0xcc, 0xfe, + 0x82, 0x05, 0xe7, 0xa7, 0x88, 0x13, 0x91, 0x88, 0x95, 0x90, 0x50, 0x2f, 0x32, 0xed, 0x05, 0x9d, + 0x26, 0x7a, 0x1d, 0x6a, 0x09, 0x6d, 0xa6, 0xdd, 0xb2, 0x8a, 0xed, 0x16, 0x3b, 0xa1, 0x5d, 0x11, + 0xc4, 0xb1, 0x62, 0x63, 0xff, 0x0d, 0x0b, 0x86, 0xd9, 0x61, 0xd7, 0x0c, 0x49, 0x1c, 0xd7, 0xeb, + 0xaa, 0xb4, 0x64, 0xf5, 0x59, 0x69, 0xe9, 0x22, 0x54, 0xd6, 0x83, 0x36, 0xc9, 
0x1e, 0xd4, 0x5e, + 0x0d, 0xe8, 0xb6, 0x9a, 0x42, 0xd0, 0x73, 0xf4, 0xc3, 0xbb, 0x7e, 0xe2, 0xd0, 0x25, 0x20, 0x9d, + 0xa9, 0x27, 0xf9, 0x47, 0x57, 0xcd, 0xd8, 0xc4, 0xb1, 0x7f, 0xb3, 0x0e, 0x83, 0xe2, 0x34, 0xbf, + 0xef, 0xca, 0x04, 0x72, 0x7f, 0x5f, 0xea, 0xb9, 0xbf, 0x8f, 0x61, 0xa0, 0xc1, 0xea, 0xb8, 0x09, + 0x33, 0xf2, 0x7a, 0x21, 0xe1, 0x1f, 0xbc, 0x34, 0x9c, 0xee, 0x16, 0xff, 0x8f, 0x05, 0x2b, 0xf4, + 0x45, 0x0b, 0x4e, 0x36, 0x02, 0xdf, 0x27, 0x0d, 0x6d, 0xe3, 0x54, 0x8a, 0x38, 0xe5, 0x9f, 0x4e, + 0x13, 0xd5, 0x27, 0x2d, 0x19, 0x00, 0xce, 0xb2, 0x47, 0x2f, 0xc1, 0x09, 0x3e, 0x66, 0xb7, 0x52, + 0x1e, 0x60, 0x5d, 0x80, 0xc7, 0x04, 0xe2, 0x34, 0x2e, 0x1a, 0xe7, 0x9e, 0x74, 0x51, 0xea, 0x66, + 0x40, 0x1f, 0xdb, 0x19, 0x45, 0x6e, 0x0c, 0x0c, 0x14, 0x01, 0x8a, 0xc8, 0x5a, 0x44, 0xe2, 0x75, + 0x11, 0xed, 0xc0, 0xec, 0xab, 0xc1, 0xc3, 0x65, 0x31, 0xe3, 0x2e, 0x4a, 0x38, 0x87, 0x3a, 0xda, + 0x10, 0x1b, 0xcc, 0x5a, 0x11, 0x32, 0x54, 0x7c, 0xe6, 0x9e, 0xfb, 0xcc, 0x31, 0xa8, 0xc6, 0xeb, + 0x4e, 0xd4, 0x64, 0x76, 0x5d, 0x99, 0x67, 0xce, 0x2c, 0xd3, 0x06, 0xcc, 0xdb, 0xd1, 0x0c, 0x9c, + 0xca, 0x94, 0x0f, 0x8a, 0x85, 0xa7, 0x56, 0x65, 0x49, 0x64, 0x0a, 0x0f, 0xc5, 0xb8, 0xeb, 0x09, + 0xd3, 0xf9, 0x30, 0xb4, 0x8f, 0xf3, 0x61, 0x5b, 0xc5, 0xd4, 0x71, 0x1f, 0xea, 0xcb, 0x85, 0x0c, + 0x40, 0x5f, 0x01, 0x74, 0x9f, 0xcf, 0x04, 0xd0, 0x9d, 0x60, 0x1d, 0xb8, 0x55, 0x4c, 0x07, 0x0e, + 0x1e, 0x2d, 0xf7, 0x20, 0xa3, 0xdf, 0xfe, 0xcc, 0x02, 0xf9, 0x5d, 0xa7, 0x9d, 0xc6, 0x3a, 0xa1, + 0x53, 0x06, 0xbd, 0x0f, 0x46, 0xd4, 0x16, 0x7a, 0x3a, 0xe8, 0xf8, 0x3c, 0xf0, 0xad, 0xac, 0x8f, + 0x64, 0x71, 0x0a, 0x8a, 0x33, 0xd8, 0x68, 0x02, 0xea, 0x74, 0x9c, 0xf8, 0xa3, 0x5c, 0xd7, 0xaa, + 0x6d, 0xfa, 0xe4, 0xd2, 0x9c, 0x78, 0x4a, 0xe3, 0xa0, 0x00, 0x4e, 0x7b, 0x4e, 0x9c, 0xb0, 0x1e, + 0xd0, 0x1d, 0xf5, 0x21, 0x6b, 0x08, 0xb0, 0x50, 0xfc, 0xf9, 0x2c, 0x21, 0xdc, 0x4d, 0xdb, 0xfe, + 0x56, 0x05, 0x4e, 0xa4, 0x24, 0xe3, 0x01, 0x95, 0xf4, 0x33, 0x50, 0x93, 0x7a, 0x33, 0x5b, 0xed, + 0x44, 0x29, 0x57, 0x85, 0x41, 0x95, 0xd6, 0xaa, 0xd6, 0xaa, 0x59, 0xa3, 0xc2, 0x50, 0xb8, 0xd8, + 0xc4, 0x63, 0x42, 0x39, 0xf1, 0xe2, 0x69, 0xcf, 0x25, 0x7e, 0xc2, 0xbb, 0x59, 0x8c, 0x50, 0x5e, + 0x99, 0x5f, 0x36, 0x89, 0x6a, 0xa1, 0x9c, 0x01, 0xe0, 0x2c, 0x7b, 0xf4, 0x69, 0x0b, 0x4e, 0x38, + 0x77, 0x63, 0x5d, 0x6c, 0x54, 0x84, 0xca, 0x1d, 0x51, 0x49, 0xa5, 0xea, 0x97, 0x72, 0x97, 0x6f, + 0xaa, 0x09, 0xa7, 0x99, 0xa2, 0xb7, 0x2c, 0x40, 0x64, 0x8b, 0x34, 0x64, 0x30, 0x9f, 0xe8, 0xcb, + 0x40, 0x11, 0x3b, 0xcd, 0xcb, 0x5d, 0x74, 0xb9, 0x54, 0xef, 0x6e, 0xc7, 0x39, 0x7d, 0xb0, 0xff, + 0x79, 0x59, 0x2d, 0x28, 0x1d, 0x3f, 0xea, 0x18, 0x71, 0x6c, 0xd6, 0xe1, 0xe3, 0xd8, 0x74, 0x3c, + 0x40, 0x77, 0x6a, 0x62, 0x2a, 0x93, 0xa9, 0xf4, 0x80, 0x32, 0x99, 0x7e, 0xca, 0x4a, 0xd5, 0xf5, + 0x19, 0xba, 0xf4, 0x4a, 0xb1, 0xb1, 0xab, 0xe3, 0x3c, 0x56, 0x21, 0x23, 0xdd, 0xd3, 0x21, 0x2a, + 0x54, 0x9a, 0x1a, 0x68, 0x07, 0x92, 0x86, 0xff, 0xb1, 0x0c, 0x43, 0x86, 0x26, 0xcd, 0x35, 0x8b, + 0xac, 0x87, 0xcc, 0x2c, 0x2a, 0x1d, 0xc0, 0x2c, 0xfa, 0x49, 0xa8, 0x37, 0xa4, 0x94, 0x2f, 0xa6, + 0xb2, 0x6d, 0x56, 0x77, 0x68, 0x41, 0xaf, 0x9a, 0xb0, 0xe6, 0x89, 0x66, 0x53, 0x89, 0x33, 0x42, + 0x43, 0x54, 0x98, 0x86, 0xc8, 0xcb, 0x6c, 0x11, 0x9a, 0xa2, 0xfb, 0x19, 0x56, 0xfe, 0x29, 0x74, + 0xc5, 0x7b, 0xc9, 0x08, 0x73, 0x5e, 0xfe, 0x69, 0x69, 0x4e, 0x36, 0x63, 0x13, 0xc7, 0xfe, 0x96, + 0xa5, 0x3e, 0xee, 0x7d, 0x28, 0x74, 0x70, 0x27, 0x5d, 0xe8, 0xe0, 0x72, 0x21, 0xc3, 0xdc, 0xa3, + 0xc2, 0xc1, 0x0d, 0x18, 0x9c, 0x0e, 0xda, 0x6d, 0xc7, 0x6f, 0xa2, 0x1f, 0x80, 0xc1, 0x06, 0xff, + 0x29, 
0x1c, 0x3b, 0xec, 0x78, 0x50, 0x40, 0xb1, 0x84, 0xa1, 0xc7, 0xa0, 0xe2, 0x44, 0x2d, 0xe9, + 0xcc, 0x61, 0xa1, 0x2d, 0x93, 0x51, 0x2b, 0xc6, 0xac, 0xd5, 0xfe, 0xc7, 0x15, 0x80, 0xe9, 0xa0, + 0x1d, 0x3a, 0x11, 0x69, 0xae, 0x04, 0xac, 0xb2, 0xde, 0xb1, 0x1e, 0xaa, 0xe9, 0xcd, 0xd2, 0xc3, + 0x7c, 0xb0, 0x66, 0x1c, 0xae, 0x94, 0xef, 0xf3, 0xe1, 0x4a, 0x8f, 0xf3, 0xb2, 0xca, 0x43, 0x74, + 0x5e, 0x66, 0x7f, 0xce, 0x02, 0x44, 0x27, 0x4d, 0xe0, 0x13, 0x3f, 0xd1, 0x07, 0xda, 0x13, 0x50, + 0x6f, 0xc8, 0x56, 0x61, 0x58, 0x69, 0x11, 0x21, 0x01, 0x58, 0xe3, 0xf4, 0xb1, 0x43, 0x7e, 0x52, + 0xca, 0xef, 0x72, 0x3a, 0x2a, 0x96, 0x49, 0x7d, 0x21, 0xce, 0xed, 0xdf, 0x2a, 0xc1, 0x23, 0x5c, + 0x25, 0x2f, 0x38, 0xbe, 0xd3, 0x22, 0x6d, 0xda, 0xab, 0x7e, 0x43, 0x14, 0x1a, 0x74, 0x6b, 0xe6, + 0xca, 0x28, 0xd7, 0xa3, 0xae, 0x5d, 0xbe, 0xe6, 0xf8, 0x2a, 0x9b, 0xf3, 0xdd, 0x04, 0x33, 0xe2, + 0x28, 0x86, 0x9a, 0x2c, 0xe5, 0x2e, 0x64, 0x71, 0x41, 0x8c, 0x94, 0x58, 0x12, 0x7a, 0x93, 0x60, + 0xc5, 0x88, 0x1a, 0xae, 0x5e, 0xd0, 0xd8, 0xc0, 0x24, 0x0c, 0x98, 0xdc, 0x35, 0x82, 0x0c, 0xe7, + 0x45, 0x3b, 0x56, 0x18, 0xf6, 0x6f, 0x59, 0x90, 0xd5, 0x48, 0x46, 0x09, 0x33, 0x6b, 0xcf, 0x12, + 0x66, 0x07, 0xa8, 0x21, 0xf6, 0xe3, 0x30, 0xe4, 0x24, 0xd4, 0x88, 0xe0, 0xdb, 0xee, 0xf2, 0xe1, + 0x8e, 0x35, 0x16, 0x82, 0xa6, 0xbb, 0xe6, 0xb2, 0xed, 0xb6, 0x49, 0xce, 0xfe, 0x9f, 0x15, 0x38, + 0xdd, 0x95, 0x8b, 0x81, 0x5e, 0x84, 0xe1, 0x86, 0x98, 0x1e, 0xa1, 0x74, 0x68, 0xd5, 0xcd, 0xa0, + 0x34, 0x0d, 0xc3, 0x29, 0xcc, 0x3e, 0x26, 0xe8, 0x1c, 0x9c, 0x89, 0xe8, 0x46, 0xbf, 0x43, 0x26, + 0xd7, 0x12, 0x12, 0x2d, 0x93, 0x46, 0xe0, 0x37, 0x79, 0xa1, 0xbd, 0xf2, 0xd4, 0xa3, 0xbb, 0x3b, + 0x63, 0x67, 0x70, 0x37, 0x18, 0xe7, 0x3d, 0x83, 0x42, 0x38, 0xe1, 0x99, 0x36, 0xa0, 0xd8, 0x00, + 0x1c, 0xca, 0x7c, 0x54, 0x36, 0x42, 0xaa, 0x19, 0xa7, 0x19, 0xa4, 0x0d, 0xc9, 0xea, 0x03, 0x32, + 0x24, 0x3f, 0xa5, 0x0d, 0x49, 0x7e, 0xfe, 0xfe, 0xa1, 0x82, 0x73, 0x71, 0x8e, 0xdb, 0x92, 0x7c, + 0x19, 0x6a, 0x32, 0x36, 0xa9, 0xaf, 0x98, 0x1e, 0x93, 0x4e, 0x0f, 0x89, 0xf6, 0x14, 0x7c, 0xff, + 0xe5, 0x28, 0x32, 0x06, 0xf3, 0x46, 0x90, 0x4c, 0x7a, 0x5e, 0x70, 0x97, 0x2a, 0xe9, 0x9b, 0x31, + 0x11, 0x1e, 0x16, 0xfb, 0x5e, 0x09, 0x72, 0x36, 0x2b, 0x74, 0x3d, 0x6a, 0xcb, 0x20, 0xb5, 0x1e, + 0x0f, 0x66, 0x1d, 0xa0, 0x2d, 0x1e, 0xbf, 0xc5, 0x75, 0xe0, 0x07, 0x8b, 0xde, 0x6c, 0xe9, 0x90, + 0x2e, 0x95, 0xca, 0xa0, 0xc2, 0xba, 0x2e, 0x01, 0x68, 0x83, 0x4e, 0x04, 0xaa, 0xab, 0xe3, 0x61, + 0x6d, 0xf7, 0x61, 0x03, 0x8b, 0xee, 0xbd, 0x5d, 0x3f, 0x4e, 0x1c, 0xcf, 0xbb, 0xea, 0xfa, 0x89, + 0x70, 0x22, 0x2a, 0x65, 0x3f, 0xa7, 0x41, 0xd8, 0xc4, 0xbb, 0xf0, 0x1e, 0xe3, 0xfb, 0x1d, 0xe4, + 0xbb, 0xaf, 0xc3, 0xf9, 0x59, 0x37, 0x51, 0xe9, 0x15, 0x6a, 0xbe, 0x51, 0x7b, 0x4d, 0xa5, 0x0b, + 0x59, 0x3d, 0xd3, 0x85, 0x8c, 0xf4, 0x86, 0x52, 0x3a, 0x1b, 0x23, 0x9b, 0xde, 0x60, 0xbf, 0x08, + 0x67, 0x67, 0xdd, 0xe4, 0x8a, 0xeb, 0x91, 0x03, 0x32, 0xb1, 0x7f, 0x63, 0x00, 0x86, 0xcd, 0x04, + 0xbd, 0x83, 0x64, 0x3c, 0x7d, 0x81, 0x9a, 0x64, 0xe2, 0xed, 0x5c, 0x75, 0xb8, 0x76, 0xfb, 0xc8, + 0xd9, 0x82, 0xf9, 0x23, 0x66, 0x58, 0x65, 0x9a, 0x27, 0x36, 0x3b, 0x80, 0xee, 0x42, 0x75, 0x8d, + 0x85, 0xdf, 0x97, 0x8b, 0x88, 0x40, 0xc8, 0x1b, 0x51, 0xbd, 0x1c, 0x79, 0x00, 0x3f, 0xe7, 0x47, + 0x35, 0x69, 0x94, 0xce, 0xe9, 0x32, 0x42, 0x46, 0x45, 0x36, 0x97, 0xc2, 0xe8, 0xa5, 0x12, 0xaa, + 0x87, 0x50, 0x09, 0x29, 0x01, 0x3d, 0xf0, 0x80, 0x04, 0x34, 0x4b, 0xa5, 0x48, 0xd6, 0x99, 0x9d, + 0x27, 0x62, 0xdc, 0x07, 0xd9, 0x20, 0x18, 0xa9, 0x14, 0x29, 0x30, 0xce, 0xe2, 0xa3, 0x8f, 0x2b, + 0x11, 0x5f, 0x2b, 0xc2, 0xff, 
0x6a, 0xce, 0xe8, 0xe3, 0x96, 0xee, 0x9f, 0x2b, 0xc1, 0xc8, 0xac, + 0xdf, 0x59, 0x9a, 0x5d, 0xea, 0xac, 0x7a, 0x6e, 0xe3, 0x3a, 0xd9, 0xa6, 0x22, 0x7c, 0x83, 0x6c, + 0xcf, 0xcd, 0x88, 0x15, 0xa4, 0xe6, 0xcc, 0x75, 0xda, 0x88, 0x39, 0x8c, 0x0a, 0xa3, 0x35, 0xd7, + 0x6f, 0x91, 0x28, 0x8c, 0x5c, 0xe1, 0x1a, 0x35, 0x84, 0xd1, 0x15, 0x0d, 0xc2, 0x26, 0x1e, 0xa5, + 0x1d, 0xdc, 0xf5, 0x49, 0x94, 0x35, 0x78, 0x17, 0x69, 0x23, 0xe6, 0x30, 0x8a, 0x94, 0x44, 0x9d, + 0x38, 0x11, 0x93, 0x51, 0x21, 0xad, 0xd0, 0x46, 0xcc, 0x61, 0x74, 0xa5, 0xc7, 0x9d, 0x55, 0x16, + 0xe0, 0x91, 0x09, 0xa8, 0x5f, 0xe6, 0xcd, 0x58, 0xc2, 0x29, 0xea, 0x06, 0xd9, 0x9e, 0xa1, 0xbb, + 0xe3, 0x4c, 0x5e, 0xcd, 0x75, 0xde, 0x8c, 0x25, 0x9c, 0x55, 0x12, 0x4c, 0x0f, 0xc7, 0x77, 0x5d, + 0x25, 0xc1, 0x74, 0xf7, 0x7b, 0xec, 0xb3, 0x7f, 0xc9, 0x82, 0x61, 0x33, 0x2c, 0x0b, 0xb5, 0x32, + 0xb6, 0xf0, 0x62, 0x57, 0x21, 0xda, 0x1f, 0xcd, 0xbb, 0x99, 0xab, 0xe5, 0x26, 0x41, 0x18, 0x3f, + 0x4b, 0xfc, 0x96, 0xeb, 0x13, 0x76, 0xda, 0xce, 0xc3, 0xb9, 0x52, 0x31, 0x5f, 0xd3, 0x41, 0x93, + 0x1c, 0xc2, 0x98, 0xb6, 0x6f, 0xc3, 0xe9, 0xae, 0x64, 0xaa, 0x3e, 0x4c, 0x90, 0x7d, 0x53, 0x59, + 0x6d, 0x0c, 0x43, 0x94, 0xb0, 0xac, 0x66, 0x33, 0x0d, 0xa7, 0xf9, 0x42, 0xa2, 0x9c, 0x96, 0x1b, + 0xeb, 0xa4, 0xad, 0x12, 0xe4, 0x98, 0x1f, 0xfe, 0x56, 0x16, 0x88, 0xbb, 0xf1, 0xed, 0xcf, 0x5b, + 0x70, 0x22, 0x95, 0xdf, 0x56, 0x90, 0xb1, 0xc4, 0x56, 0x5a, 0xc0, 0xa2, 0x04, 0x59, 0xa8, 0x74, + 0x99, 0x29, 0x53, 0xbd, 0xd2, 0x34, 0x08, 0x9b, 0x78, 0xf6, 0x97, 0x4a, 0x50, 0x93, 0x91, 0x16, + 0x7d, 0x74, 0xe5, 0xb3, 0x16, 0x9c, 0x50, 0x67, 0x1f, 0xcc, 0xa9, 0x56, 0x2a, 0x22, 0x19, 0x81, + 0xf6, 0x40, 0x6d, 0xcb, 0xfd, 0xb5, 0x40, 0x5b, 0xee, 0xd8, 0x64, 0x86, 0xd3, 0xbc, 0xd1, 0x2d, + 0x80, 0x78, 0x3b, 0x4e, 0x48, 0xdb, 0x70, 0xef, 0xd9, 0xc6, 0x8a, 0x1b, 0x6f, 0x04, 0x11, 0xa1, + 0xeb, 0xeb, 0x46, 0xd0, 0x24, 0xcb, 0x0a, 0x53, 0x9b, 0x50, 0xba, 0x0d, 0x1b, 0x94, 0xec, 0x7f, + 0x58, 0x82, 0x53, 0xd9, 0x2e, 0xa1, 0x0f, 0xc1, 0xb0, 0xe4, 0x6e, 0xdc, 0x32, 0x26, 0xc3, 0x4b, + 0x86, 0xb1, 0x01, 0xbb, 0xb7, 0x33, 0x36, 0xd6, 0x7d, 0xcb, 0xdb, 0xb8, 0x89, 0x82, 0x53, 0xc4, + 0xf8, 0x01, 0x94, 0x38, 0x29, 0x9d, 0xda, 0x9e, 0x0c, 0x43, 0x71, 0x8a, 0x64, 0x1c, 0x40, 0x99, + 0x50, 0x9c, 0xc1, 0x46, 0x4b, 0x70, 0xd6, 0x68, 0xb9, 0x41, 0xdc, 0xd6, 0xfa, 0x6a, 0x10, 0xc9, + 0x1d, 0xd8, 0x63, 0x3a, 0x00, 0xac, 0x1b, 0x07, 0xe7, 0x3e, 0x49, 0xb5, 0x7d, 0xc3, 0x09, 0x9d, + 0x86, 0x9b, 0x6c, 0x0b, 0x7f, 0xa5, 0x92, 0x4d, 0xd3, 0xa2, 0x1d, 0x2b, 0x0c, 0x7b, 0x01, 0x2a, + 0x7d, 0xce, 0xa0, 0xbe, 0x2c, 0xff, 0x97, 0xa1, 0x46, 0xc9, 0x49, 0xf3, 0xae, 0x08, 0x92, 0x01, + 0xd4, 0xe4, 0x45, 0x21, 0xc8, 0x86, 0xb2, 0xeb, 0xc8, 0x33, 0x3e, 0xf5, 0x5a, 0x73, 0x71, 0xdc, + 0x61, 0x9b, 0x69, 0x0a, 0x44, 0x4f, 0x42, 0x99, 0x6c, 0x85, 0xd9, 0xc3, 0xbc, 0xcb, 0x5b, 0xa1, + 0x1b, 0x91, 0x98, 0x22, 0x91, 0xad, 0x10, 0x5d, 0x80, 0x92, 0xdb, 0x14, 0x4a, 0x0a, 0x04, 0x4e, + 0x69, 0x6e, 0x06, 0x97, 0xdc, 0xa6, 0xbd, 0x05, 0x75, 0x75, 0x33, 0x09, 0xda, 0x90, 0xb2, 0xdb, + 0x2a, 0x22, 0x34, 0x4a, 0xd2, 0xed, 0x21, 0xb5, 0x3b, 0x00, 0x3a, 0xd1, 0xaf, 0x28, 0xf9, 0x72, + 0x11, 0x2a, 0x8d, 0x40, 0x24, 0x21, 0xd7, 0x34, 0x19, 0x26, 0xb4, 0x19, 0xc4, 0xbe, 0x0d, 0x23, + 0xd7, 0xfd, 0xe0, 0x2e, 0x2b, 0xab, 0xce, 0xaa, 0x88, 0x51, 0xc2, 0x6b, 0xf4, 0x47, 0xd6, 0x44, + 0x60, 0x50, 0xcc, 0x61, 0xaa, 0x2e, 0x53, 0xa9, 0x57, 0x5d, 0x26, 0xfb, 0x13, 0x16, 0x0c, 0xab, + 0x8c, 0xa1, 0xd9, 0xcd, 0x0d, 0x4a, 0xb7, 0x15, 0x05, 0x9d, 0x30, 0x4b, 0x97, 0xdd, 0x1d, 0x84, + 0x39, 0xcc, 0x4c, 0xa5, 0x2b, 0xed, 0x93, 0x4a, 0x77, 
0x11, 0x2a, 0x1b, 0xae, 0xdf, 0xcc, 0x5e, + 0x86, 0x71, 0xdd, 0xf5, 0x9b, 0x98, 0x41, 0x68, 0x17, 0x4e, 0xa9, 0x2e, 0x48, 0x85, 0xf0, 0x22, + 0x0c, 0xaf, 0x76, 0x5c, 0xaf, 0x29, 0xcb, 0xa3, 0x65, 0x3c, 0x2a, 0x53, 0x06, 0x0c, 0xa7, 0x30, + 0xe9, 0xbe, 0x6e, 0xd5, 0xf5, 0x9d, 0x68, 0x7b, 0x49, 0x6b, 0x20, 0x25, 0x94, 0xa6, 0x14, 0x04, + 0x1b, 0x58, 0xf6, 0x9b, 0x65, 0x18, 0x49, 0xe7, 0x4d, 0xf5, 0xb1, 0xbd, 0x7a, 0x12, 0xaa, 0x2c, + 0x95, 0x2a, 0xfb, 0x69, 0xd9, 0xf3, 0x98, 0xc3, 0x50, 0x0c, 0x03, 0xbc, 0x08, 0x43, 0x31, 0x17, + 0xc9, 0xa8, 0x4e, 0x2a, 0x3f, 0x0c, 0x8b, 0x3b, 0x13, 0x75, 0x1f, 0x04, 0x2b, 0xf4, 0x69, 0x0b, + 0x06, 0x83, 0xd0, 0xac, 0xe7, 0xf3, 0xc1, 0x22, 0x73, 0xca, 0x44, 0x52, 0x8d, 0xb0, 0x88, 0xd5, + 0xa7, 0x97, 0x9f, 0x43, 0xb2, 0xbe, 0xf0, 0x5e, 0x18, 0x36, 0x31, 0xf7, 0x33, 0x8a, 0x6b, 0xa6, + 0x51, 0xfc, 0x59, 0x73, 0x52, 0x88, 0xac, 0xb9, 0x3e, 0x96, 0xdb, 0x4d, 0xa8, 0x36, 0x54, 0xa0, + 0xc0, 0xa1, 0x8a, 0x6a, 0xaa, 0xaa, 0x08, 0xec, 0xb0, 0x88, 0x53, 0xb3, 0xbf, 0x65, 0x19, 0xf3, + 0x03, 0x93, 0x78, 0xae, 0x89, 0x22, 0x28, 0xb7, 0x36, 0x37, 0x84, 0x29, 0x7a, 0xad, 0xa0, 0xe1, + 0x9d, 0xdd, 0xdc, 0xd0, 0x73, 0xdc, 0x6c, 0xc5, 0x94, 0x59, 0x1f, 0xce, 0xc2, 0x54, 0x72, 0x65, + 0x79, 0xff, 0xe4, 0x4a, 0xfb, 0xad, 0x12, 0x9c, 0xee, 0x9a, 0x54, 0xe8, 0x0d, 0xa8, 0x46, 0xf4, + 0x2d, 0xc5, 0xeb, 0xcd, 0x17, 0x96, 0x0e, 0x19, 0xcf, 0x35, 0xb5, 0xde, 0x4d, 0xb7, 0x63, 0xce, + 0x12, 0x5d, 0x03, 0xa4, 0xc3, 0x59, 0x94, 0xa7, 0x92, 0xbf, 0xf2, 0x05, 0xf1, 0x28, 0x9a, 0xec, + 0xc2, 0xc0, 0x39, 0x4f, 0xa1, 0x97, 0xb2, 0x0e, 0xcf, 0x72, 0xfa, 0x7c, 0x73, 0x2f, 0xdf, 0xa5, + 0xfd, 0x2f, 0x4a, 0x70, 0x22, 0x55, 0x5e, 0x09, 0x79, 0x50, 0x23, 0x1e, 0x73, 0xfe, 0x4b, 0x65, + 0x73, 0xd4, 0xa2, 0xc3, 0x4a, 0x41, 0x5e, 0x16, 0x74, 0xb1, 0xe2, 0xf0, 0x70, 0x1c, 0xc2, 0xbf, + 0x08, 0xc3, 0xb2, 0x43, 0x1f, 0x74, 0xda, 0x9e, 0x18, 0x40, 0x35, 0x47, 0x2f, 0x1b, 0x30, 0x9c, + 0xc2, 0xb4, 0x7f, 0xbb, 0x0c, 0xa3, 0xfc, 0xb4, 0xa4, 0xa9, 0x66, 0xde, 0x82, 0xdc, 0x6f, 0xfd, + 0x15, 0x5d, 0x04, 0x8d, 0x0f, 0xe4, 0xea, 0x51, 0x6b, 0xfc, 0xe7, 0x33, 0xea, 0x2b, 0x82, 0xeb, + 0x17, 0x32, 0x11, 0x5c, 0xdc, 0xec, 0x6e, 0x1d, 0x53, 0x8f, 0xbe, 0xbb, 0x42, 0xba, 0xfe, 0x5e, + 0x09, 0x4e, 0x66, 0x2e, 0x50, 0x40, 0x6f, 0xa6, 0x6b, 0xee, 0x5a, 0x45, 0xf8, 0xd4, 0xf7, 0xac, + 0xa9, 0x7f, 0xb0, 0xca, 0xbb, 0x0f, 0x68, 0xa9, 0xd8, 0xbf, 0x57, 0x82, 0x91, 0xf4, 0xcd, 0x0f, + 0x0f, 0xe1, 0x48, 0xbd, 0x1b, 0xea, 0xac, 0xb8, 0x39, 0xbb, 0xd1, 0x92, 0xbb, 0xe4, 0x79, 0x1d, + 0x69, 0xd9, 0x88, 0x35, 0xfc, 0xa1, 0x28, 0x68, 0x6c, 0xff, 0x7d, 0x0b, 0xce, 0xf1, 0xb7, 0xcc, + 0xce, 0xc3, 0xbf, 0x9a, 0x37, 0xba, 0xaf, 0x16, 0xdb, 0xc1, 0x4c, 0xf1, 0xbe, 0xfd, 0xc6, 0x97, + 0xdd, 0xa4, 0x27, 0x7a, 0x9b, 0x9e, 0x0a, 0x0f, 0x61, 0x67, 0x0f, 0x34, 0x19, 0xec, 0xdf, 0x2b, + 0x83, 0xbe, 0x3c, 0x10, 0xb9, 0x22, 0x17, 0xb2, 0x90, 0x22, 0x86, 0xcb, 0xdb, 0x7e, 0x43, 0x5f, + 0x53, 0x58, 0xcb, 0xa4, 0x42, 0xfe, 0xac, 0x05, 0x43, 0xae, 0xef, 0x26, 0xae, 0xc3, 0xb6, 0xd1, + 0xc5, 0x5c, 0x6c, 0xa6, 0xd8, 0xcd, 0x71, 0xca, 0x41, 0x64, 0x9e, 0xe3, 0x28, 0x66, 0xd8, 0xe4, + 0x8c, 0x3e, 0x22, 0x82, 0xac, 0xcb, 0x85, 0x65, 0xf1, 0xd6, 0x32, 0x91, 0xd5, 0x21, 0x35, 0xbc, + 0x92, 0xa8, 0xa0, 0xe4, 0x77, 0x4c, 0x49, 0xa9, 0x7a, 0xb8, 0xfa, 0x1a, 0x67, 0xda, 0x8c, 0x39, + 0x23, 0x3b, 0x06, 0xd4, 0x3d, 0x16, 0x07, 0x0c, 0x60, 0x9d, 0x80, 0xba, 0xd3, 0x49, 0x82, 0x36, + 0x1d, 0x26, 0x71, 0xd4, 0xa4, 0x43, 0x74, 0x25, 0x00, 0x6b, 0x1c, 0xfb, 0xcd, 0x2a, 0x64, 0x92, + 0x13, 0xd1, 0x96, 0x79, 0xf1, 0xa5, 0x55, 0xec, 0xc5, 0x97, 0xaa, 0x33, 0x79, 
0x97, 0x5f, 0xa2, + 0x16, 0x54, 0xc3, 0x75, 0x27, 0x96, 0x66, 0xf5, 0xcb, 0x6a, 0x1f, 0x47, 0x1b, 0xef, 0xed, 0x8c, + 0xfd, 0x58, 0x7f, 0x5e, 0x57, 0x3a, 0x57, 0x27, 0x78, 0x91, 0x11, 0xcd, 0x9a, 0xd1, 0xc0, 0x9c, + 0xfe, 0x41, 0xae, 0x76, 0xfb, 0xa4, 0xa8, 0xe2, 0x8e, 0x49, 0xdc, 0xf1, 0x12, 0x31, 0x1b, 0x5e, + 0x2e, 0x70, 0x95, 0x71, 0xc2, 0x3a, 0xad, 0x9e, 0xff, 0xc7, 0x06, 0x53, 0xf4, 0x21, 0xa8, 0xc7, + 0x89, 0x13, 0x25, 0x87, 0x4c, 0x84, 0x55, 0x83, 0xbe, 0x2c, 0x89, 0x60, 0x4d, 0x0f, 0xbd, 0xc2, + 0x6a, 0xba, 0xba, 0xf1, 0xfa, 0x21, 0x73, 0x23, 0x64, 0xfd, 0x57, 0x41, 0x01, 0x1b, 0xd4, 0xd0, + 0x25, 0x00, 0x36, 0xb7, 0x79, 0x40, 0x60, 0x8d, 0x79, 0x99, 0x94, 0x28, 0xc4, 0x0a, 0x82, 0x0d, + 0x2c, 0xfb, 0x87, 0x20, 0x5d, 0x17, 0x02, 0x8d, 0xc9, 0x32, 0x14, 0xdc, 0x0b, 0xcd, 0x72, 0x1c, + 0x52, 0x15, 0x23, 0x7e, 0xcd, 0x02, 0xb3, 0x78, 0x05, 0x7a, 0x9d, 0x57, 0xc9, 0xb0, 0x8a, 0x38, + 0x39, 0x34, 0xe8, 0x8e, 0x2f, 0x38, 0x61, 0xe6, 0x08, 0x5b, 0x96, 0xca, 0xb8, 0xf0, 0x1e, 0xa8, + 0x49, 0xe8, 0x81, 0x8c, 0xba, 0x8f, 0xc3, 0x99, 0xec, 0xb5, 0xe0, 0xe2, 0xd4, 0x69, 0x7f, 0xd7, + 0x8f, 0xf4, 0xe7, 0x94, 0x7a, 0xf9, 0x73, 0xfa, 0xb8, 0xfe, 0xf4, 0xd7, 0x2d, 0xb8, 0xb8, 0xdf, + 0xed, 0xe5, 0xe8, 0x31, 0xa8, 0xdc, 0x75, 0x22, 0x59, 0x6c, 0x9b, 0x09, 0xca, 0xdb, 0x4e, 0xe4, + 0x63, 0xd6, 0x8a, 0xb6, 0x61, 0x80, 0x47, 0x8d, 0x09, 0x6b, 0xfd, 0xe5, 0x62, 0xef, 0x52, 0xbf, + 0x4e, 0x8c, 0xed, 0x02, 0x8f, 0x58, 0xc3, 0x82, 0xa1, 0xfd, 0x6d, 0x0b, 0xd0, 0xe2, 0x26, 0x89, + 0x22, 0xb7, 0x69, 0xc4, 0xb9, 0xb1, 0xdb, 0x50, 0x8c, 0x5b, 0x4f, 0xcc, 0x54, 0xd8, 0xcc, 0x6d, + 0x28, 0xc6, 0xbf, 0xfc, 0xdb, 0x50, 0x4a, 0x07, 0xbb, 0x0d, 0x05, 0x2d, 0xc2, 0xb9, 0x36, 0xdf, + 0x6e, 0xf0, 0x1b, 0x06, 0xf8, 0xde, 0x43, 0x25, 0x9e, 0x9d, 0xdf, 0xdd, 0x19, 0x3b, 0xb7, 0x90, + 0x87, 0x80, 0xf3, 0x9f, 0xb3, 0xdf, 0x03, 0x88, 0x87, 0xb7, 0x4d, 0xe7, 0xc5, 0x2a, 0xf5, 0x74, + 0xbf, 0xd8, 0x5f, 0xa9, 0xc2, 0xc9, 0x4c, 0x29, 0x56, 0xba, 0xd5, 0xeb, 0x0e, 0x8e, 0x3a, 0xb2, + 0xfe, 0xee, 0xee, 0x5e, 0x5f, 0xe1, 0x56, 0x3e, 0x54, 0x5d, 0x3f, 0xec, 0x24, 0xc5, 0xe4, 0x9a, + 0xf2, 0x4e, 0xcc, 0x51, 0x82, 0x86, 0xbb, 0x98, 0xfe, 0xc5, 0x9c, 0x4d, 0x91, 0xc1, 0x5b, 0x29, + 0x63, 0xbc, 0xf2, 0x80, 0xdc, 0x01, 0x9f, 0xd4, 0xa1, 0x54, 0xd5, 0x22, 0x1c, 0x8b, 0x99, 0xc9, + 0x72, 0xdc, 0x47, 0xed, 0xbf, 0x5a, 0x82, 0x21, 0xe3, 0xa3, 0xa1, 0x5f, 0x4c, 0x97, 0x76, 0xb2, + 0x8a, 0x7b, 0x25, 0x46, 0x7f, 0x5c, 0x17, 0x6f, 0xe2, 0xaf, 0xf4, 0x54, 0x77, 0x55, 0xa7, 0x7b, + 0x3b, 0x63, 0xa7, 0x32, 0x75, 0x9b, 0x52, 0x95, 0x9e, 0x2e, 0x7c, 0x0c, 0x4e, 0x66, 0xc8, 0xe4, + 0xbc, 0xf2, 0x4a, 0xfa, 0xd6, 0xf7, 0x23, 0xba, 0xa5, 0xcc, 0x21, 0xfb, 0x3a, 0x1d, 0x32, 0x91, + 0x6e, 0x17, 0x78, 0xa4, 0x0f, 0x1f, 0x6c, 0x26, 0xab, 0xb6, 0xd4, 0x67, 0x56, 0xed, 0xd3, 0x50, + 0x0b, 0x03, 0xcf, 0x6d, 0xb8, 0xaa, 0xfa, 0x20, 0xcb, 0xe3, 0x5d, 0x12, 0x6d, 0x58, 0x41, 0xd1, + 0x5d, 0xa8, 0xab, 0x0b, 0xf2, 0x85, 0x7f, 0xbb, 0xa8, 0x43, 0x1f, 0x65, 0xb4, 0xe8, 0x8b, 0xef, + 0x35, 0x2f, 0x64, 0xc3, 0x00, 0x53, 0x82, 0x32, 0x45, 0x80, 0xf9, 0xde, 0x99, 0x76, 0x8c, 0xb1, + 0x80, 0xd8, 0x5f, 0xab, 0xc3, 0xd9, 0xbc, 0x7a, 0xd8, 0xe8, 0xa3, 0x30, 0xc0, 0xfb, 0x58, 0xcc, + 0x95, 0x0b, 0x79, 0x3c, 0x66, 0x19, 0x41, 0xd1, 0x2d, 0xf6, 0x1b, 0x0b, 0x9e, 0x82, 0xbb, 0xe7, + 0xac, 0x8a, 0x19, 0x72, 0x3c, 0xdc, 0xe7, 0x1d, 0xcd, 0x7d, 0xde, 0xe1, 0xdc, 0x3d, 0x67, 0x15, + 0x6d, 0x41, 0xb5, 0xe5, 0x26, 0xc4, 0x11, 0x4e, 0x84, 0xdb, 0xc7, 0xc2, 0x9c, 0x38, 0xdc, 0x4a, + 0x63, 0x3f, 0x31, 0x67, 0x88, 0xbe, 0x6a, 0xc1, 0xc9, 0xd5, 0x74, 0x0a, 0xbd, 0x10, 0x9e, 0xce, + 0x31, 
0xd4, 0x3c, 0x4f, 0x33, 0xe2, 0xd7, 0x01, 0x65, 0x1a, 0x71, 0xb6, 0x3b, 0xe8, 0x53, 0x16, + 0x0c, 0xae, 0xb9, 0x9e, 0x51, 0xfe, 0xf6, 0x18, 0x3e, 0xce, 0x15, 0xc6, 0x40, 0xef, 0x38, 0xf8, + 0xff, 0x18, 0x4b, 0xce, 0xbd, 0x34, 0xd5, 0xc0, 0x51, 0x35, 0xd5, 0xe0, 0x03, 0xd2, 0x54, 0x9f, + 0xb1, 0xa0, 0xae, 0x46, 0x5a, 0xa4, 0x45, 0x7f, 0xe8, 0x18, 0x3f, 0x39, 0xf7, 0x9c, 0xa8, 0xbf, + 0x58, 0x33, 0x47, 0x5f, 0xb4, 0x60, 0xc8, 0x79, 0xa3, 0x13, 0x91, 0x26, 0xd9, 0x0c, 0xc2, 0x58, + 0xdc, 0x25, 0xf8, 0x6a, 0xf1, 0x9d, 0x99, 0xa4, 0x4c, 0x66, 0xc8, 0xe6, 0x62, 0x18, 0x8b, 0xf4, + 0x25, 0xdd, 0x80, 0xcd, 0x2e, 0xd8, 0x3b, 0x25, 0x18, 0xdb, 0x87, 0x02, 0x7a, 0x11, 0x86, 0x83, + 0xa8, 0xe5, 0xf8, 0xee, 0x1b, 0x66, 0x4d, 0x0c, 0x65, 0x65, 0x2d, 0x1a, 0x30, 0x9c, 0xc2, 0x34, + 0x13, 0xb7, 0x4b, 0xfb, 0x24, 0x6e, 0x5f, 0x84, 0x4a, 0x44, 0xc2, 0x20, 0xbb, 0x59, 0x60, 0xa9, + 0x03, 0x0c, 0x82, 0x1e, 0x87, 0xb2, 0x13, 0xba, 0x22, 0x10, 0x4d, 0xed, 0x81, 0x26, 0x97, 0xe6, + 0x30, 0x6d, 0x4f, 0xd5, 0x91, 0xa8, 0xde, 0x97, 0x3a, 0x12, 0xc6, 0xd5, 0xff, 0x03, 0x3d, 0xaf, + 0xfe, 0x7f, 0xab, 0x0c, 0x8f, 0xef, 0x39, 0x5f, 0x74, 0x1c, 0x9e, 0xb5, 0x47, 0x1c, 0x9e, 0x1c, + 0x9e, 0xd2, 0x7e, 0xc3, 0x53, 0xee, 0x31, 0x3c, 0x9f, 0xa2, 0xcb, 0x40, 0xd6, 0x12, 0x29, 0xe6, + 0x36, 0xb8, 0x5e, 0xa5, 0x49, 0xc4, 0x0a, 0x90, 0x50, 0xac, 0xf9, 0xd2, 0x3d, 0x40, 0x2a, 0x69, + 0xb9, 0x5a, 0x84, 0x1a, 0xe8, 0x59, 0x5b, 0x84, 0xcf, 0xfd, 0x5e, 0x99, 0xd0, 0xf6, 0xcf, 0x95, + 0xe0, 0xc9, 0x3e, 0xa4, 0xb7, 0x39, 0x8b, 0xad, 0x3e, 0x67, 0xf1, 0x77, 0xf7, 0x67, 0xb2, 0xff, + 0x9a, 0x05, 0x17, 0x7a, 0x2b, 0x0f, 0xf4, 0x1c, 0x0c, 0xad, 0x46, 0x8e, 0xdf, 0x58, 0x67, 0x37, + 0x5c, 0xca, 0x41, 0x61, 0x63, 0xad, 0x9b, 0xb1, 0x89, 0x43, 0xb7, 0xb7, 0x3c, 0x26, 0xc1, 0xc0, + 0x90, 0x49, 0xa6, 0x74, 0x7b, 0xbb, 0x92, 0x05, 0xe2, 0x6e, 0x7c, 0xfb, 0x4f, 0x4b, 0xf9, 0xdd, + 0xe2, 0x46, 0xc6, 0x41, 0xbe, 0x93, 0xf8, 0x0a, 0xa5, 0x3e, 0x64, 0x49, 0xf9, 0x7e, 0xcb, 0x92, + 0x4a, 0x2f, 0x59, 0x82, 0x66, 0xe0, 0x94, 0x71, 0x75, 0x0a, 0x4f, 0x1c, 0xe6, 0x01, 0xb7, 0xaa, + 0x9a, 0xc6, 0x52, 0x06, 0x8e, 0xbb, 0x9e, 0x40, 0xcf, 0x40, 0xcd, 0xf5, 0x63, 0xd2, 0xe8, 0x44, + 0x3c, 0xd0, 0xdb, 0x48, 0xd6, 0x9a, 0x13, 0xed, 0x58, 0x61, 0xd8, 0xbf, 0x54, 0x82, 0xf3, 0x3d, + 0xed, 0xac, 0xfb, 0x24, 0xbb, 0xcc, 0xcf, 0x51, 0xb9, 0x3f, 0x9f, 0xc3, 0x1c, 0xa4, 0xea, 0xbe, + 0x83, 0xf4, 0xfb, 0xbd, 0x27, 0x26, 0xb5, 0xb9, 0xbf, 0x67, 0x47, 0xe9, 0x25, 0x38, 0xe1, 0x84, + 0x21, 0xc7, 0x63, 0xf1, 0x9a, 0x99, 0x6a, 0x3a, 0x93, 0x26, 0x10, 0xa7, 0x71, 0xfb, 0xd2, 0x9e, + 0x7f, 0x68, 0x41, 0x1d, 0x93, 0x35, 0x2e, 0x1d, 0xd0, 0x1d, 0x31, 0x44, 0x56, 0x11, 0x75, 0x37, + 0xe9, 0xc0, 0xc6, 0x2e, 0xab, 0x47, 0x99, 0x37, 0xd8, 0xdd, 0x57, 0xec, 0x94, 0x0e, 0x74, 0xc5, + 0x8e, 0xba, 0x64, 0xa5, 0xdc, 0xfb, 0x92, 0x15, 0xfb, 0xeb, 0x83, 0xf4, 0xf5, 0xc2, 0x60, 0x3a, + 0x22, 0xcd, 0x98, 0x7e, 0xdf, 0x4e, 0xe4, 0x89, 0x49, 0xa2, 0xbe, 0xef, 0x4d, 0x3c, 0x8f, 0x69, + 0x7b, 0xea, 0x28, 0xa6, 0x74, 0xa0, 0x5a, 0x22, 0xe5, 0x7d, 0x6b, 0x89, 0xbc, 0x04, 0x27, 0xe2, + 0x78, 0x7d, 0x29, 0x72, 0x37, 0x9d, 0x84, 0x5c, 0x27, 0xdb, 0xc2, 0xca, 0xd2, 0xf9, 0xff, 0xcb, + 0x57, 0x35, 0x10, 0xa7, 0x71, 0xd1, 0x2c, 0x9c, 0xd6, 0x15, 0x3d, 0x48, 0x94, 0xb0, 0xe8, 0x7e, + 0x3e, 0x13, 0x54, 0xb2, 0xaf, 0xae, 0x01, 0x22, 0x10, 0x70, 0xf7, 0x33, 0x54, 0xbe, 0xa5, 0x1a, + 0x69, 0x47, 0x06, 0xd2, 0xf2, 0x2d, 0x45, 0x87, 0xf6, 0xa5, 0xeb, 0x09, 0xb4, 0x00, 0x67, 0xf8, + 0xc4, 0x98, 0x0c, 0x43, 0xe3, 0x8d, 0x06, 0xd3, 0xf5, 0x0e, 0x67, 0xbb, 0x51, 0x70, 0xde, 0x73, + 0xe8, 0x05, 0x18, 0x52, 0xcd, 
0x73, 0x33, 0xe2, 0x14, 0x41, 0x79, 0x31, 0x14, 0x99, 0xb9, 0x26, + 0x36, 0xf1, 0xd0, 0x07, 0xe1, 0x51, 0xfd, 0x97, 0xa7, 0x80, 0xf1, 0xa3, 0xb5, 0x19, 0x51, 0x2c, + 0x49, 0x5d, 0xe9, 0x31, 0x9b, 0x8b, 0xd6, 0xc4, 0xbd, 0x9e, 0x47, 0xab, 0x70, 0x41, 0x81, 0x2e, + 0xfb, 0x09, 0xcb, 0xe7, 0x88, 0xc9, 0x94, 0x13, 0x93, 0x9b, 0x91, 0xc7, 0xca, 0x2b, 0xd5, 0xf5, + 0x6d, 0x8b, 0xb3, 0x6e, 0x72, 0x35, 0x0f, 0x13, 0xcf, 0xe3, 0x3d, 0xa8, 0xa0, 0x09, 0xa8, 0x13, + 0xdf, 0x59, 0xf5, 0xc8, 0xe2, 0xf4, 0x1c, 0x2b, 0xba, 0x64, 0x9c, 0xe4, 0x5d, 0x96, 0x00, 0xac, + 0x71, 0x54, 0x84, 0xe9, 0x70, 0xcf, 0x9b, 0x3f, 0x97, 0xe0, 0x6c, 0xab, 0x11, 0x52, 0xdb, 0xc3, + 0x6d, 0x90, 0xc9, 0x06, 0x0b, 0xa8, 0xa3, 0x1f, 0x86, 0x17, 0xa2, 0x54, 0xe1, 0xd3, 0xb3, 0xd3, + 0x4b, 0x5d, 0x38, 0x38, 0xf7, 0x49, 0x16, 0x78, 0x19, 0x05, 0x5b, 0xdb, 0xa3, 0x67, 0x32, 0x81, + 0x97, 0xb4, 0x11, 0x73, 0x18, 0xba, 0x06, 0x88, 0xc5, 0xe2, 0x5f, 0x4d, 0x92, 0x50, 0x19, 0x3b, + 0xa3, 0x67, 0xd9, 0x2b, 0xa9, 0x30, 0xb2, 0x2b, 0x5d, 0x18, 0x38, 0xe7, 0x29, 0xfb, 0x3f, 0x59, + 0x70, 0x42, 0xad, 0xd7, 0xfb, 0x90, 0x8d, 0xe2, 0xa5, 0xb3, 0x51, 0x66, 0x8f, 0x2e, 0xf1, 0x58, + 0xcf, 0x7b, 0x84, 0x34, 0xff, 0xf4, 0x10, 0x80, 0x96, 0x8a, 0x4a, 0x21, 0x59, 0x3d, 0x15, 0xd2, + 0x43, 0x2b, 0x91, 0xf2, 0x2a, 0xac, 0x54, 0x1f, 0x6c, 0x85, 0x95, 0x65, 0x38, 0x27, 0xcd, 0x05, + 0x7e, 0x56, 0x74, 0x35, 0x88, 0x95, 0x80, 0xab, 0x4d, 0x3d, 0x2e, 0x08, 0x9d, 0x9b, 0xcb, 0x43, + 0xc2, 0xf9, 0xcf, 0xa6, 0xac, 0x94, 0xc1, 0xfd, 0xac, 0x14, 0xbd, 0xa6, 0xe7, 0xd7, 0xe4, 0xdd, + 0x1d, 0x99, 0x35, 0x3d, 0x7f, 0x65, 0x19, 0x6b, 0x9c, 0x7c, 0xc1, 0x5e, 0x2f, 0x48, 0xb0, 0xc3, + 0x81, 0x05, 0xbb, 0x14, 0x31, 0x43, 0x3d, 0x45, 0x8c, 0xf4, 0x49, 0x0f, 0xf7, 0xf4, 0x49, 0xbf, + 0x0f, 0x46, 0x5c, 0x7f, 0x9d, 0x44, 0x6e, 0x42, 0x9a, 0x6c, 0x2d, 0x30, 0xf1, 0x53, 0xd3, 0x6a, + 0x7d, 0x2e, 0x05, 0xc5, 0x19, 0xec, 0xb4, 0x5c, 0x1c, 0xe9, 0x43, 0x2e, 0xf6, 0xd0, 0x46, 0x27, + 0x8b, 0xd1, 0x46, 0xa7, 0x8e, 0xae, 0x8d, 0x4e, 0x1f, 0xab, 0x36, 0x42, 0x85, 0x68, 0xa3, 0xbe, + 0x04, 0xbd, 0xb1, 0xfd, 0x3b, 0xbb, 0xcf, 0xf6, 0xaf, 0x97, 0x2a, 0x3a, 0x77, 0x68, 0x55, 0x94, + 0xaf, 0x65, 0x1e, 0x39, 0x94, 0x96, 0xf9, 0x4c, 0x09, 0xce, 0x69, 0x39, 0x4c, 0x67, 0xbf, 0xbb, + 0x46, 0x25, 0x11, 0xbb, 0xfe, 0x89, 0x9f, 0xdb, 0x18, 0xc9, 0x51, 0x3a, 0xcf, 0x4a, 0x41, 0xb0, + 0x81, 0xc5, 0x72, 0x8c, 0x48, 0xc4, 0xca, 0xed, 0x66, 0x85, 0xf4, 0xb4, 0x68, 0xc7, 0x0a, 0x83, + 0xce, 0x2f, 0xfa, 0x5b, 0xe4, 0x6d, 0x66, 0x8b, 0xca, 0x4d, 0x6b, 0x10, 0x36, 0xf1, 0xd0, 0xd3, + 0x9c, 0x09, 0x13, 0x10, 0x54, 0x50, 0x0f, 0x8b, 0xfb, 0x60, 0xa5, 0x4c, 0x50, 0x50, 0xd9, 0x1d, + 0x96, 0x4c, 0x56, 0xed, 0xee, 0x0e, 0x0b, 0x81, 0x52, 0x18, 0xf6, 0xff, 0xb2, 0xe0, 0x7c, 0xee, + 0x50, 0xdc, 0x07, 0xe5, 0xbb, 0x95, 0x56, 0xbe, 0xcb, 0x45, 0x6d, 0x37, 0x8c, 0xb7, 0xe8, 0xa1, + 0x88, 0xff, 0x83, 0x05, 0x23, 0x1a, 0xff, 0x3e, 0xbc, 0xaa, 0x9b, 0x7e, 0xd5, 0xe2, 0x76, 0x56, + 0xf5, 0xae, 0x77, 0xfb, 0xed, 0x12, 0xa8, 0x42, 0x8f, 0x93, 0x0d, 0x59, 0x46, 0x77, 0x9f, 0x93, + 0xc4, 0x6d, 0x18, 0x60, 0x07, 0xa1, 0x71, 0x31, 0x41, 0x1e, 0x69, 0xfe, 0xec, 0x50, 0x55, 0x1f, + 0x32, 0xb3, 0xbf, 0x31, 0x16, 0x0c, 0x59, 0x31, 0x68, 0x37, 0xa6, 0xd2, 0xbc, 0x29, 0xd2, 0xb2, + 0x74, 0x31, 0x68, 0xd1, 0x8e, 0x15, 0x06, 0x55, 0x0f, 0x6e, 0x23, 0xf0, 0xa7, 0x3d, 0x27, 0x96, + 0x77, 0x1e, 0x2a, 0xf5, 0x30, 0x27, 0x01, 0x58, 0xe3, 0xb0, 0x33, 0x52, 0x37, 0x0e, 0x3d, 0x67, + 0xdb, 0xd8, 0x3f, 0x1b, 0xf5, 0x09, 0x14, 0x08, 0x9b, 0x78, 0x76, 0x1b, 0x46, 0xd3, 0x2f, 0x31, + 0x43, 0xd6, 0x58, 0x80, 0x62, 0x5f, 0xc3, 0x39, 0x01, 
0x75, 0x87, 0x3d, 0x35, 0xdf, 0x71, 0xb2, + 0x57, 0x95, 0x4f, 0x4a, 0x00, 0xd6, 0x38, 0xf6, 0xaf, 0x58, 0x70, 0x26, 0x67, 0xd0, 0x0a, 0x4c, + 0x7b, 0x4b, 0xb4, 0xb4, 0xc9, 0x53, 0xec, 0x3f, 0x08, 0x83, 0x4d, 0xb2, 0xe6, 0xc8, 0x10, 0x38, + 0x43, 0xb6, 0xcf, 0xf0, 0x66, 0x2c, 0xe1, 0xf6, 0xff, 0xb0, 0xe0, 0x64, 0xba, 0xaf, 0x31, 0x4b, + 0x25, 0xe1, 0xc3, 0xe4, 0xc6, 0x8d, 0x60, 0x93, 0x44, 0xdb, 0xf4, 0xcd, 0xad, 0x4c, 0x2a, 0x49, + 0x17, 0x06, 0xce, 0x79, 0x8a, 0x95, 0x79, 0x6d, 0xaa, 0xd1, 0x96, 0x33, 0xf2, 0x56, 0x91, 0x33, + 0x52, 0x7f, 0x4c, 0xf3, 0xb8, 0x5c, 0xb1, 0xc4, 0x26, 0x7f, 0xfb, 0xdb, 0x15, 0x50, 0x79, 0xb1, + 0x2c, 0xfe, 0xa8, 0xa0, 0xe8, 0xad, 0x83, 0x66, 0x10, 0xa9, 0xc9, 0x50, 0xd9, 0x2b, 0x20, 0x80, + 0x7b, 0x49, 0x4c, 0xd7, 0xa5, 0x7a, 0xc3, 0x15, 0x0d, 0xc2, 0x26, 0x1e, 0xed, 0x89, 0xe7, 0x6e, + 0x12, 0xfe, 0xd0, 0x40, 0xba, 0x27, 0xf3, 0x12, 0x80, 0x35, 0x0e, 0xed, 0x49, 0xd3, 0x5d, 0x5b, + 0x13, 0x5b, 0x7e, 0xd5, 0x13, 0x3a, 0x3a, 0x98, 0x41, 0x78, 0xe5, 0xee, 0x60, 0x43, 0x58, 0xc1, + 0x46, 0xe5, 0xee, 0x60, 0x03, 0x33, 0x08, 0xb5, 0xdb, 0xfc, 0x20, 0x6a, 0xb3, 0xab, 0xe4, 0x9b, + 0x8a, 0x8b, 0xb0, 0x7e, 0x95, 0xdd, 0x76, 0xa3, 0x1b, 0x05, 0xe7, 0x3d, 0x47, 0x67, 0x60, 0x18, + 0x91, 0xa6, 0xdb, 0x48, 0x4c, 0x6a, 0x90, 0x9e, 0x81, 0x4b, 0x5d, 0x18, 0x38, 0xe7, 0x29, 0x34, + 0x09, 0x27, 0x65, 0x5e, 0xb3, 0xac, 0x5a, 0x33, 0x94, 0xae, 0x92, 0x81, 0xd3, 0x60, 0x9c, 0xc5, + 0xa7, 0x52, 0xad, 0x2d, 0x0a, 0x5b, 0x31, 0x63, 0xd9, 0x90, 0x6a, 0xb2, 0xe0, 0x15, 0x56, 0x18, + 0xf6, 0x27, 0xcb, 0x54, 0x0b, 0xf7, 0x28, 0xe8, 0x76, 0xdf, 0xa2, 0x05, 0xd3, 0x33, 0xb2, 0xd2, + 0xc7, 0x8c, 0x7c, 0x1e, 0x86, 0xef, 0xc4, 0x81, 0xaf, 0x22, 0xf1, 0xaa, 0x3d, 0x23, 0xf1, 0x0c, + 0xac, 0xfc, 0x48, 0xbc, 0x81, 0xa2, 0x22, 0xf1, 0x06, 0x0f, 0x19, 0x89, 0xf7, 0xcd, 0x2a, 0xa8, + 0x2b, 0x44, 0x6e, 0x90, 0xe4, 0x6e, 0x10, 0x6d, 0xb8, 0x7e, 0x8b, 0xe5, 0x83, 0x7f, 0xd5, 0x82, + 0x61, 0xbe, 0x5e, 0xe6, 0xcd, 0x4c, 0xaa, 0xb5, 0x82, 0xee, 0xa6, 0x48, 0x31, 0x1b, 0x5f, 0x31, + 0x18, 0x65, 0xae, 0xdc, 0x34, 0x41, 0x38, 0xd5, 0x23, 0xf4, 0x31, 0x00, 0xe9, 0x1f, 0x5d, 0x93, + 0x22, 0x73, 0xae, 0x98, 0xfe, 0x61, 0xb2, 0xa6, 0x6d, 0xe0, 0x15, 0xc5, 0x04, 0x1b, 0x0c, 0xd1, + 0x67, 0x74, 0x96, 0x19, 0x0f, 0xd9, 0xff, 0xc8, 0xb1, 0x8c, 0x4d, 0x3f, 0x39, 0x66, 0x18, 0x06, + 0x5d, 0xbf, 0x45, 0xe7, 0x89, 0x88, 0x58, 0x7a, 0x57, 0x5e, 0x2d, 0x85, 0xf9, 0xc0, 0x69, 0x4e, + 0x39, 0x9e, 0xe3, 0x37, 0x48, 0x34, 0xc7, 0xd1, 0xcd, 0x8b, 0xa6, 0x59, 0x03, 0x96, 0x84, 0xba, + 0x2e, 0x5f, 0xa9, 0xf6, 0x73, 0xf9, 0xca, 0x85, 0xf7, 0xc3, 0xe9, 0xae, 0x8f, 0x79, 0xa0, 0x94, + 0xb2, 0xc3, 0x67, 0xa3, 0xd9, 0xff, 0x72, 0x40, 0x2b, 0xad, 0x1b, 0x41, 0x93, 0x5f, 0x01, 0x12, + 0xe9, 0x2f, 0x2a, 0x6c, 0xdc, 0x02, 0xa7, 0x88, 0x71, 0x59, 0xb5, 0x6a, 0xc4, 0x26, 0x4b, 0x3a, + 0x47, 0x43, 0x27, 0x22, 0xfe, 0x71, 0xcf, 0xd1, 0x25, 0xc5, 0x04, 0x1b, 0x0c, 0xd1, 0x7a, 0x2a, + 0xa7, 0xe4, 0xca, 0xd1, 0x73, 0x4a, 0x58, 0x95, 0xa9, 0xbc, 0xaa, 0xfd, 0x5f, 0xb4, 0x60, 0xc4, + 0x4f, 0xcd, 0xdc, 0x62, 0xc2, 0x48, 0xf3, 0x57, 0x05, 0xbf, 0x81, 0x2a, 0xdd, 0x86, 0x33, 0xfc, + 0xf3, 0x54, 0x5a, 0xf5, 0x80, 0x2a, 0x4d, 0xdf, 0x25, 0x34, 0xd0, 0xeb, 0x2e, 0x21, 0xe4, 0xab, + 0xcb, 0xd4, 0x06, 0x0b, 0xbf, 0x4c, 0x0d, 0x72, 0x2e, 0x52, 0xbb, 0x0d, 0xf5, 0x46, 0x44, 0x9c, + 0xe4, 0x90, 0xf7, 0x6a, 0xb1, 0x03, 0xfa, 0x69, 0x49, 0x00, 0x6b, 0x5a, 0xf6, 0xff, 0xa9, 0xc0, + 0x29, 0x39, 0x22, 0x32, 0x04, 0x9d, 0xea, 0x47, 0xce, 0x57, 0x1b, 0xb7, 0x4a, 0x3f, 0x5e, 0x95, + 0x00, 0xac, 0x71, 0xa8, 0x3d, 0xd6, 0x89, 0xc9, 0x62, 0x48, 0xfc, 0x79, 0x77, 
0x35, 0x16, 0xe7, + 0x9c, 0x6a, 0xa1, 0xdc, 0xd4, 0x20, 0x6c, 0xe2, 0x51, 0x63, 0x9c, 0xdb, 0xc5, 0x71, 0x36, 0x7d, + 0x45, 0xd8, 0xdb, 0x58, 0xc2, 0xd1, 0xcf, 0xe7, 0x56, 0x98, 0x2d, 0x26, 0x71, 0xab, 0x2b, 0xf2, + 0xfe, 0x80, 0x57, 0x31, 0xfe, 0x1d, 0x0b, 0xce, 0xf1, 0x56, 0x39, 0x92, 0x37, 0xc3, 0xa6, 0x93, + 0x90, 0xb8, 0x98, 0x8a, 0xef, 0x39, 0xfd, 0xd3, 0x4e, 0xde, 0x3c, 0xb6, 0x38, 0xbf, 0x37, 0xe8, + 0x4d, 0x0b, 0x4e, 0x6e, 0xa4, 0x6a, 0x7e, 0x48, 0xd5, 0x71, 0xd4, 0x74, 0xfc, 0x14, 0x51, 0xbd, + 0xd4, 0xd2, 0xed, 0x31, 0xce, 0x72, 0xb7, 0xff, 0xd4, 0x02, 0x53, 0x8c, 0xde, 0xff, 0x52, 0x21, + 0x07, 0x37, 0x05, 0xa5, 0x75, 0x59, 0xed, 0x69, 0x5d, 0x3e, 0x0e, 0xe5, 0x8e, 0xdb, 0x14, 0xfb, + 0x0b, 0x7d, 0xfa, 0x3a, 0x37, 0x83, 0x69, 0xbb, 0xfd, 0xcf, 0xaa, 0xda, 0x6f, 0x21, 0xf2, 0xa2, + 0xbe, 0x27, 0x5e, 0x7b, 0x4d, 0x15, 0x1b, 0xe3, 0x6f, 0x7e, 0xa3, 0xab, 0xd8, 0xd8, 0x8f, 0x1c, + 0x3c, 0xed, 0x8d, 0x0f, 0x50, 0xaf, 0x5a, 0x63, 0x83, 0xfb, 0xe4, 0xbc, 0xdd, 0x81, 0x1a, 0xdd, + 0x82, 0x31, 0x07, 0x64, 0x2d, 0xd5, 0xa9, 0xda, 0x55, 0xd1, 0x7e, 0x6f, 0x67, 0xec, 0xbd, 0x07, + 0xef, 0x96, 0x7c, 0x1a, 0x2b, 0xfa, 0x28, 0x86, 0x3a, 0xfd, 0xcd, 0xd2, 0xf3, 0xc4, 0xe6, 0xee, + 0xa6, 0x92, 0x99, 0x12, 0x50, 0x48, 0xee, 0x9f, 0xe6, 0x83, 0x7c, 0xa8, 0xb3, 0x5b, 0x6b, 0x19, + 0x53, 0xbe, 0x07, 0x5c, 0x52, 0x49, 0x72, 0x12, 0x70, 0x6f, 0x67, 0xec, 0xa5, 0x83, 0x33, 0x55, + 0x8f, 0x63, 0xcd, 0xc2, 0xfe, 0x52, 0x45, 0xcf, 0x5d, 0x51, 0x63, 0xee, 0x7b, 0x62, 0xee, 0xbe, + 0x98, 0x99, 0xbb, 0x17, 0xbb, 0xe6, 0xee, 0x88, 0xbe, 0x5d, 0x35, 0x35, 0x1b, 0xef, 0xb7, 0x21, + 0xb0, 0xbf, 0xbf, 0x81, 0x59, 0x40, 0xaf, 0x77, 0xdc, 0x88, 0xc4, 0x4b, 0x51, 0xc7, 0x77, 0xfd, + 0x16, 0x9b, 0x8e, 0x35, 0xd3, 0x02, 0x4a, 0x81, 0x71, 0x16, 0x9f, 0x6e, 0xea, 0xe9, 0x37, 0xbf, + 0xed, 0x6c, 0xf2, 0x59, 0x65, 0x94, 0xdd, 0x5a, 0x16, 0xed, 0x58, 0x61, 0xd8, 0x5f, 0x67, 0x67, + 0xd9, 0x46, 0x5e, 0x30, 0x9d, 0x13, 0x1e, 0xbb, 0x26, 0x98, 0xd7, 0xec, 0x52, 0x73, 0x82, 0xdf, + 0x0d, 0xcc, 0x61, 0xe8, 0x2e, 0x0c, 0xae, 0xf2, 0x7b, 0xf2, 0x8a, 0xa9, 0x63, 0x2e, 0x2e, 0xdd, + 0x63, 0xb7, 0xa1, 0xc8, 0x1b, 0xf8, 0xee, 0xe9, 0x9f, 0x58, 0x72, 0xb3, 0xbf, 0x51, 0x81, 0x93, + 0x99, 0x8b, 0x64, 0x53, 0xd5, 0x52, 0x4b, 0xfb, 0x56, 0x4b, 0xfd, 0x30, 0x40, 0x93, 0x84, 0x5e, + 0xb0, 0xcd, 0xcc, 0xb1, 0xca, 0x81, 0xcd, 0x31, 0x65, 0xc1, 0xcf, 0x28, 0x2a, 0xd8, 0xa0, 0x28, + 0x0a, 0x95, 0xf1, 0xe2, 0xab, 0x99, 0x42, 0x65, 0xc6, 0x6d, 0x07, 0x03, 0xf7, 0xf7, 0xb6, 0x03, + 0x17, 0x4e, 0xf2, 0x2e, 0xaa, 0xec, 0xdb, 0x43, 0x24, 0xd9, 0xb2, 0xfc, 0x85, 0x99, 0x34, 0x19, + 0x9c, 0xa5, 0xfb, 0x20, 0xef, 0x89, 0x46, 0xef, 0x86, 0xba, 0xfc, 0xce, 0xf1, 0x68, 0x5d, 0x57, + 0x30, 0x90, 0xd3, 0x80, 0xdd, 0xdf, 0x2c, 0x7e, 0xda, 0x5f, 0x28, 0x51, 0xeb, 0x99, 0xff, 0x53, + 0x95, 0x68, 0x9e, 0x82, 0x01, 0xa7, 0x93, 0xac, 0x07, 0x5d, 0x77, 0xed, 0x4d, 0xb2, 0x56, 0x2c, + 0xa0, 0x68, 0x1e, 0x2a, 0x4d, 0x5d, 0x5d, 0xe4, 0x20, 0xa3, 0xa8, 0x1d, 0x91, 0x4e, 0x42, 0x30, + 0xa3, 0x82, 0x1e, 0x83, 0x4a, 0xe2, 0xb4, 0x64, 0xa2, 0x13, 0x4b, 0x6e, 0x5d, 0x71, 0x5a, 0x31, + 0x66, 0xad, 0xa6, 0xd2, 0xac, 0xec, 0xa3, 0x34, 0x5f, 0x82, 0x13, 0xb1, 0xdb, 0xf2, 0x9d, 0xa4, + 0x13, 0x11, 0xe3, 0x70, 0x4d, 0xc7, 0x4b, 0x98, 0x40, 0x9c, 0xc6, 0xb5, 0x7f, 0x63, 0x18, 0xce, + 0x2e, 0x4f, 0x2f, 0xc8, 0x9a, 0xd9, 0xc7, 0x96, 0xab, 0x94, 0xc7, 0xe3, 0xfe, 0xe5, 0x2a, 0xf5, + 0xe0, 0xee, 0x19, 0xb9, 0x4a, 0x9e, 0x91, 0xab, 0x94, 0x4e, 0x1c, 0x29, 0x17, 0x91, 0x38, 0x92, + 0xd7, 0x83, 0x7e, 0x12, 0x47, 0x8e, 0x2d, 0x79, 0x69, 0xcf, 0x0e, 0x1d, 0x28, 0x79, 0x49, 0x65, + 0x76, 
0x15, 0x12, 0xd2, 0xdf, 0xe3, 0x53, 0xe5, 0x66, 0x76, 0xa9, 0xac, 0x1a, 0x9e, 0xae, 0x22, + 0x04, 0xec, 0xab, 0xc5, 0x77, 0xa0, 0x8f, 0xac, 0x1a, 0x91, 0x31, 0x63, 0x66, 0x72, 0x0d, 0x16, + 0x91, 0xc9, 0x95, 0xd7, 0x9d, 0x7d, 0x33, 0xb9, 0x5e, 0x82, 0x13, 0x0d, 0x2f, 0xf0, 0xc9, 0x52, + 0x14, 0x24, 0x41, 0x23, 0xf0, 0x84, 0x31, 0xad, 0x44, 0xc2, 0xb4, 0x09, 0xc4, 0x69, 0xdc, 0x5e, + 0x69, 0x60, 0xf5, 0xa3, 0xa6, 0x81, 0xc1, 0x03, 0x4a, 0x03, 0xfb, 0x19, 0x9d, 0xb0, 0x3c, 0xc4, + 0xbe, 0xc8, 0x87, 0x8b, 0xff, 0x22, 0xfd, 0x64, 0x2d, 0xa3, 0xb7, 0xf8, 0x65, 0x77, 0xd4, 0x1c, + 0x9d, 0x0e, 0xda, 0xd4, 0xdc, 0x1a, 0x66, 0x43, 0xf2, 0xda, 0x31, 0x4c, 0xd8, 0xdb, 0xcb, 0x9a, + 0x8d, 0xba, 0x00, 0x4f, 0x37, 0xe1, 0x74, 0x47, 0x8e, 0x92, 0x50, 0xfd, 0x95, 0x12, 0x7c, 0xdf, + 0xbe, 0x5d, 0x40, 0x77, 0x01, 0x12, 0xa7, 0x25, 0x26, 0xaa, 0x38, 0xa6, 0x38, 0x62, 0x50, 0xe3, + 0x8a, 0xa4, 0xc7, 0x2b, 0x81, 0xa8, 0xbf, 0xec, 0x00, 0x40, 0xfe, 0x66, 0xb1, 0x8c, 0x81, 0xd7, + 0x55, 0x30, 0x11, 0x07, 0x1e, 0xc1, 0x0c, 0x42, 0xd5, 0x7f, 0x44, 0x5a, 0xfa, 0x76, 0x66, 0xf5, + 0xf9, 0x30, 0x6b, 0xc5, 0x02, 0x8a, 0x5e, 0x80, 0x21, 0xc7, 0xf3, 0x78, 0x56, 0x0a, 0x89, 0xc5, + 0x6d, 0x37, 0xba, 0x72, 0x9b, 0x06, 0x61, 0x13, 0xcf, 0xfe, 0x93, 0x12, 0x8c, 0xed, 0x23, 0x53, + 0xba, 0xf2, 0xec, 0xaa, 0x7d, 0xe7, 0xd9, 0x89, 0xcc, 0x80, 0x81, 0x1e, 0x99, 0x01, 0x2f, 0xc0, + 0x50, 0x42, 0x9c, 0xb6, 0x08, 0x83, 0x12, 0xfb, 0x6f, 0x7d, 0xee, 0xaa, 0x41, 0xd8, 0xc4, 0xa3, + 0x52, 0x6c, 0xc4, 0x69, 0x34, 0x48, 0x1c, 0xcb, 0xd0, 0x7f, 0xe1, 0xc3, 0x2c, 0x2c, 0xaf, 0x80, + 0xb9, 0x86, 0x27, 0x53, 0x2c, 0x70, 0x86, 0x65, 0x76, 0xc0, 0xeb, 0x7d, 0x0e, 0xf8, 0xd7, 0x4a, + 0xf0, 0xf8, 0x9e, 0xda, 0xad, 0xef, 0xac, 0x8c, 0x4e, 0x4c, 0xa2, 0xec, 0xc4, 0xb9, 0x19, 0x93, + 0x08, 0x33, 0x08, 0x1f, 0xa5, 0x30, 0x34, 0x6e, 0xbf, 0x2e, 0x3a, 0x65, 0x88, 0x8f, 0x52, 0x8a, + 0x05, 0xce, 0xb0, 0x3c, 0xec, 0xb4, 0xfc, 0x07, 0x25, 0x78, 0xb2, 0x0f, 0x1b, 0xa0, 0xc0, 0xd4, + 0xaa, 0x74, 0x82, 0x5b, 0xf9, 0x01, 0xe5, 0x21, 0x1e, 0x72, 0xb8, 0xbe, 0x5e, 0x82, 0x0b, 0xbd, + 0x55, 0x31, 0xfa, 0x51, 0xba, 0x87, 0x97, 0xb1, 0x4f, 0x66, 0x6e, 0xdc, 0x19, 0xbe, 0x7f, 0x4f, + 0x81, 0x70, 0x16, 0x17, 0x8d, 0x03, 0x84, 0x4e, 0xb2, 0x1e, 0x5f, 0xde, 0x72, 0xe3, 0x44, 0xd4, + 0x7e, 0x19, 0xe1, 0x27, 0x46, 0xb2, 0x15, 0x1b, 0x18, 0x94, 0x1d, 0xfb, 0x37, 0x13, 0xdc, 0x08, + 0x12, 0xfe, 0x10, 0xdf, 0x46, 0x9c, 0x91, 0x37, 0x65, 0x18, 0x20, 0x9c, 0xc5, 0xa5, 0xec, 0xd8, + 0x99, 0x24, 0xef, 0x28, 0xdf, 0x5f, 0x30, 0x76, 0xf3, 0xaa, 0x15, 0x1b, 0x18, 0xd9, 0xac, 0xbf, + 0xea, 0xfe, 0x59, 0x7f, 0xf6, 0x3f, 0x2d, 0xc1, 0xf9, 0x9e, 0xa6, 0x5c, 0x7f, 0x0b, 0xf0, 0xe1, + 0xcb, 0xd4, 0x3b, 0xdc, 0xdc, 0x39, 0x60, 0x46, 0xd9, 0x1f, 0xf6, 0x98, 0x69, 0x22, 0xa3, 0xec, + 0xf0, 0x29, 0xd9, 0x0f, 0xdf, 0x78, 0x76, 0x25, 0x91, 0x55, 0x0e, 0x90, 0x44, 0x96, 0xf9, 0x18, + 0xd5, 0x3e, 0x17, 0xf2, 0x9f, 0x95, 0x7b, 0x0e, 0x2f, 0xdd, 0xfa, 0xf5, 0xe5, 0x1d, 0x9d, 0x81, + 0x53, 0xae, 0xcf, 0x6e, 0x4d, 0x5a, 0xee, 0xac, 0x8a, 0x72, 0x20, 0xa5, 0xf4, 0xdd, 0xe6, 0x73, + 0x19, 0x38, 0xee, 0x7a, 0xe2, 0x21, 0x4c, 0xea, 0x3b, 0xdc, 0x90, 0x1e, 0x2c, 0xad, 0x14, 0x2d, + 0xc2, 0x39, 0x39, 0x14, 0xeb, 0x4e, 0x44, 0x9a, 0x42, 0x8d, 0xc4, 0x22, 0x8d, 0xe1, 0x3c, 0x4f, + 0x85, 0xc8, 0x41, 0xc0, 0xf9, 0xcf, 0xb1, 0x8b, 0x6a, 0x82, 0xd0, 0x6d, 0x88, 0x4d, 0x8e, 0xbe, + 0xa8, 0x86, 0x36, 0x62, 0x0e, 0xb3, 0x3f, 0x0c, 0x75, 0xf5, 0xfe, 0x3c, 0x98, 0x5a, 0x4d, 0xba, + 0xae, 0x60, 0x6a, 0x35, 0xe3, 0x0c, 0x2c, 0xfa, 0xb5, 0xa8, 0x49, 0x9c, 0x59, 0x3d, 0xd7, 0xc9, + 0x36, 0xb3, 0x8f, 0xed, 0x1f, 
0x86, 0x61, 0xe5, 0x67, 0xe9, 0xf7, 0xfa, 0x1e, 0xfb, 0x4b, 0x03, + 0x70, 0x22, 0x55, 0x92, 0x2f, 0xe5, 0xd6, 0xb4, 0xf6, 0x75, 0x6b, 0xb2, 0xe0, 0xf8, 0x8e, 0x2f, + 0xef, 0xf6, 0x32, 0x82, 0xe3, 0x3b, 0x3e, 0xc1, 0x1c, 0x46, 0xcd, 0xdb, 0x66, 0xb4, 0x8d, 0x3b, + 0xbe, 0x08, 0x62, 0x55, 0xe6, 0xed, 0x0c, 0x6b, 0xc5, 0x02, 0x8a, 0x3e, 0x61, 0xc1, 0x70, 0xcc, + 0x7c, 0xe6, 0xdc, 0x29, 0x2c, 0x26, 0xdd, 0xb5, 0xa3, 0x57, 0x1c, 0x54, 0xe5, 0x27, 0x59, 0x5c, + 0x8a, 0xd9, 0x82, 0x53, 0x1c, 0xd1, 0xa7, 0x2d, 0xa8, 0xab, 0x2b, 0x48, 0xc4, 0x45, 0x7d, 0xcb, + 0xc5, 0x56, 0x3c, 0xe4, 0xde, 0x44, 0x75, 0xfc, 0xa0, 0x4a, 0xcf, 0x61, 0xcd, 0x18, 0xc5, 0xca, + 0x63, 0x3b, 0x78, 0x3c, 0x1e, 0x5b, 0xc8, 0xf1, 0xd6, 0xbe, 0x1b, 0xea, 0x6d, 0xc7, 0x77, 0xd7, + 0x48, 0x9c, 0x70, 0x27, 0xaa, 0x2c, 0xc4, 0x2a, 0x1b, 0xb1, 0x86, 0x53, 0x85, 0x1c, 0xb3, 0x17, + 0x4b, 0x0c, 0xaf, 0x27, 0x53, 0xc8, 0xcb, 0xba, 0x19, 0x9b, 0x38, 0xa6, 0x8b, 0x16, 0x1e, 0xa8, + 0x8b, 0x76, 0x68, 0x1f, 0x17, 0xed, 0x3f, 0xb2, 0xe0, 0x5c, 0xee, 0x57, 0x7b, 0x78, 0xc3, 0x0d, + 0xed, 0x2f, 0x57, 0xe1, 0x4c, 0x4e, 0x6d, 0x4d, 0xb4, 0x6d, 0xce, 0x67, 0xab, 0x88, 0x93, 0xfb, + 0xf4, 0x41, 0xb4, 0x1c, 0xc6, 0x9c, 0x49, 0x7c, 0xb0, 0x03, 0x12, 0x7d, 0x48, 0x51, 0xbe, 0xbf, + 0x87, 0x14, 0xc6, 0xb4, 0xac, 0x3c, 0xd0, 0x69, 0x59, 0xdd, 0x7b, 0x5a, 0xa2, 0x5f, 0xb5, 0x60, + 0xb4, 0xdd, 0xa3, 0xa0, 0xbb, 0x70, 0x3c, 0xde, 0x3a, 0x9e, 0x72, 0xf1, 0x53, 0x8f, 0xed, 0xee, + 0x8c, 0xf5, 0xac, 0xa3, 0x8f, 0x7b, 0xf6, 0xca, 0xfe, 0x76, 0x19, 0x58, 0x61, 0x57, 0x56, 0x3f, + 0x6d, 0x1b, 0x7d, 0xdc, 0x2c, 0xd1, 0x6b, 0x15, 0x55, 0x4e, 0x96, 0x13, 0x57, 0x25, 0x7e, 0xf9, + 0x08, 0xe6, 0x55, 0xfc, 0xcd, 0x0a, 0xad, 0x52, 0x1f, 0x42, 0xcb, 0x93, 0xb5, 0x90, 0xcb, 0xc5, + 0xd7, 0x42, 0xae, 0x67, 0xeb, 0x20, 0xef, 0xfd, 0x89, 0x2b, 0x0f, 0xe5, 0x27, 0xfe, 0x5b, 0x16, + 0x17, 0x3c, 0x99, 0xaf, 0xa0, 0x2d, 0x03, 0x6b, 0x0f, 0xcb, 0xe0, 0x19, 0xa8, 0xc5, 0xc4, 0x5b, + 0xbb, 0x4a, 0x1c, 0x4f, 0x58, 0x10, 0xfa, 0xd4, 0x58, 0xb4, 0x63, 0x85, 0xc1, 0x2e, 0x4b, 0xf5, + 0xbc, 0xe0, 0xee, 0xe5, 0x76, 0x98, 0x6c, 0x0b, 0x5b, 0x42, 0x5f, 0x96, 0xaa, 0x20, 0xd8, 0xc0, + 0xb2, 0xff, 0x76, 0x89, 0xcf, 0x40, 0x11, 0x7a, 0xf0, 0x62, 0xe6, 0x7a, 0xbb, 0xfe, 0x4f, 0xed, + 0x3f, 0x0a, 0xd0, 0x50, 0x17, 0xc8, 0x8b, 0x33, 0xa1, 0xab, 0x47, 0xbe, 0xdd, 0x5a, 0xd0, 0xd3, + 0xaf, 0xa1, 0xdb, 0xb0, 0xc1, 0x2f, 0x25, 0x4b, 0xcb, 0xfb, 0xca, 0xd2, 0x94, 0x58, 0xa9, 0xec, + 0xa3, 0xed, 0xfe, 0xc4, 0x82, 0x94, 0x45, 0x84, 0x42, 0xa8, 0xd2, 0xee, 0x6e, 0x17, 0x73, 0x37, + 0xbe, 0x49, 0x9a, 0x8a, 0x46, 0x31, 0xed, 0xd9, 0x4f, 0xcc, 0x19, 0x21, 0x4f, 0x44, 0x28, 0xf0, + 0x51, 0xbd, 0x51, 0x1c, 0xc3, 0xab, 0x41, 0xb0, 0xc1, 0x0f, 0x36, 0x75, 0xb4, 0x83, 0xfd, 0x22, + 0x9c, 0xee, 0xea, 0x14, 0xbb, 0xc9, 0x2a, 0xa0, 0xda, 0x27, 0x33, 0x5d, 0x59, 0xda, 0x24, 0xe6, + 0x30, 0xfb, 0xeb, 0x16, 0x9c, 0xca, 0x92, 0x47, 0x6f, 0x59, 0x70, 0x3a, 0xce, 0xd2, 0x3b, 0xae, + 0xb1, 0x53, 0x51, 0x86, 0x5d, 0x20, 0xdc, 0xdd, 0x09, 0xfb, 0xff, 0x8a, 0xc9, 0x7f, 0xdb, 0xf5, + 0x9b, 0xc1, 0x5d, 0x65, 0x98, 0x58, 0x3d, 0x0d, 0x13, 0xba, 0x1e, 0x1b, 0xeb, 0xa4, 0xd9, 0xf1, + 0xba, 0xf2, 0x35, 0x97, 0x45, 0x3b, 0x56, 0x18, 0x2c, 0x3d, 0xad, 0x23, 0x8a, 0xa5, 0x67, 0x26, + 0xe5, 0x8c, 0x68, 0xc7, 0x0a, 0x03, 0x3d, 0x0f, 0xc3, 0xc6, 0x4b, 0xca, 0x79, 0xc9, 0x0c, 0x72, + 0x43, 0x65, 0xc6, 0x38, 0x85, 0x85, 0xc6, 0x01, 0x94, 0x91, 0x23, 0x55, 0x24, 0x73, 0x14, 0x29, + 0x49, 0x14, 0x63, 0x03, 0x83, 0x25, 0x83, 0x7a, 0x9d, 0x98, 0xf9, 0xf8, 0x07, 0x74, 0x01, 0xcf, + 0x69, 0xd1, 0x86, 0x15, 0x94, 0x4a, 0x93, 0xb6, 0xe3, 
0x77, 0x1c, 0x8f, 0x8e, 0x90, 0xd8, 0xfa, + 0xa9, 0x65, 0xb8, 0xa0, 0x20, 0xd8, 0xc0, 0xa2, 0x6f, 0x9c, 0xb8, 0x6d, 0xf2, 0x4a, 0xe0, 0xcb, + 0xe8, 0x30, 0x7d, 0xec, 0x23, 0xda, 0xb1, 0xc2, 0xb0, 0xff, 0x9b, 0x05, 0x27, 0x75, 0x6a, 0x39, + 0xbf, 0xb3, 0xda, 0xdc, 0xa9, 0x5a, 0xfb, 0xee, 0x54, 0xd3, 0x39, 0xb7, 0xa5, 0xbe, 0x72, 0x6e, + 0xcd, 0x74, 0xd8, 0xf2, 0x9e, 0xe9, 0xb0, 0x3f, 0xa0, 0xef, 0x43, 0xe5, 0x79, 0xb3, 0x43, 0x79, + 0x77, 0xa1, 0x22, 0x1b, 0x06, 0x1a, 0x8e, 0xaa, 0xab, 0x32, 0xcc, 0xf7, 0x0e, 0xd3, 0x93, 0x0c, + 0x49, 0x40, 0xec, 0x45, 0xa8, 0xab, 0xd3, 0x0f, 0xb9, 0x51, 0xb5, 0xf2, 0x37, 0xaa, 0x7d, 0xa5, + 0xe5, 0x4d, 0xad, 0x7e, 0xe3, 0x3b, 0x4f, 0xbc, 0xe3, 0x77, 0xbf, 0xf3, 0xc4, 0x3b, 0xfe, 0xe0, + 0x3b, 0x4f, 0xbc, 0xe3, 0x13, 0xbb, 0x4f, 0x58, 0xdf, 0xd8, 0x7d, 0xc2, 0xfa, 0xdd, 0xdd, 0x27, + 0xac, 0x3f, 0xd8, 0x7d, 0xc2, 0xfa, 0xf6, 0xee, 0x13, 0xd6, 0x17, 0xff, 0xcb, 0x13, 0xef, 0x78, + 0x25, 0x37, 0x3c, 0x90, 0xfe, 0x78, 0xb6, 0xd1, 0x9c, 0xd8, 0xbc, 0xc4, 0x22, 0xd4, 0xe8, 0xf2, + 0x9a, 0x30, 0xe6, 0xd4, 0x84, 0x5c, 0x5e, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x13, 0x68, 0x38, + 0x58, 0x64, 0xe0, 0x00, 0x00, } func (m *AWSAuthConfig) Marshal() (dAtA []byte, err error) { @@ -5617,6 +5786,15 @@ func (m *ApplicationPreservedFields) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Labels[iNdEx]) + copy(dAtA[i:], m.Labels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Labels[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } if len(m.Annotations) > 0 { for iNdEx := len(m.Annotations) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Annotations[iNdEx]) @@ -6125,6 +6303,52 @@ func (m *ApplicationSetNestedGenerator) MarshalToSizedBuffer(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *ApplicationSetResourceIgnoreDifferences) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplicationSetResourceIgnoreDifferences) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ApplicationSetResourceIgnoreDifferences) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.JQPathExpressions) > 0 { + for iNdEx := len(m.JQPathExpressions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.JQPathExpressions[iNdEx]) + copy(dAtA[i:], m.JQPathExpressions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JQPathExpressions[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.JSONPointers) > 0 { + for iNdEx := len(m.JSONPointers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.JSONPointers[iNdEx]) + copy(dAtA[i:], m.JSONPointers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPointers[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ApplicationSetRolloutStep) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6231,6 +6455,20 @@ func (m *ApplicationSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.IgnoreApplicationDifferences) > 0 { + for iNdEx := len(m.IgnoreApplicationDifferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
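// [Editorial note; not part of the generated patch.] A hedged reading of the
// tag bytes in the new marshaling code above: a protobuf key byte is
// (field_number << 3) | wire_type, so the 0x12 written for Labels is field 2
// with wire type 2 (length-delimited), and the 0x4a written for
// IgnoreApplicationDifferences is field 9 with wire type 2:
//
//	0x12 = (2 << 3) | 2
//	0x4a = (9 << 3) | 2
//
// gogo/protobuf's MarshalToSizedBuffer fills dAtA back to front (hence the
// descending iNdEx loops and the repeated `i -= len(...)`), which lets each
// nested message's length prefix be written immediately after its payload
// size is known, avoiding a second per-message sizing pass.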
m.IgnoreApplicationDifferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } i-- if m.ApplyNestedSelectors { dAtA[i] = 1 @@ -7038,6 +7276,20 @@ func (m *ApplicationSourceKustomize) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + if len(m.Patches) > 0 { + for iNdEx := len(m.Patches) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Patches[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } if len(m.Replicas) > 0 { for iNdEx := len(m.Replicas) - 1; iNdEx >= 0; iNdEx-- { { @@ -9446,6 +9698,44 @@ func (m *KnownTypeField) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KustomizeGvk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KustomizeGvk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KustomizeGvk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KustomizeOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9479,6 +9769,78 @@ func (m *KustomizeOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KustomizePatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KustomizePatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KustomizePatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i-- + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.Target != nil { + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Patch) + copy(dAtA[i:], m.Patch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch))) + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = 
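// [Editorial note; not part of the generated patch.] The Options loop above
// is a worked example of protobuf map encoding: each map[string]bool entry
// is serialized as a small nested message whose key is field 1 (tag 0xa,
// string) and whose value is field 2 (tag 0x10, varint bool written as 0 or
// 1). The completed entry is then length-prefixed with (baseI - i) and
// tagged 0x22, i.e. field 4 of KustomizePatch. Sorting the keys with
// github_com_gogo_protobuf_sortkeys.Strings keeps the byte output
// deterministic across runs, since Go map iteration order is randomized.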
encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *KustomizeReplica) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9517,6 +9879,92 @@ func (m *KustomizeReplica) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KustomizeResId) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KustomizeResId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KustomizeResId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + { + size, err := m.KustomizeGvk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KustomizeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KustomizeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KustomizeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0x1a + i -= len(m.AnnotationSelector) + copy(dAtA[i:], m.AnnotationSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AnnotationSelector))) + i-- + dAtA[i] = 0x12 + { + size, err := m.KustomizeResId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *ListGenerator) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13089,6 +13537,21 @@ func (m *SCMProviderGeneratorGitlab) MarshalToSizedBuffer(dAtA []byte) (int, err _ = i var l int _ = l + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x42 + if m.IncludeSharedProjects != nil { + i-- + if *m.IncludeSharedProjects { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } i-- if m.Insecure { dAtA[i] = 1 @@ -14147,6 +14610,12 @@ func (m *ApplicationPreservedFields) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Labels) > 0 { + for _, s := range m.Labels { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -14322,6 +14791,29 @@ func (m *ApplicationSetNestedGenerator) Size() (n int) { return n } +func (m *ApplicationSetResourceIgnoreDifferences) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.JSONPointers) > 0 { + for _, s := range m.JSONPointers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
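// [Editorial note; not part of the generated patch.] Two field styles are
// visible in the SCMProviderGeneratorGitlab hunk above: the plain bool
// Insecure is always written, while IncludeSharedProjects is a *bool that is
// emitted only when non-nil (tag 0x38 = field 7, varint), so an unset value
// stays distinguishable from an explicit false. The new Topic string uses
// tag 0x42 = field 8 with wire type 2.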
len(m.JQPathExpressions) > 0 { + for _, s := range m.JQPathExpressions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ApplicationSetRolloutStep) Size() (n int) { if m == nil { return 0 @@ -14390,6 +14882,12 @@ func (m *ApplicationSetSpec) Size() (n int) { } } n += 2 + if len(m.IgnoreApplicationDifferences) > 0 { + for _, e := range m.IgnoreApplicationDifferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -14695,6 +15193,12 @@ func (m *ApplicationSourceKustomize) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if len(m.Patches) > 0 { + for _, e := range m.Patches { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -15565,6 +16069,21 @@ func (m *KnownTypeField) Size() (n int) { return n } +func (m *KustomizeGvk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *KustomizeOptions) Size() (n int) { if m == nil { return 0 @@ -15578,6 +16097,31 @@ func (m *KustomizeOptions) Size() (n int) { return n } +func (m *KustomizePatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Patch) + n += 1 + l + sovGenerated(uint64(l)) + if m.Target != nil { + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + func (m *KustomizeReplica) Size() (n int) { if m == nil { return 0 @@ -15591,6 +16135,36 @@ func (m *KustomizeReplica) Size() (n int) { return n } +func (m *KustomizeResId) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KustomizeGvk.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *KustomizeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KustomizeResId.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AnnotationSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *ListGenerator) Size() (n int) { if m == nil { return 0 @@ -16935,6 +17509,11 @@ func (m *SCMProviderGeneratorGitlab) Size() (n int) { } n += 2 n += 2 + if m.IncludeSharedProjects != nil { + n += 2 + } + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -17434,6 +18013,7 @@ func (this *ApplicationPreservedFields) String() string { } s := strings.Join([]string{`&ApplicationPreservedFields{`, `Annotations:` + fmt.Sprintf("%v", this.Annotations) + `,`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, `}`, }, "") return s @@ -17532,6 +18112,18 @@ func (this *ApplicationSetNestedGenerator) String() string { }, "") return s } +func (this *ApplicationSetResourceIgnoreDifferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ApplicationSetResourceIgnoreDifferences{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `JSONPointers:` + fmt.Sprintf("%v", this.JSONPointers) + `,`, + `JQPathExpressions:` + fmt.Sprintf("%v", this.JQPathExpressions) + `,`, 
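// [Editorial note; not part of the generated patch.] In the Size() methods
// above, the recurring n += 1 + l + sovGenerated(uint64(l)) accounts for one
// tag byte, the varint length prefix, and the payload itself: sovGenerated
// returns how many base-128 varint bytes a value needs (1 byte below 128,
// 2 bytes below 16384, and so on). A 300-byte string field therefore costs
// 1 + 2 + 300 = 303 bytes on the wire; 300 itself encodes as 0xAC 0x02.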
+ `}`, + }, "") + return s +} func (this *ApplicationSetRolloutStep) String() string { if this == nil { return "nil" @@ -17572,6 +18164,11 @@ func (this *ApplicationSetSpec) String() string { repeatedStringForGenerators += strings.Replace(strings.Replace(f.String(), "ApplicationSetGenerator", "ApplicationSetGenerator", 1), `&`, ``, 1) + "," } repeatedStringForGenerators += "}" + repeatedStringForIgnoreApplicationDifferences := "[]ApplicationSetResourceIgnoreDifferences{" + for _, f := range this.IgnoreApplicationDifferences { + repeatedStringForIgnoreApplicationDifferences += strings.Replace(strings.Replace(f.String(), "ApplicationSetResourceIgnoreDifferences", "ApplicationSetResourceIgnoreDifferences", 1), `&`, ``, 1) + "," + } + repeatedStringForIgnoreApplicationDifferences += "}" s := strings.Join([]string{`&ApplicationSetSpec{`, `GoTemplate:` + fmt.Sprintf("%v", this.GoTemplate) + `,`, `Generators:` + repeatedStringForGenerators + `,`, @@ -17581,6 +18178,7 @@ func (this *ApplicationSetSpec) String() string { `PreservedFields:` + strings.Replace(this.PreservedFields.String(), "ApplicationPreservedFields", "ApplicationPreservedFields", 1) + `,`, `GoTemplateOptions:` + fmt.Sprintf("%v", this.GoTemplateOptions) + `,`, `ApplyNestedSelectors:` + fmt.Sprintf("%v", this.ApplyNestedSelectors) + `,`, + `IgnoreApplicationDifferences:` + repeatedStringForIgnoreApplicationDifferences + `,`, `}`, }, "") return s @@ -17781,6 +18379,11 @@ func (this *ApplicationSourceKustomize) String() string { repeatedStringForReplicas += strings.Replace(strings.Replace(f.String(), "KustomizeReplica", "KustomizeReplica", 1), `&`, ``, 1) + "," } repeatedStringForReplicas += "}" + repeatedStringForPatches := "[]KustomizePatch{" + for _, f := range this.Patches { + repeatedStringForPatches += strings.Replace(strings.Replace(f.String(), "KustomizePatch", "KustomizePatch", 1), `&`, ``, 1) + "," + } + repeatedStringForPatches += "}" keysForCommonLabels := make([]string, 0, len(this.CommonLabels)) for k := range this.CommonLabels { keysForCommonLabels = append(keysForCommonLabels, k) @@ -17813,6 +18416,7 @@ func (this *ApplicationSourceKustomize) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `CommonAnnotationsEnvsubst:` + fmt.Sprintf("%v", this.CommonAnnotationsEnvsubst) + `,`, `Replicas:` + repeatedStringForReplicas + `,`, + `Patches:` + repeatedStringForPatches + `,`, `}`, }, "") return s @@ -18511,6 +19115,18 @@ func (this *KnownTypeField) String() string { }, "") return s } +func (this *KustomizeGvk) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KustomizeGvk{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} func (this *KustomizeOptions) String() string { if this == nil { return "nil" @@ -18522,6 +19138,29 @@ func (this *KustomizeOptions) String() string { }, "") return s } +func (this *KustomizePatch) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]bool{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&KustomizePatch{`, + `Path:` + fmt.Sprintf("%v", this.Path) + 
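// [Editorial note; not part of the generated patch.] These String() methods
// build debug output by hand instead of reflecting over the struct: repeated
// message fields are pre-rendered into a "[]Type{...}" string, and the outer
// strings.Replace strips the leading `&` from each nested message so the
// list reads flat. Map fields reuse the same sorted-keys idiom as Marshal,
// so log lines and test diffs stay stable.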
`,`, + `Patch:` + fmt.Sprintf("%v", this.Patch) + `,`, + `Target:` + strings.Replace(this.Target.String(), "KustomizeSelector", "KustomizeSelector", 1) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} func (this *KustomizeReplica) String() string { if this == nil { return "nil" @@ -18533,6 +19172,30 @@ func (this *KustomizeReplica) String() string { }, "") return s } +func (this *KustomizeResId) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KustomizeResId{`, + `KustomizeGvk:` + strings.Replace(strings.Replace(this.KustomizeGvk.String(), "KustomizeGvk", "KustomizeGvk", 1), `&`, ``, 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *KustomizeSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KustomizeSelector{`, + `KustomizeResId:` + strings.Replace(strings.Replace(this.KustomizeResId.String(), "KustomizeResId", "KustomizeResId", 1), `&`, ``, 1) + `,`, + `AnnotationSelector:` + fmt.Sprintf("%v", this.AnnotationSelector) + `,`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `}`, + }, "") + return s +} func (this *ListGenerator) String() string { if this == nil { return "nil" @@ -19515,6 +20178,8 @@ func (this *SCMProviderGeneratorGitlab) String() string { `TokenRef:` + strings.Replace(this.TokenRef.String(), "SecretRef", "SecretRef", 1) + `,`, `AllBranches:` + fmt.Sprintf("%v", this.AllBranches) + `,`, `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `IncludeSharedProjects:` + valueToStringGenerated(this.IncludeSharedProjects) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, `}`, }, "") return s @@ -21564,6 +22229,38 @@ func (m *ApplicationPreservedFields) Unmarshal(dAtA []byte) error { } m.Annotations = append(m.Annotations, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23099,7 +23796,7 @@ func (m *ApplicationSetNestedGenerator) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { +func (m *ApplicationSetResourceIgnoreDifferences) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23122,17 +23819,17 @@ func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetRolloutStep: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSetResourceIgnoreDifferences: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetRolloutStep: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: 
ApplicationSetResourceIgnoreDifferences: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23142,31 +23839,29 @@ func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchExpressions = append(m.MatchExpressions, ApplicationMatchExpression{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONPointers", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23176,27 +23871,55 @@ func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.MaxUpdate == nil { - m.MaxUpdate = &intstr.IntOrString{} + m.JSONPointers = append(m.JSONPointers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JQPathExpressions", wireType) } - if err := m.MaxUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JQPathExpressions = append(m.JQPathExpressions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -23219,7 +23942,7 @@ func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSetRolloutStrategy) Unmarshal(dAtA []byte) error { +func (m *ApplicationSetRolloutStep) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23242,15 +23965,135 @@ func (m *ApplicationSetRolloutStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetRolloutStrategy: wiretype end group for non-group") + 
return fmt.Errorf("proto: ApplicationSetRolloutStep: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetRolloutStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSetRolloutStep: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, ApplicationMatchExpression{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUpdate == nil { + m.MaxUpdate = &intstr.IntOrString{} + } + if err := m.MaxUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationSetRolloutStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationSetRolloutStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationSetRolloutStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23579,93 +24422,9 @@ func (m *ApplicationSetSpec) Unmarshal(dAtA []byte) error { } } m.ApplyNestedSelectors = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ApplicationSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplicationStatus", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreApplicationDifferences", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23692,8 +24451,8 @@ func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ApplicationStatus = append(m.ApplicationStatus, ApplicationSetApplicationStatus{}) - if err := m.ApplicationStatus[len(m.ApplicationStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.IgnoreApplicationDifferences = append(m.IgnoreApplicationDifferences, ApplicationSetResourceIgnoreDifferences{}) + if err := m.IgnoreApplicationDifferences[len(m.IgnoreApplicationDifferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23718,7 +24477,7 @@ func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { +func (m *ApplicationSetStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23741,17 +24500,17 @@ func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetStrategy: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSetStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23761,27 +24520,29 @@ func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Conditions = append(m.Conditions, ApplicationSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationStatus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -23808,10 +24569,8 @@ func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RollingSync == nil { - m.RollingSync = &ApplicationSetRolloutStrategy{} - } - if err := m.RollingSync.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ApplicationStatus = append(m.ApplicationStatus, ApplicationSetApplicationStatus{}) + if err := m.ApplicationStatus[len(m.ApplicationStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -23836,7 +24595,7 @@ func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSetSyncPolicy) Unmarshal(dAtA []byte) error { +func (m *ApplicationSetStrategy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -23859,35 +24618,153 @@ func (m *ApplicationSetSyncPolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSetSyncPolicy: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSetStrategy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSetSyncPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PreserveResourcesOnDeletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PreserveResourcesOnDeletion = bool(v != 0) - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplicationsSync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingSync", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingSync == nil { + m.RollingSync = &ApplicationSetRolloutStrategy{} + } + if err := m.RollingSync.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationSetSyncPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationSetSyncPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationSetSyncPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PreserveResourcesOnDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PreserveResourcesOnDeletion = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplicationsSync", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26343,125 +27220,9 @@ func (m *ApplicationSourceKustomize) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplicationSourcePlugin) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplicationSourcePlugin: wiretype end group for non-group") 
- } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSourcePlugin: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, &EnvEntry{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Patches", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26488,8 +27249,8 @@ func (m *ApplicationSourcePlugin) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, ApplicationSourcePluginParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Patches = append(m.Patches, KustomizePatch{}) + if err := m.Patches[len(m.Patches)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -26514,7 +27275,7 @@ func (m *ApplicationSourcePlugin) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { +func (m *ApplicationSourcePlugin) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26537,10 +27298,10 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSourcePluginParameter: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSourcePlugin: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSourcePluginParameter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSourcePlugin: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -26575,9 +27336,9 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OptionalMap", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } var msglen 
int for shift := uint(0); ; shift += 7 { @@ -26604,16 +27365,14 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.OptionalMap == nil { - m.OptionalMap = &OptionalMap{} - } - if err := m.OptionalMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Env = append(m.Env, &EnvEntry{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OptionalArray", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26640,46 +27399,11 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.OptionalArray == nil { - m.OptionalArray = &OptionalArray{} - } - if err := m.OptionalArray.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Parameters = append(m.Parameters, ApplicationSourcePluginParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.String_ = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -26701,7 +27425,7 @@ func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { +func (m *ApplicationSourcePluginParameter) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -26724,84 +27448,15 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationSpec: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSourcePluginParameter: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSourcePluginParameter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Source == nil { - m.Source = &ApplicationSource{} - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Destination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -26829,11 +27484,11 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Project = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SyncPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OptionalMap", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26860,16 +27515,16 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SyncPolicy == nil { - m.SyncPolicy = &SyncPolicy{} + if m.OptionalMap == nil { + m.OptionalMap = &OptionalMap{} } - if err := m.SyncPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.OptionalMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreDifferences", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OptionalArray", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -26896,70 +27551,18 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IgnoreDifferences = append(m.IgnoreDifferences, ResourceIgnoreDifferences{}) - if err := m.IgnoreDifferences[len(m.IgnoreDifferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.OptionalArray == nil { + m.OptionalArray = &OptionalArray{} } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.OptionalArray.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 8: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -26969,25 +27572,24 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Sources = append(m.Sources, ApplicationSource{}) - if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.String_ = &s iNdEx = postIndex default: iNdEx = preIndex @@ -27010,7 +27612,7 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { +func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -27033,15 +27635,15 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationStatus: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27068,14 +27670,16 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, ResourceStatus{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Source == nil { + m.Source = &ApplicationSource{} + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Destination", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27102,15 +27706,15 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Sync.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Destination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Project", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -27120,28 +27724,27 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Health.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Project = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field History", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SyncPolicy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27168,14 +27771,16 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.History = append(m.History, RevisionHistory{}) - if err := m.History[len(m.History)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SyncPolicy == nil { + m.SyncPolicy = &SyncPolicy{} + } + if err := m.SyncPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreDifferences", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -27202,14 +27807,320 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ApplicationCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.IgnoreDifferences = append(m.IgnoreDifferences, ResourceIgnoreDifferences{}) + if err := m.IgnoreDifferences[len(m.IgnoreDifferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReconciledAt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Info = append(m.Info, Info{}) + if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, ApplicationSource{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceStatus{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Sync.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Health.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field History", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.History = append(m.History, RevisionHistory{}) + if err := m.History[len(m.History)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ApplicationCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReconciledAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33845,6 +34756,152 @@ func (m *KnownTypeField) Unmarshal(dAtA []byte) error { } return nil } +func (m *KustomizeGvk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizeGvk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizeGvk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *KustomizeOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -33936,7 +34993,387 @@ func (m *KustomizeOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.BinaryPath = string(dAtA[iNdEx:postIndex]) + m.BinaryPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KustomizePatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizePatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizePatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Patch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Patch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Target == nil { + m.Target = &KustomizeSelector{} + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]bool) + } + var mapkey string + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizeReplica: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizeReplica: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -33959,7 +35396,7 @@ func (m *KustomizeOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { +func (m *KustomizeResId) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33982,13 +35419,46 @@ func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KustomizeReplica: wiretype end group for non-group") + return fmt.Errorf("proto: KustomizeResId: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KustomizeReplica: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KustomizeResId: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KustomizeGvk", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.KustomizeGvk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } @@ -34020,9 +35490,91 @@ func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KustomizeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KustomizeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KustomizeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KustomizeResId", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34049,10 +35601,74 @@ func (m *KustomizeReplica) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Count.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KustomizeResId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnnotationSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 
{ + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnnotationSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -46460,6 +48076,59 @@ func (m *SCMProviderGeneratorGitlab) Unmarshal(dAtA []byte) error { } } m.Insecure = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeSharedProjects", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IncludeSharedProjects = &b + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Topic = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto index c9225e327ae..ec6363dbd0b 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto @@ -130,14 +130,14 @@ message ApplicationCondition { // ApplicationDestination holds information about the application's destination message ApplicationDestination { - // Server specifies the URL of the target cluster and must be set to the Kubernetes control plane API + // Server specifies the URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set. optional string server = 1; // Namespace specifies the target namespace for the application's resources. 
// The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace optional string namespace = 2; - // Name is an alternate way of specifying the target cluster by its symbolic name + // Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set. optional string name = 3; } @@ -159,6 +159,8 @@ message ApplicationMatchExpression { message ApplicationPreservedFields { repeated string annotations = 1; + + repeated string labels = 2; } // ApplicationSet is a set of Application resources @@ -271,6 +273,19 @@ message ApplicationSetNestedGenerator { optional PluginGenerator plugin = 10; } +// ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live +// applications when applying changes from generated applications. +message ApplicationSetResourceIgnoreDifferences { + // Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications. + optional string name = 1; + + // JSONPointers is a list of JSON pointers to fields to ignore differences for. + repeated string jsonPointers = 2; + + // JQPathExpressions is a list of JQ path expressions to fields to ignore differences for. + repeated string jqPathExpressions = 3; +} + message ApplicationSetRolloutStep { repeated ApplicationMatchExpression matchExpressions = 1; @@ -299,6 +314,8 @@ message ApplicationSetSpec { // ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators optional bool applyNestedSelectors = 8; + + repeated ApplicationSetResourceIgnoreDifferences ignoreApplicationDifferences = 9; } // ApplicationSetStatus defines the observed state of ApplicationSet @@ -501,6 +518,9 @@ message ApplicationSourceKustomize { // Replicas is a list of Kustomize Replicas override specifications repeated KustomizeReplica replicas = 11; + + // Patches is a list of Kustomize patches + repeated KustomizePatch patches = 12; } // ApplicationSourcePlugin holds options specific to config management plugins @@ -1056,6 +1076,14 @@ message KnownTypeField { optional string type = 2; } +message KustomizeGvk { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + // KustomizeOptions are options for kustomize to use when building manifests message KustomizeOptions { // BuildOptions is a string of build parameters to use when calling `kustomize build` @@ -1065,6 +1093,16 @@ message KustomizeOptions { optional string binaryPath = 2; } +message KustomizePatch { + optional string path = 1; + + optional string patch = 2; + + optional KustomizeSelector target = 3; + + map options = 4; +} + message KustomizeReplica { // Name of Deployment or StatefulSet optional string name = 1; @@ -1073,6 +1111,22 @@ message KustomizeReplica { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString count = 2; } +message KustomizeResId { + optional KustomizeGvk gvk = 1; + + optional string name = 2; + + optional string namespace = 3; +} + +message KustomizeSelector { + optional KustomizeResId resId = 1; + + optional string annotationSelector = 2; + + optional string labelSelector = 3; +} + // ListGenerator include items info message ListGenerator { // +kubebuilder:validation:Optional @@ -2036,6 +2090,12 @@ message SCMProviderGeneratorGitlab { // Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false optional bool insecure = 6; 
+ + // When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). Defaults to "true" + optional bool includeSharedProjects = 7; + + // Filter repos list based on Gitlab Topic. + optional string topic = 8; } // Utility struct for a reference to a secret key. diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go index 1387b953892..faaec52bbb2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go @@ -14,155 +14,160 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AWSAuthConfig": schema_pkg_apis_application_v1alpha1_AWSAuthConfig(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProject": schema_pkg_apis_application_v1alpha1_AppProject(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectList": schema_pkg_apis_application_v1alpha1_AppProjectList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectSpec": schema_pkg_apis_application_v1alpha1_AppProjectSpec(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectStatus": schema_pkg_apis_application_v1alpha1_AppProjectStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Application": schema_pkg_apis_application_v1alpha1_Application(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationCondition": schema_pkg_apis_application_v1alpha1_ApplicationCondition(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination": schema_pkg_apis_application_v1alpha1_ApplicationDestination(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationList": schema_pkg_apis_application_v1alpha1_ApplicationList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationMatchExpression": schema_pkg_apis_application_v1alpha1_ApplicationMatchExpression(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields": schema_pkg_apis_application_v1alpha1_ApplicationPreservedFields(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSet": schema_pkg_apis_application_v1alpha1_ApplicationSet(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetApplicationStatus": schema_pkg_apis_application_v1alpha1_ApplicationSetApplicationStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetCondition": schema_pkg_apis_application_v1alpha1_ApplicationSetCondition(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetList": schema_pkg_apis_application_v1alpha1_ApplicationSetList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetNestedGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetNestedGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetRolloutStep": 
schema_pkg_apis_application_v1alpha1_ApplicationSetRolloutStep(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetRolloutStrategy": schema_pkg_apis_application_v1alpha1_ApplicationSetRolloutStrategy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSpec": schema_pkg_apis_application_v1alpha1_ApplicationSetSpec(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStatus": schema_pkg_apis_application_v1alpha1_ApplicationSetStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy": schema_pkg_apis_application_v1alpha1_ApplicationSetStrategy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy": schema_pkg_apis_application_v1alpha1_ApplicationSetSyncPolicy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplate(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplateMeta": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplateMeta(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTerminalGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetTerminalGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource": schema_pkg_apis_application_v1alpha1_ApplicationSource(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceDirectory": schema_pkg_apis_application_v1alpha1_ApplicationSourceDirectory(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceHelm": schema_pkg_apis_application_v1alpha1_ApplicationSourceHelm(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceJsonnet": schema_pkg_apis_application_v1alpha1_ApplicationSourceJsonnet(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceKustomize": schema_pkg_apis_application_v1alpha1_ApplicationSourceKustomize(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourcePlugin": schema_pkg_apis_application_v1alpha1_ApplicationSourcePlugin(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourcePluginParameter": schema_pkg_apis_application_v1alpha1_ApplicationSourcePluginParameter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSpec": schema_pkg_apis_application_v1alpha1_ApplicationSpec(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationStatus": schema_pkg_apis_application_v1alpha1_ApplicationStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSummary": schema_pkg_apis_application_v1alpha1_ApplicationSummary(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationTree": schema_pkg_apis_application_v1alpha1_ApplicationTree(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationWatchEvent": schema_pkg_apis_application_v1alpha1_ApplicationWatchEvent(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Backoff": schema_pkg_apis_application_v1alpha1_Backoff(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BasicAuthBitbucketServer": schema_pkg_apis_application_v1alpha1_BasicAuthBitbucketServer(ref), - 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BearerTokenBitbucketCloud": schema_pkg_apis_application_v1alpha1_BearerTokenBitbucketCloud(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ChartDetails": schema_pkg_apis_application_v1alpha1_ChartDetails(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Cluster": schema_pkg_apis_application_v1alpha1_Cluster(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterCacheInfo": schema_pkg_apis_application_v1alpha1_ClusterCacheInfo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterConfig": schema_pkg_apis_application_v1alpha1_ClusterConfig(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator": schema_pkg_apis_application_v1alpha1_ClusterGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterInfo": schema_pkg_apis_application_v1alpha1_ClusterInfo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterList": schema_pkg_apis_application_v1alpha1_ClusterList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Command": schema_pkg_apis_application_v1alpha1_Command(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ComparedTo": schema_pkg_apis_application_v1alpha1_ComparedTo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ComponentParameter": schema_pkg_apis_application_v1alpha1_ComponentParameter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConfigManagementPlugin": schema_pkg_apis_application_v1alpha1_ConfigManagementPlugin(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConnectionState": schema_pkg_apis_application_v1alpha1_ConnectionState(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator": schema_pkg_apis_application_v1alpha1_DuckTypeGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.EnvEntry": schema_pkg_apis_application_v1alpha1_EnvEntry(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ErrApplicationNotAllowedToUseProject": schema_pkg_apis_application_v1alpha1_ErrApplicationNotAllowedToUseProject(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ExecProviderConfig": schema_pkg_apis_application_v1alpha1_ExecProviderConfig(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitDirectoryGeneratorItem": schema_pkg_apis_application_v1alpha1_GitDirectoryGeneratorItem(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitFileGeneratorItem": schema_pkg_apis_application_v1alpha1_GitFileGeneratorItem(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator": schema_pkg_apis_application_v1alpha1_GitGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GnuPGPublicKey": schema_pkg_apis_application_v1alpha1_GnuPGPublicKey(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GnuPGPublicKeyList": schema_pkg_apis_application_v1alpha1_GnuPGPublicKeyList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HealthStatus": schema_pkg_apis_application_v1alpha1_HealthStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmFileParameter": schema_pkg_apis_application_v1alpha1_HelmFileParameter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmOptions": schema_pkg_apis_application_v1alpha1_HelmOptions(ref), 
- "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmParameter": schema_pkg_apis_application_v1alpha1_HelmParameter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostInfo": schema_pkg_apis_application_v1alpha1_HostInfo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostResourceInfo": schema_pkg_apis_application_v1alpha1_HostResourceInfo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Info": schema_pkg_apis_application_v1alpha1_Info(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.InfoItem": schema_pkg_apis_application_v1alpha1_InfoItem(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JWTToken": schema_pkg_apis_application_v1alpha1_JWTToken(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JWTTokens": schema_pkg_apis_application_v1alpha1_JWTTokens(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JsonnetVar": schema_pkg_apis_application_v1alpha1_JsonnetVar(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KnownTypeField": schema_pkg_apis_application_v1alpha1_KnownTypeField(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeOptions": schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica": schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator": schema_pkg_apis_application_v1alpha1_ListGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ManagedNamespaceMetadata": schema_pkg_apis_application_v1alpha1_ManagedNamespaceMetadata(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MatrixGenerator": schema_pkg_apis_application_v1alpha1_MatrixGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MergeGenerator": schema_pkg_apis_application_v1alpha1_MergeGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.NestedMatrixGenerator": schema_pkg_apis_application_v1alpha1_NestedMatrixGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.NestedMergeGenerator": schema_pkg_apis_application_v1alpha1_NestedMergeGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Operation": schema_pkg_apis_application_v1alpha1_Operation(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationInitiator": schema_pkg_apis_application_v1alpha1_OperationInitiator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationState": schema_pkg_apis_application_v1alpha1_OperationState(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalArray": schema_pkg_apis_application_v1alpha1_OptionalArray(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalMap": schema_pkg_apis_application_v1alpha1_OptionalMap(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourceKey": schema_pkg_apis_application_v1alpha1_OrphanedResourceKey(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourcesMonitorSettings": schema_pkg_apis_application_v1alpha1_OrphanedResourcesMonitorSettings(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OverrideIgnoreDiff": schema_pkg_apis_application_v1alpha1_OverrideIgnoreDiff(ref), - 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginConfigMapRef": schema_pkg_apis_application_v1alpha1_PluginConfigMapRef(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator": schema_pkg_apis_application_v1alpha1_PluginGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginInput": schema_pkg_apis_application_v1alpha1_PluginInput(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ProjectRole": schema_pkg_apis_application_v1alpha1_ProjectRole(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator": schema_pkg_apis_application_v1alpha1_PullRequestGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorAzureDevOps(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucket(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucketServer(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorFilter": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorFilter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitLab": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitLab(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitea": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitea(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGithub": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGithub(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RefTarget": schema_pkg_apis_application_v1alpha1_RefTarget(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepoCreds": schema_pkg_apis_application_v1alpha1_RepoCreds(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepoCredsList": schema_pkg_apis_application_v1alpha1_RepoCredsList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Repository": schema_pkg_apis_application_v1alpha1_Repository(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryCertificate": schema_pkg_apis_application_v1alpha1_RepositoryCertificate(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryCertificateList": schema_pkg_apis_application_v1alpha1_RepositoryCertificateList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryList": schema_pkg_apis_application_v1alpha1_RepositoryList(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceAction": schema_pkg_apis_application_v1alpha1_ResourceAction(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActionDefinition": schema_pkg_apis_application_v1alpha1_ResourceActionDefinition(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActionParam": schema_pkg_apis_application_v1alpha1_ResourceActionParam(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActions": schema_pkg_apis_application_v1alpha1_ResourceActions(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceDiff": 
schema_pkg_apis_application_v1alpha1_ResourceDiff(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences": schema_pkg_apis_application_v1alpha1_ResourceIgnoreDifferences(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNetworkingInfo": schema_pkg_apis_application_v1alpha1_ResourceNetworkingInfo(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNode": schema_pkg_apis_application_v1alpha1_ResourceNode(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceOverride": schema_pkg_apis_application_v1alpha1_ResourceOverride(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceRef": schema_pkg_apis_application_v1alpha1_ResourceRef(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceResult": schema_pkg_apis_application_v1alpha1_ResourceResult(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus": schema_pkg_apis_application_v1alpha1_ResourceStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RetryStrategy": schema_pkg_apis_application_v1alpha1_RetryStrategy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionHistory": schema_pkg_apis_application_v1alpha1_RevisionHistory(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionMetadata": schema_pkg_apis_application_v1alpha1_RevisionMetadata(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator": schema_pkg_apis_application_v1alpha1_SCMProviderGenerator(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAWSCodeCommit": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAWSCodeCommit(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAzureDevOps(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucket(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucketServer(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorFilter": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorFilter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitea": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitea(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGithub": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGithub(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitlab": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitlab(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef": schema_pkg_apis_application_v1alpha1_SecretRef(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SignatureKey": schema_pkg_apis_application_v1alpha1_SignatureKey(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperation": schema_pkg_apis_application_v1alpha1_SyncOperation(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResource": schema_pkg_apis_application_v1alpha1_SyncOperationResource(ref), - 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResult": schema_pkg_apis_application_v1alpha1_SyncOperationResult(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicy": schema_pkg_apis_application_v1alpha1_SyncPolicy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicyAutomated": schema_pkg_apis_application_v1alpha1_SyncPolicyAutomated(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStatus": schema_pkg_apis_application_v1alpha1_SyncStatus(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategy": schema_pkg_apis_application_v1alpha1_SyncStrategy(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyApply": schema_pkg_apis_application_v1alpha1_SyncStrategyApply(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyHook": schema_pkg_apis_application_v1alpha1_SyncStrategyHook(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncWindow": schema_pkg_apis_application_v1alpha1_SyncWindow(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TLSClientConfig": schema_pkg_apis_application_v1alpha1_TLSClientConfig(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TagFilter": schema_pkg_apis_application_v1alpha1_TagFilter(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.objectMeta": schema_pkg_apis_application_v1alpha1_objectMeta(ref), - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.rawResourceOverride": schema_pkg_apis_application_v1alpha1_rawResourceOverride(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AWSAuthConfig": schema_pkg_apis_application_v1alpha1_AWSAuthConfig(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProject": schema_pkg_apis_application_v1alpha1_AppProject(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectList": schema_pkg_apis_application_v1alpha1_AppProjectList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectSpec": schema_pkg_apis_application_v1alpha1_AppProjectSpec(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.AppProjectStatus": schema_pkg_apis_application_v1alpha1_AppProjectStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Application": schema_pkg_apis_application_v1alpha1_Application(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationCondition": schema_pkg_apis_application_v1alpha1_ApplicationCondition(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination": schema_pkg_apis_application_v1alpha1_ApplicationDestination(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationList": schema_pkg_apis_application_v1alpha1_ApplicationList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationMatchExpression": schema_pkg_apis_application_v1alpha1_ApplicationMatchExpression(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields": schema_pkg_apis_application_v1alpha1_ApplicationPreservedFields(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSet": schema_pkg_apis_application_v1alpha1_ApplicationSet(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetApplicationStatus": 
schema_pkg_apis_application_v1alpha1_ApplicationSetApplicationStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetCondition": schema_pkg_apis_application_v1alpha1_ApplicationSetCondition(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetList": schema_pkg_apis_application_v1alpha1_ApplicationSetList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetNestedGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetNestedGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetResourceIgnoreDifferences": schema_pkg_apis_application_v1alpha1_ApplicationSetResourceIgnoreDifferences(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetRolloutStep": schema_pkg_apis_application_v1alpha1_ApplicationSetRolloutStep(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetRolloutStrategy": schema_pkg_apis_application_v1alpha1_ApplicationSetRolloutStrategy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSpec": schema_pkg_apis_application_v1alpha1_ApplicationSetSpec(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStatus": schema_pkg_apis_application_v1alpha1_ApplicationSetStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy": schema_pkg_apis_application_v1alpha1_ApplicationSetStrategy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy": schema_pkg_apis_application_v1alpha1_ApplicationSetSyncPolicy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplate(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplateMeta": schema_pkg_apis_application_v1alpha1_ApplicationSetTemplateMeta(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTerminalGenerator": schema_pkg_apis_application_v1alpha1_ApplicationSetTerminalGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource": schema_pkg_apis_application_v1alpha1_ApplicationSource(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceDirectory": schema_pkg_apis_application_v1alpha1_ApplicationSourceDirectory(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceHelm": schema_pkg_apis_application_v1alpha1_ApplicationSourceHelm(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceJsonnet": schema_pkg_apis_application_v1alpha1_ApplicationSourceJsonnet(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourceKustomize": schema_pkg_apis_application_v1alpha1_ApplicationSourceKustomize(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourcePlugin": schema_pkg_apis_application_v1alpha1_ApplicationSourcePlugin(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSourcePluginParameter": schema_pkg_apis_application_v1alpha1_ApplicationSourcePluginParameter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSpec": 
schema_pkg_apis_application_v1alpha1_ApplicationSpec(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationStatus": schema_pkg_apis_application_v1alpha1_ApplicationStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSummary": schema_pkg_apis_application_v1alpha1_ApplicationSummary(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationTree": schema_pkg_apis_application_v1alpha1_ApplicationTree(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationWatchEvent": schema_pkg_apis_application_v1alpha1_ApplicationWatchEvent(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Backoff": schema_pkg_apis_application_v1alpha1_Backoff(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BasicAuthBitbucketServer": schema_pkg_apis_application_v1alpha1_BasicAuthBitbucketServer(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.BearerTokenBitbucketCloud": schema_pkg_apis_application_v1alpha1_BearerTokenBitbucketCloud(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ChartDetails": schema_pkg_apis_application_v1alpha1_ChartDetails(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Cluster": schema_pkg_apis_application_v1alpha1_Cluster(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterCacheInfo": schema_pkg_apis_application_v1alpha1_ClusterCacheInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterConfig": schema_pkg_apis_application_v1alpha1_ClusterConfig(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterGenerator": schema_pkg_apis_application_v1alpha1_ClusterGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterInfo": schema_pkg_apis_application_v1alpha1_ClusterInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ClusterList": schema_pkg_apis_application_v1alpha1_ClusterList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Command": schema_pkg_apis_application_v1alpha1_Command(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ComparedTo": schema_pkg_apis_application_v1alpha1_ComparedTo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ComponentParameter": schema_pkg_apis_application_v1alpha1_ComponentParameter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConfigManagementPlugin": schema_pkg_apis_application_v1alpha1_ConfigManagementPlugin(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConnectionState": schema_pkg_apis_application_v1alpha1_ConnectionState(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator": schema_pkg_apis_application_v1alpha1_DuckTypeGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.EnvEntry": schema_pkg_apis_application_v1alpha1_EnvEntry(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ErrApplicationNotAllowedToUseProject": schema_pkg_apis_application_v1alpha1_ErrApplicationNotAllowedToUseProject(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ExecProviderConfig": schema_pkg_apis_application_v1alpha1_ExecProviderConfig(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitDirectoryGeneratorItem": schema_pkg_apis_application_v1alpha1_GitDirectoryGeneratorItem(ref), + 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitFileGeneratorItem": schema_pkg_apis_application_v1alpha1_GitFileGeneratorItem(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GitGenerator": schema_pkg_apis_application_v1alpha1_GitGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GnuPGPublicKey": schema_pkg_apis_application_v1alpha1_GnuPGPublicKey(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.GnuPGPublicKeyList": schema_pkg_apis_application_v1alpha1_GnuPGPublicKeyList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HealthStatus": schema_pkg_apis_application_v1alpha1_HealthStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmFileParameter": schema_pkg_apis_application_v1alpha1_HelmFileParameter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmOptions": schema_pkg_apis_application_v1alpha1_HelmOptions(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmParameter": schema_pkg_apis_application_v1alpha1_HelmParameter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostInfo": schema_pkg_apis_application_v1alpha1_HostInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostResourceInfo": schema_pkg_apis_application_v1alpha1_HostResourceInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Info": schema_pkg_apis_application_v1alpha1_Info(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.InfoItem": schema_pkg_apis_application_v1alpha1_InfoItem(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JWTToken": schema_pkg_apis_application_v1alpha1_JWTToken(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JWTTokens": schema_pkg_apis_application_v1alpha1_JWTTokens(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JsonnetVar": schema_pkg_apis_application_v1alpha1_JsonnetVar(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KnownTypeField": schema_pkg_apis_application_v1alpha1_KnownTypeField(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeGvk": schema_pkg_apis_application_v1alpha1_KustomizeGvk(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeOptions": schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizePatch": schema_pkg_apis_application_v1alpha1_KustomizePatch(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica": schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeResId": schema_pkg_apis_application_v1alpha1_KustomizeResId(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeSelector": schema_pkg_apis_application_v1alpha1_KustomizeSelector(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ListGenerator": schema_pkg_apis_application_v1alpha1_ListGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ManagedNamespaceMetadata": schema_pkg_apis_application_v1alpha1_ManagedNamespaceMetadata(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MatrixGenerator": schema_pkg_apis_application_v1alpha1_MatrixGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.MergeGenerator": 
schema_pkg_apis_application_v1alpha1_MergeGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.NestedMatrixGenerator": schema_pkg_apis_application_v1alpha1_NestedMatrixGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.NestedMergeGenerator": schema_pkg_apis_application_v1alpha1_NestedMergeGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Operation": schema_pkg_apis_application_v1alpha1_Operation(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationInitiator": schema_pkg_apis_application_v1alpha1_OperationInitiator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationState": schema_pkg_apis_application_v1alpha1_OperationState(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalArray": schema_pkg_apis_application_v1alpha1_OptionalArray(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OptionalMap": schema_pkg_apis_application_v1alpha1_OptionalMap(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourceKey": schema_pkg_apis_application_v1alpha1_OrphanedResourceKey(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OrphanedResourcesMonitorSettings": schema_pkg_apis_application_v1alpha1_OrphanedResourcesMonitorSettings(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OverrideIgnoreDiff": schema_pkg_apis_application_v1alpha1_OverrideIgnoreDiff(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginConfigMapRef": schema_pkg_apis_application_v1alpha1_PluginConfigMapRef(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginGenerator": schema_pkg_apis_application_v1alpha1_PluginGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PluginInput": schema_pkg_apis_application_v1alpha1_PluginInput(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ProjectRole": schema_pkg_apis_application_v1alpha1_ProjectRole(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGenerator": schema_pkg_apis_application_v1alpha1_PullRequestGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorAzureDevOps(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucket(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorBitbucketServer(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorFilter": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorFilter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitLab": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitLab(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGitea": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGitea(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.PullRequestGeneratorGithub": schema_pkg_apis_application_v1alpha1_PullRequestGeneratorGithub(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RefTarget": schema_pkg_apis_application_v1alpha1_RefTarget(ref), + 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepoCreds": schema_pkg_apis_application_v1alpha1_RepoCreds(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepoCredsList": schema_pkg_apis_application_v1alpha1_RepoCredsList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Repository": schema_pkg_apis_application_v1alpha1_Repository(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryCertificate": schema_pkg_apis_application_v1alpha1_RepositoryCertificate(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryCertificateList": schema_pkg_apis_application_v1alpha1_RepositoryCertificateList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RepositoryList": schema_pkg_apis_application_v1alpha1_RepositoryList(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceAction": schema_pkg_apis_application_v1alpha1_ResourceAction(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActionDefinition": schema_pkg_apis_application_v1alpha1_ResourceActionDefinition(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActionParam": schema_pkg_apis_application_v1alpha1_ResourceActionParam(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceActions": schema_pkg_apis_application_v1alpha1_ResourceActions(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceDiff": schema_pkg_apis_application_v1alpha1_ResourceDiff(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences": schema_pkg_apis_application_v1alpha1_ResourceIgnoreDifferences(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNetworkingInfo": schema_pkg_apis_application_v1alpha1_ResourceNetworkingInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceNode": schema_pkg_apis_application_v1alpha1_ResourceNode(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceOverride": schema_pkg_apis_application_v1alpha1_ResourceOverride(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceRef": schema_pkg_apis_application_v1alpha1_ResourceRef(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceResult": schema_pkg_apis_application_v1alpha1_ResourceResult(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus": schema_pkg_apis_application_v1alpha1_ResourceStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RetryStrategy": schema_pkg_apis_application_v1alpha1_RetryStrategy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionHistory": schema_pkg_apis_application_v1alpha1_RevisionHistory(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionMetadata": schema_pkg_apis_application_v1alpha1_RevisionMetadata(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGenerator": schema_pkg_apis_application_v1alpha1_SCMProviderGenerator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAWSCodeCommit": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAWSCodeCommit(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorAzureDevOps": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorAzureDevOps(ref), + 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucket": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucket(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorBitbucketServer": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorBitbucketServer(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorFilter": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorFilter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitea": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitea(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGithub": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGithub(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitlab": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitlab(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef": schema_pkg_apis_application_v1alpha1_SecretRef(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SignatureKey": schema_pkg_apis_application_v1alpha1_SignatureKey(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperation": schema_pkg_apis_application_v1alpha1_SyncOperation(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResource": schema_pkg_apis_application_v1alpha1_SyncOperationResource(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResult": schema_pkg_apis_application_v1alpha1_SyncOperationResult(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicy": schema_pkg_apis_application_v1alpha1_SyncPolicy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicyAutomated": schema_pkg_apis_application_v1alpha1_SyncPolicyAutomated(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStatus": schema_pkg_apis_application_v1alpha1_SyncStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategy": schema_pkg_apis_application_v1alpha1_SyncStrategy(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyApply": schema_pkg_apis_application_v1alpha1_SyncStrategyApply(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyHook": schema_pkg_apis_application_v1alpha1_SyncStrategyHook(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncWindow": schema_pkg_apis_application_v1alpha1_SyncWindow(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TLSClientConfig": schema_pkg_apis_application_v1alpha1_TLSClientConfig(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.TagFilter": schema_pkg_apis_application_v1alpha1_TagFilter(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.objectMeta": schema_pkg_apis_application_v1alpha1_objectMeta(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.rawResourceOverride": schema_pkg_apis_application_v1alpha1_rawResourceOverride(ref), } } @@ -596,7 +601,7 @@ func schema_pkg_apis_application_v1alpha1_ApplicationDestination(ref common.Refe Properties: map[string]spec.Schema{ "server": { SchemaProps: spec.SchemaProps{ - Description: "Server specifies the URL of the target cluster and must be set to the Kubernetes control plane API", + Description: "Server specifies the 
URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set.", Type: []string{"string"}, Format: "", }, @@ -610,7 +615,7 @@ func schema_pkg_apis_application_v1alpha1_ApplicationDestination(ref common.Refe }, "name": { SchemaProps: spec.SchemaProps{ - Description: "Name is an alternate way of specifying the target cluster by its symbolic name", + Description: "Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set.", Type: []string{"string"}, Format: "", }, @@ -728,6 +733,20 @@ func schema_pkg_apis_application_v1alpha1_ApplicationPreservedFields(ref common. }, }, }, + "labels": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -1073,6 +1092,56 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetNestedGenerator(ref comm } } +func schema_pkg_apis_application_v1alpha1_ApplicationSetResourceIgnoreDifferences(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live applications when applying changes from generated applications.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the application to ignore differences for. If not specified, the rule applies to all applications.", + Type: []string{"string"}, + Format: "", + }, + }, + "jsonPointers": { + SchemaProps: spec.SchemaProps{ + Description: "JSONPointers is a list of JSON pointers to fields to ignore differences for.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "jqPathExpressions": { + SchemaProps: spec.SchemaProps{ + Description: "JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_ApplicationSetRolloutStep(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1200,12 +1269,25 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSetSpec(ref common.Referenc Format: "", }, }, + "ignoreApplicationDifferences": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetResourceIgnoreDifferences"), + }, + }, + }, + }, + }, }, Required: []string{"generators", "template"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy", 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationPreservedFields", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetGenerator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetResourceIgnoreDifferences", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetStrategy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetSyncPolicy", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSetTemplate"}, } } @@ -1862,11 +1944,25 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSourceKustomize(ref common. }, }, }, + "patches": { + SchemaProps: spec.SchemaProps{ + Description: "Patches is a list of Kustomize patches", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizePatch"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizePatch", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeReplica"}, } } @@ -3783,6 +3879,36 @@ func schema_pkg_apis_application_v1alpha1_KnownTypeField(ref common.ReferenceCal } } +func schema_pkg_apis_application_v1alpha1_KustomizeGvk(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3813,6 +3939,52 @@ func schema_pkg_apis_application_v1alpha1_KustomizeOptions(ref common.ReferenceC } } +func schema_pkg_apis_application_v1alpha1_KustomizePatch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "patch": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "target": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeSelector"), + }, + }, + "options": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.KustomizeSelector"}, + } +} + func 
schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3843,6 +4015,102 @@ func schema_pkg_apis_application_v1alpha1_KustomizeReplica(ref common.ReferenceC } } +func schema_pkg_apis_application_v1alpha1_KustomizeResId(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_application_v1alpha1_KustomizeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "annotationSelector": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "labelSelector": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_ListGenerator(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6984,6 +7252,20 @@ func schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitlab(ref common. Format: "", }, }, + "includeSharedProjects": { + SchemaProps: spec.SchemaProps{ + Description: "When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). 
Defaults to \"true\"", + Type: []string{"boolean"}, + Format: "", + }, + }, + "topic": { + SchemaProps: spec.SchemaProps{ + Description: "Filter repos list based on Gitlab Topic.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"group"}, }, diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go index b1814bfaa54..614cca979a1 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go @@ -465,6 +465,8 @@ type ApplicationSourceKustomize struct { CommonAnnotationsEnvsubst bool `json:"commonAnnotationsEnvsubst,omitempty" protobuf:"bytes,10,opt,name=commonAnnotationsEnvsubst"` // Replicas is a list of Kustomize Replicas override specifications Replicas KustomizeReplicas `json:"replicas,omitempty" protobuf:"bytes,11,opt,name=replicas"` + // Patches is a list of Kustomize patches + Patches KustomizePatches `json:"patches,omitempty" protobuf:"bytes,12,opt,name=patches"` } type KustomizeReplica struct { @@ -509,6 +511,43 @@ func NewKustomizeReplica(text string) (*KustomizeReplica, error) { return kr, nil } +type KustomizePatches []KustomizePatch + +type KustomizePatch struct { + Path string `json:"path,omitempty" yaml:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + Patch string `json:"patch,omitempty" yaml:"patch,omitempty" protobuf:"bytes,2,opt,name=patch"` + Target *KustomizeSelector `json:"target,omitempty" yaml:"target,omitempty" protobuf:"bytes,3,opt,name=target"` + Options map[string]bool `json:"options,omitempty" yaml:"options,omitempty" protobuf:"bytes,4,opt,name=options"` +} + +// Copied from: https://github.com/kubernetes-sigs/kustomize/blob/cd7ba1744eadb793ab7cd056a76ee8a5ca725db9/api/types/patch.go +func (p *KustomizePatch) Equals(o KustomizePatch) bool { + targetEqual := (p.Target == o.Target) || + (p.Target != nil && o.Target != nil && *p.Target == *o.Target) + return p.Path == o.Path && + p.Patch == o.Patch && + targetEqual && + reflect.DeepEqual(p.Options, o.Options) +} + +type KustomizeSelector struct { + KustomizeResId `json:",inline,omitempty" yaml:",inline,omitempty" protobuf:"bytes,1,opt,name=resId"` + AnnotationSelector string `json:"annotationSelector,omitempty" yaml:"annotationSelector,omitempty" protobuf:"bytes,2,opt,name=annotationSelector"` + LabelSelector string `json:"labelSelector,omitempty" yaml:"labelSelector,omitempty" protobuf:"bytes,3,opt,name=labelSelector"` +} + +type KustomizeResId struct { + KustomizeGvk `json:",inline,omitempty" yaml:",inline,omitempty" protobuf:"bytes,1,opt,name=gvk"` + Name string `json:"name,omitempty" yaml:"name,omitempty" protobuf:"bytes,2,opt,name=name"` + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +type KustomizeGvk struct { + Group string `json:"group,omitempty" yaml:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version,omitempty" yaml:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` +} + // AllowsConcurrentProcessing returns true if multiple processes can run Kustomize builds on the same source at the same time func (k *ApplicationSourceKustomize) AllowsConcurrentProcessing() bool { return len(k.Images) == 0 && @@ -516,7 +555,8 @@ func (k *ApplicationSourceKustomize) 
AllowsConcurrentProcessing() bool { len(k.CommonAnnotations) == 0 && k.NamePrefix == "" && k.Namespace == "" && - k.NameSuffix == "" + k.NameSuffix == "" && + len(k.Patches) == 0 } // IsZero returns true when the Kustomize options are considered empty @@ -529,7 +569,8 @@ func (k *ApplicationSourceKustomize) IsZero() bool { len(k.Images) == 0 && len(k.Replicas) == 0 && len(k.CommonLabels) == 0 && - len(k.CommonAnnotations) == 0 + len(k.CommonAnnotations) == 0 && + len(k.Patches) == 0 } // MergeImage merges a new Kustomize image identifier in to a list of images @@ -860,12 +901,12 @@ func (c *ApplicationSourcePlugin) RemoveEnvEntry(key string) error { // ApplicationDestination holds information about the application's destination type ApplicationDestination struct { - // Server specifies the URL of the target cluster and must be set to the Kubernetes control plane API + // Server specifies the URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set. Server string `json:"server,omitempty" protobuf:"bytes,1,opt,name=server"` // Namespace specifies the target namespace for the application's resources. // The namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` - // Name is an alternate way of specifying the target cluster by its symbolic name + // Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set. Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` // nolint:govet @@ -910,6 +951,35 @@ type ApplicationStatus struct { ControllerNamespace string `json:"controllerNamespace,omitempty" protobuf:"bytes,13,opt,name=controllerNamespace"` } +// GetRevisions will return the current revision associated with the Application. +// If app has multisources, it will return all corresponding revisions preserving +// order from the app.spec.sources. If app has only one source, it will return a +// single revision in the list. +func (a *ApplicationStatus) GetRevisions() []string { + revisions := []string{} + if len(a.Sync.Revisions) > 0 { + revisions = a.Sync.Revisions + } else if a.Sync.Revision != "" { + revisions = append(revisions, a.Sync.Revision) + } + return revisions +} + +// BuildComparedToStatus will build a ComparedTo object based on the current +// Application state. +func (app *Application) BuildComparedToStatus() ComparedTo { + ct := ComparedTo{ + Destination: app.Spec.Destination, + IgnoreDifferences: app.Spec.IgnoreDifferences, + } + if app.Spec.HasMultipleSources() { + ct.Sources = app.Spec.Sources + } else { + ct.Source = app.Spec.GetSource() + } + return ct +} + // JWTTokens represents a list of JWT tokens type JWTTokens struct { Items []JWTToken `json:"items,omitempty" protobuf:"bytes,1,opt,name=items"` @@ -1094,11 +1164,12 @@ type SyncPolicy struct { Retry *RetryStrategy `json:"retry,omitempty" protobuf:"bytes,3,opt,name=retry"` // ManagedNamespaceMetadata controls metadata in the given namespace (if CreateNamespace=true) ManagedNamespaceMetadata *ManagedNamespaceMetadata `json:"managedNamespaceMetadata,omitempty" protobuf:"bytes,4,opt,name=managedNamespaceMetadata"` + // If you add a field here, be sure to update IsZero. 
} // IsZero returns true if the sync policy is empty func (p *SyncPolicy) IsZero() bool { - return p == nil || (p.Automated == nil && len(p.SyncOptions) == 0 && p.Retry == nil) + return p == nil || (p.Automated == nil && len(p.SyncOptions) == 0 && p.Retry == nil && p.ManagedNamespaceMetadata == nil) } // RetryStrategy contains information about the strategy to apply when a sync failed @@ -2863,7 +2934,12 @@ func (c *Cluster) RawRestConfig() *rest.Config { if exists { config, err = clientcmd.BuildConfigFromFlags("", conf) } else { - config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config")) + var homeDir string + homeDir, err = os.UserHomeDir() + if err != nil { + homeDir = "" + } + config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(homeDir, ".kube", "config")) } } else if c.Server == KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" { config, err = rest.InClusterConfig() diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go index 82d405118c5..e7245069b99 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go @@ -323,6 +323,11 @@ func (in *ApplicationPreservedFields) DeepCopyInto(out *ApplicationPreservedFiel *out = make([]string, len(*in)) copy(*out, *in) } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -470,6 +475,28 @@ func (in *ApplicationSetGenerator) DeepCopy() *ApplicationSetGenerator { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ApplicationSetIgnoreDifferences) DeepCopyInto(out *ApplicationSetIgnoreDifferences) { + { + in := &in + *out = make(ApplicationSetIgnoreDifferences, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSetIgnoreDifferences. +func (in ApplicationSetIgnoreDifferences) DeepCopy() ApplicationSetIgnoreDifferences { + if in == nil { + return nil + } + out := new(ApplicationSetIgnoreDifferences) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplicationSetList) DeepCopyInto(out *ApplicationSetList) { *out = *in @@ -591,6 +618,32 @@ func (in ApplicationSetNestedGenerators) DeepCopy() ApplicationSetNestedGenerato return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationSetResourceIgnoreDifferences) DeepCopyInto(out *ApplicationSetResourceIgnoreDifferences) { + *out = *in + if in.JSONPointers != nil { + in, out := &in.JSONPointers, &out.JSONPointers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JQPathExpressions != nil { + in, out := &in.JQPathExpressions, &out.JQPathExpressions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationSetResourceIgnoreDifferences. 
+func (in *ApplicationSetResourceIgnoreDifferences) DeepCopy() *ApplicationSetResourceIgnoreDifferences { + if in == nil { + return nil + } + out := new(ApplicationSetResourceIgnoreDifferences) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ApplicationSetRolloutStep) DeepCopyInto(out *ApplicationSetRolloutStep) { *out = *in @@ -673,6 +726,13 @@ func (in *ApplicationSetSpec) DeepCopyInto(out *ApplicationSetSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IgnoreApplicationDifferences != nil { + in, out := &in.IgnoreApplicationDifferences, &out.IgnoreApplicationDifferences + *out = make(ApplicationSetIgnoreDifferences, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1036,6 +1096,13 @@ func (in *ApplicationSourceKustomize) DeepCopyInto(out *ApplicationSourceKustomi *out = make(KustomizeReplicas, len(*in)) copy(*out, *in) } + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make(KustomizePatches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -2168,6 +2235,22 @@ func (in *KnownTypeField) DeepCopy() *KnownTypeField { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KustomizeGvk) DeepCopyInto(out *KustomizeGvk) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizeGvk. +func (in *KustomizeGvk) DeepCopy() *KustomizeGvk { + if in == nil { + return nil + } + out := new(KustomizeGvk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in KustomizeImages) DeepCopyInto(out *KustomizeImages) { { @@ -2204,6 +2287,56 @@ func (in *KustomizeOptions) DeepCopy() *KustomizeOptions { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KustomizePatch) DeepCopyInto(out *KustomizePatch) { + *out = *in + if in.Target != nil { + in, out := &in.Target, &out.Target + *out = new(KustomizeSelector) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizePatch. +func (in *KustomizePatch) DeepCopy() *KustomizePatch { + if in == nil { + return nil + } + out := new(KustomizePatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in KustomizePatches) DeepCopyInto(out *KustomizePatches) { + { + in := &in + *out = make(KustomizePatches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizePatches. +func (in KustomizePatches) DeepCopy() KustomizePatches { + if in == nil { + return nil + } + out := new(KustomizePatches) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *KustomizeReplica) DeepCopyInto(out *KustomizeReplica) { *out = *in @@ -2241,6 +2374,40 @@ func (in KustomizeReplicas) DeepCopy() KustomizeReplicas { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KustomizeResId) DeepCopyInto(out *KustomizeResId) { + *out = *in + out.KustomizeGvk = in.KustomizeGvk + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizeResId. +func (in *KustomizeResId) DeepCopy() *KustomizeResId { + if in == nil { + return nil + } + out := new(KustomizeResId) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KustomizeSelector) DeepCopyInto(out *KustomizeSelector) { + *out = *in + out.KustomizeResId = in.KustomizeResId + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KustomizeSelector. +func (in *KustomizeSelector) DeepCopy() *KustomizeSelector { + if in == nil { + return nil + } + out := new(KustomizeSelector) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListGenerator) DeepCopyInto(out *ListGenerator) { *out = *in @@ -3815,6 +3982,11 @@ func (in *SCMProviderGeneratorGitlab) DeepCopyInto(out *SCMProviderGeneratorGitl *out = new(SecretRef) **out = **in } + if in.IncludeSharedProjects != nil { + in, out := &in.IncludeSharedProjects, &out.IncludeSharedProjects + *out = new(bool) + **out = **in + } return } diff --git a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go index dd5a4559aca..914a967db3d 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/reposerver/apiclient/repository.pb.go @@ -46,17 +46,21 @@ type ManifestRequest struct { KubeVersion string `protobuf:"bytes,14,opt,name=kubeVersion,proto3" json:"kubeVersion,omitempty"` ApiVersions []string `protobuf:"bytes,15,rep,name=apiVersions,proto3" json:"apiVersions,omitempty"` // Request to verify the signature when generating the manifests (only for Git repositories) - VerifySignature bool `protobuf:"varint,16,opt,name=verifySignature,proto3" json:"verifySignature,omitempty"` - HelmRepoCreds []*v1alpha1.RepoCreds `protobuf:"bytes,17,rep,name=helmRepoCreds,proto3" json:"helmRepoCreds,omitempty"` - NoRevisionCache bool `protobuf:"varint,18,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` - TrackingMethod string `protobuf:"bytes,19,opt,name=trackingMethod,proto3" json:"trackingMethod,omitempty"` - EnabledSourceTypes map[string]bool `protobuf:"bytes,20,rep,name=enabledSourceTypes,proto3" json:"enabledSourceTypes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - HelmOptions *v1alpha1.HelmOptions `protobuf:"bytes,21,opt,name=helmOptions,proto3" json:"helmOptions,omitempty"` - HasMultipleSources bool `protobuf:"varint,22,opt,name=hasMultipleSources,proto3" json:"hasMultipleSources,omitempty"` - RefSources map[string]*v1alpha1.RefTarget `protobuf:"bytes,23,rep,name=refSources,proto3" json:"refSources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} 
`json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + VerifySignature bool `protobuf:"varint,16,opt,name=verifySignature,proto3" json:"verifySignature,omitempty"` + HelmRepoCreds []*v1alpha1.RepoCreds `protobuf:"bytes,17,rep,name=helmRepoCreds,proto3" json:"helmRepoCreds,omitempty"` + NoRevisionCache bool `protobuf:"varint,18,opt,name=noRevisionCache,proto3" json:"noRevisionCache,omitempty"` + TrackingMethod string `protobuf:"bytes,19,opt,name=trackingMethod,proto3" json:"trackingMethod,omitempty"` + EnabledSourceTypes map[string]bool `protobuf:"bytes,20,rep,name=enabledSourceTypes,proto3" json:"enabledSourceTypes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + HelmOptions *v1alpha1.HelmOptions `protobuf:"bytes,21,opt,name=helmOptions,proto3" json:"helmOptions,omitempty"` + HasMultipleSources bool `protobuf:"varint,22,opt,name=hasMultipleSources,proto3" json:"hasMultipleSources,omitempty"` + RefSources map[string]*v1alpha1.RefTarget `protobuf:"bytes,23,rep,name=refSources,proto3" json:"refSources,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // This is used to surface "source not permitted" errors for Helm repositories + ProjectSourceRepos []string `protobuf:"bytes,24,rep,name=projectSourceRepos,proto3" json:"projectSourceRepos,omitempty"` + // This is used to surface "source not permitted" errors for Helm repositories + ProjectName string `protobuf:"bytes,25,opt,name=projectName,proto3" json:"projectName,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ManifestRequest) Reset() { *m = ManifestRequest{} } @@ -232,6 +236,20 @@ func (m *ManifestRequest) GetRefSources() map[string]*v1alpha1.RefTarget { return nil } +func (m *ManifestRequest) GetProjectSourceRepos() []string { + if m != nil { + return m.ProjectSourceRepos + } + return nil +} + +func (m *ManifestRequest) GetProjectName() string { + if m != nil { + return m.ProjectName + } + return "" +} + type ManifestRequestWithFiles struct { // Types that are valid to be assigned to Part: // *ManifestRequestWithFiles_Request @@ -2187,138 +2205,140 @@ func init() { } var fileDescriptor_dd8723cfcc820480 = []byte{ - // 2096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xdb, 0x6e, 0x1b, 0xc7, - 0xf9, 0xe7, 0x92, 0x94, 0x44, 0x7e, 0x92, 0x25, 0x6a, 0xac, 0xc3, 0x9a, 0x71, 0x04, 0x65, 0xff, - 0x7f, 0x1b, 0xaa, 0x9d, 0x90, 0x90, 0x8c, 0xc4, 0x85, 0x93, 0xa6, 0x50, 0x14, 0x5b, 0x72, 0x6c, - 0xd9, 0xea, 0xda, 0x6d, 0x91, 0xd6, 0x6d, 0x31, 0x5c, 0x0e, 0xc9, 0x09, 0xf7, 0x30, 0xde, 0x9d, - 0x55, 0x20, 0x03, 0xbd, 0x28, 0x5a, 0xf4, 0x11, 0x8a, 0xa2, 0xaf, 0x51, 0x14, 0xbd, 0xec, 0x55, - 0x0f, 0x97, 0x41, 0x5f, 0xa0, 0x85, 0x6f, 0xfa, 0x1a, 0xc5, 0xcc, 0xce, 0x1e, 0xb9, 0x92, 0x9d, - 0x52, 0x56, 0x50, 0xf4, 0xc6, 0xde, 0x99, 0xf9, 0xe6, 0x3b, 0xcd, 0x77, 0xf8, 0xcd, 0x50, 0x70, - 0xdd, 0x27, 0xcc, 0x0b, 0x88, 0x7f, 0x4c, 0xfc, 0xae, 0xfc, 0xa4, 0xdc, 0xf3, 0x4f, 0x32, 0x9f, - 0x1d, 0xe6, 0x7b, 0xdc, 0x43, 0x90, 0xce, 0xb4, 0x1f, 0x0e, 0x29, 0x1f, 0x85, 0xbd, 0x8e, 0xe5, - 0x39, 0x5d, 0xec, 0x0f, 0x3d, 0xe6, 0x7b, 0x5f, 0xc8, 0x8f, 0xf7, 0xac, 0x7e, 0xf7, 0x78, 0xa7, - 0xcb, 0xc6, 0xc3, 0x2e, 0x66, 0x34, 0xe8, 0x62, 0xc6, 0x6c, 0x6a, 0x61, 0x4e, 0x3d, 0xb7, 0x7b, - 0xbc, 0x8d, 0x6d, 0x36, 0xc2, 0xdb, 0xdd, 0x21, 0x71, 0x89, 0x8f, 0x39, 0xe9, 0x47, 0x9c, 0xdb, - 0x6f, 
0x0d, 0x3d, 0x6f, 0x68, 0x93, 0xae, 0x1c, 0xf5, 0xc2, 0x41, 0x97, 0x38, 0x8c, 0x2b, 0xb1, - 0xc6, 0x6f, 0x17, 0x60, 0xe9, 0x10, 0xbb, 0x74, 0x40, 0x02, 0x6e, 0x92, 0xe7, 0x21, 0x09, 0x38, - 0x7a, 0x06, 0x75, 0xa1, 0x8c, 0xae, 0x6d, 0x6a, 0x5b, 0xf3, 0x3b, 0x07, 0x9d, 0x54, 0x9b, 0x4e, - 0xac, 0x8d, 0xfc, 0xf8, 0x99, 0xd5, 0xef, 0x1c, 0xef, 0x74, 0xd8, 0x78, 0xd8, 0x11, 0xda, 0x74, - 0x32, 0xda, 0x74, 0x62, 0x6d, 0x3a, 0x66, 0x62, 0x96, 0x29, 0xb9, 0xa2, 0x36, 0x34, 0x7c, 0x72, - 0x4c, 0x03, 0xea, 0xb9, 0x7a, 0x75, 0x53, 0xdb, 0x6a, 0x9a, 0xc9, 0x18, 0xe9, 0x30, 0xe7, 0x7a, - 0x7b, 0xd8, 0x1a, 0x11, 0xbd, 0xb6, 0xa9, 0x6d, 0x35, 0xcc, 0x78, 0x88, 0x36, 0x61, 0x1e, 0x33, - 0xf6, 0x10, 0xf7, 0x88, 0xfd, 0x80, 0x9c, 0xe8, 0x75, 0xb9, 0x31, 0x3b, 0x25, 0xf6, 0x62, 0xc6, - 0x1e, 0x61, 0x87, 0xe8, 0x33, 0x72, 0x35, 0x1e, 0xa2, 0xab, 0xd0, 0x74, 0xb1, 0x43, 0x02, 0x86, - 0x2d, 0xa2, 0x37, 0xe4, 0x5a, 0x3a, 0x81, 0x7e, 0x0e, 0xcb, 0x19, 0xc5, 0x9f, 0x78, 0xa1, 0x6f, - 0x11, 0x1d, 0xa4, 0xe9, 0x8f, 0xa7, 0x33, 0x7d, 0xb7, 0xc8, 0xd6, 0x9c, 0x94, 0x84, 0x7e, 0x0a, - 0x33, 0xf2, 0xe4, 0xf5, 0xf9, 0xcd, 0xda, 0xb9, 0x7a, 0x3b, 0x62, 0x8b, 0x5c, 0x98, 0x63, 0x76, - 0x38, 0xa4, 0x6e, 0xa0, 0x2f, 0x48, 0x09, 0x4f, 0xa7, 0x93, 0xb0, 0xe7, 0xb9, 0x03, 0x3a, 0x3c, - 0xc4, 0x2e, 0x1e, 0x12, 0x87, 0xb8, 0xfc, 0x48, 0x32, 0x37, 0x63, 0x21, 0xe8, 0x05, 0xb4, 0xc6, - 0x61, 0xc0, 0x3d, 0x87, 0xbe, 0x20, 0x8f, 0x99, 0xd8, 0x1b, 0xe8, 0x97, 0xa4, 0x37, 0x1f, 0x4d, - 0x27, 0xf8, 0x41, 0x81, 0xab, 0x39, 0x21, 0x47, 0x04, 0xc9, 0x38, 0xec, 0x91, 0x1f, 0x10, 0x5f, - 0x46, 0xd7, 0x62, 0x14, 0x24, 0x99, 0xa9, 0x28, 0x8c, 0xa8, 0x1a, 0x05, 0xfa, 0xd2, 0x66, 0x2d, - 0x0a, 0xa3, 0x64, 0x0a, 0x6d, 0xc1, 0xd2, 0x31, 0xf1, 0xe9, 0xe0, 0xe4, 0x09, 0x1d, 0xba, 0x98, - 0x87, 0x3e, 0xd1, 0x5b, 0x32, 0x14, 0x8b, 0xd3, 0xc8, 0x81, 0x4b, 0x23, 0x62, 0x3b, 0xc2, 0xe5, - 0x7b, 0x3e, 0xe9, 0x07, 0xfa, 0xb2, 0xf4, 0xef, 0xfe, 0xf4, 0x27, 0x28, 0xd9, 0x99, 0x79, 0xee, - 0x42, 0x31, 0xd7, 0x33, 0x55, 0xa6, 0x44, 0x39, 0x82, 0x22, 0xc5, 0x0a, 0xd3, 0xe8, 0x3a, 0x2c, - 0x72, 0x1f, 0x5b, 0x63, 0xea, 0x0e, 0x0f, 0x09, 0x1f, 0x79, 0x7d, 0xfd, 0xb2, 0xf4, 0x44, 0x61, - 0x16, 0x59, 0x80, 0x88, 0x8b, 0x7b, 0x36, 0xe9, 0x47, 0xb1, 0xf8, 0xf4, 0x84, 0x91, 0x40, 0x5f, - 0x91, 0x56, 0xdc, 0xea, 0x64, 0x2a, 0x54, 0xa1, 0x40, 0x74, 0xee, 0x4e, 0xec, 0xba, 0xeb, 0x72, - 0xff, 0xc4, 0x2c, 0x61, 0x87, 0xc6, 0x30, 0x2f, 0xec, 0x88, 0x43, 0x61, 0x55, 0x86, 0xc2, 0xfd, - 0xe9, 0x7c, 0x74, 0x90, 0x32, 0x34, 0xb3, 0xdc, 0x51, 0x07, 0xd0, 0x08, 0x07, 0x87, 0xa1, 0xcd, - 0x29, 0xb3, 0x49, 0xa4, 0x46, 0xa0, 0xaf, 0x49, 0x37, 0x95, 0xac, 0xa0, 0x07, 0x00, 0x3e, 0x19, - 0xc4, 0x74, 0xeb, 0xd2, 0xf2, 0x9b, 0x67, 0x59, 0x6e, 0x26, 0xd4, 0x91, 0xc5, 0x99, 0xed, 0xed, - 0xbb, 0xb0, 0x7e, 0x8a, 0x63, 0x50, 0x0b, 0x6a, 0x63, 0x72, 0x22, 0x0b, 0x6a, 0xd3, 0x14, 0x9f, - 0x68, 0x05, 0x66, 0x8e, 0xb1, 0x1d, 0x12, 0x59, 0x02, 0x1b, 0x66, 0x34, 0xb8, 0x53, 0xfd, 0xb6, - 0xd6, 0xfe, 0xb5, 0x06, 0x4b, 0x05, 0x31, 0x25, 0xfb, 0x7f, 0x92, 0xdd, 0x7f, 0x0e, 0x41, 0x37, - 0x78, 0x8a, 0xfd, 0x21, 0xe1, 0x19, 0x45, 0x8c, 0xbf, 0x6b, 0xa0, 0x17, 0xec, 0xff, 0x21, 0xe5, - 0xa3, 0x7b, 0xd4, 0x26, 0x01, 0xba, 0x0d, 0x73, 0x7e, 0x34, 0xa7, 0xda, 0xc4, 0x5b, 0x67, 0xb8, - 0xed, 0xa0, 0x62, 0xc6, 0xd4, 0xe8, 0x63, 0x68, 0x38, 0x84, 0xe3, 0x3e, 0xe6, 0x58, 0xe9, 0xbe, - 0x59, 0xb6, 0x53, 0x48, 0x39, 0x54, 0x74, 0x07, 0x15, 0x33, 0xd9, 0x83, 0xde, 0x87, 0x19, 0x6b, - 0x14, 0xba, 0x63, 0xd9, 0x20, 0xe6, 0x77, 0xde, 0x3e, 0x6d, 0xf3, 0x9e, 0x20, 0x3a, 0xa8, 0x98, - 0x11, 0xf5, 0x27, 0xb3, 0x50, 
0x67, 0xd8, 0xe7, 0xc6, 0x3d, 0x58, 0x29, 0x13, 0x21, 0xba, 0x92, - 0x35, 0x22, 0xd6, 0x38, 0x08, 0x1d, 0xe5, 0xe6, 0x64, 0x8c, 0x10, 0xd4, 0x03, 0xfa, 0x22, 0x72, - 0x75, 0xcd, 0x94, 0xdf, 0xc6, 0xb7, 0x60, 0x79, 0x42, 0x9a, 0x38, 0xd4, 0x48, 0x37, 0xc1, 0x61, - 0x41, 0x89, 0x36, 0x42, 0x58, 0x7d, 0x2a, 0x7d, 0x91, 0x94, 0xe6, 0x8b, 0xe8, 0xb3, 0xc6, 0x01, - 0xac, 0x15, 0xc5, 0x06, 0xcc, 0x73, 0x03, 0x22, 0xb2, 0x44, 0xd6, 0x32, 0x4a, 0xfa, 0xe9, 0xaa, - 0xd4, 0xa2, 0x61, 0x96, 0xac, 0x18, 0xbf, 0xa8, 0xc2, 0x9a, 0x49, 0x02, 0xcf, 0x3e, 0x26, 0x71, - 0xa1, 0xb9, 0x18, 0xa8, 0xf0, 0x63, 0xa8, 0x61, 0xc6, 0x54, 0x98, 0xdc, 0x3f, 0xb7, 0x66, 0x6c, - 0x0a, 0xae, 0xe8, 0x5d, 0x58, 0xc6, 0x4e, 0x8f, 0x0e, 0x43, 0x2f, 0x0c, 0x62, 0xb3, 0x64, 0x50, - 0x35, 0xcd, 0xc9, 0x05, 0xc3, 0x82, 0xf5, 0x09, 0x17, 0x28, 0x77, 0x66, 0x01, 0x8d, 0x56, 0x00, - 0x34, 0xa5, 0x42, 0xaa, 0xa7, 0x09, 0xf9, 0x8b, 0x06, 0xad, 0x34, 0x75, 0x14, 0xfb, 0xab, 0xd0, - 0x74, 0xd4, 0x5c, 0xa0, 0x6b, 0xb2, 0x61, 0xa5, 0x13, 0x79, 0x6c, 0x53, 0x2d, 0x62, 0x9b, 0x35, - 0x98, 0x8d, 0xa0, 0xa7, 0x32, 0x4c, 0x8d, 0x72, 0x2a, 0xd7, 0x0b, 0x2a, 0x6f, 0x00, 0x04, 0x49, - 0xfd, 0xd2, 0x67, 0xe5, 0x6a, 0x66, 0x06, 0x19, 0xb0, 0x10, 0x75, 0x42, 0x93, 0x04, 0xa1, 0xcd, - 0xf5, 0x39, 0x49, 0x91, 0x9b, 0x33, 0x3c, 0x58, 0x7a, 0x48, 0x85, 0x0d, 0x83, 0xe0, 0x62, 0x82, - 0xfd, 0x03, 0xa8, 0x0b, 0x61, 0xc2, 0xb0, 0x9e, 0x8f, 0x5d, 0x6b, 0x44, 0x62, 0x5f, 0x25, 0x63, - 0x91, 0xc6, 0x1c, 0x0f, 0x03, 0xbd, 0x2a, 0xe7, 0xe5, 0xb7, 0xf1, 0xc7, 0x6a, 0xa4, 0xe9, 0x2e, - 0x63, 0xc1, 0x37, 0x0f, 0x7f, 0xcb, 0x1b, 0x72, 0x6d, 0xb2, 0x21, 0x17, 0x54, 0xfe, 0x3a, 0x0d, - 0xf9, 0x9c, 0xda, 0x94, 0x11, 0xc2, 0xdc, 0x2e, 0x63, 0x42, 0x11, 0xb4, 0x0d, 0x75, 0xcc, 0x58, - 0xe4, 0xf0, 0x42, 0x45, 0x56, 0x24, 0xe2, 0x7f, 0xa5, 0x92, 0x24, 0x6d, 0xdf, 0x86, 0x66, 0x32, - 0xf5, 0x2a, 0xb1, 0xcd, 0xac, 0xd8, 0x4d, 0x80, 0x08, 0x71, 0xde, 0x77, 0x07, 0x9e, 0x38, 0x52, - 0x11, 0xec, 0x6a, 0xab, 0xfc, 0x36, 0xee, 0xc4, 0x14, 0x52, 0xb7, 0x77, 0x61, 0x86, 0x72, 0xe2, - 0xc4, 0xca, 0xad, 0x65, 0x95, 0x4b, 0x19, 0x99, 0x11, 0x91, 0xf1, 0xd7, 0x06, 0x5c, 0x11, 0x27, - 0xf6, 0x44, 0xa6, 0xc9, 0x2e, 0x63, 0x9f, 0x12, 0x8e, 0xa9, 0x1d, 0x7c, 0x2f, 0x24, 0xfe, 0xc9, - 0x1b, 0x0e, 0x8c, 0x21, 0xcc, 0x46, 0x59, 0xa6, 0xea, 0xdd, 0xb9, 0x5f, 0x3e, 0x14, 0xfb, 0xf4, - 0xc6, 0x51, 0x7b, 0x33, 0x37, 0x8e, 0xb2, 0x1b, 0x40, 0xfd, 0x82, 0x6e, 0x00, 0xa7, 0x5f, 0x02, - 0x33, 0x57, 0xcb, 0xd9, 0xfc, 0xd5, 0xb2, 0x04, 0x58, 0xcf, 0xbd, 0x2e, 0xb0, 0x6e, 0x94, 0x02, - 0x6b, 0xa7, 0x34, 0x8f, 0x9b, 0xd2, 0xdd, 0xdf, 0xc9, 0x46, 0xe0, 0xa9, 0xb1, 0x36, 0x0d, 0xc4, - 0x86, 0x37, 0x0a, 0xb1, 0xbf, 0x9f, 0x83, 0xcc, 0xd1, 0xa5, 0xf5, 0xfd, 0xd7, 0xb3, 0xe9, 0x7f, - 0x09, 0x3c, 0xff, 0x4a, 0x62, 0x26, 0xe6, 0xa5, 0x3e, 0x48, 0x1a, 0xba, 0xe8, 0x43, 0xa2, 0xb5, - 0xaa, 0xa2, 0x25, 0xbe, 0xd1, 0x4d, 0xa8, 0x0b, 0x27, 0x2b, 0x50, 0xbb, 0x9e, 0xf5, 0xa7, 0x38, - 0x89, 0x5d, 0xc6, 0x9e, 0x30, 0x62, 0x99, 0x92, 0x08, 0xdd, 0x81, 0x66, 0x12, 0xf8, 0x2a, 0xb3, - 0xae, 0x66, 0x77, 0x24, 0x79, 0x12, 0x6f, 0x4b, 0xc9, 0xc5, 0xde, 0x3e, 0xf5, 0x89, 0x25, 0x21, - 0xdf, 0xcc, 0xe4, 0xde, 0x4f, 0xe3, 0xc5, 0x64, 0x6f, 0x42, 0x8e, 0xb6, 0x61, 0x36, 0xba, 0xe5, - 0xcb, 0x0c, 0x9a, 0xdf, 0xb9, 0x32, 0x59, 0x4c, 0xe3, 0x5d, 0x8a, 0xd0, 0xf8, 0xb3, 0x06, 0xef, - 0xa4, 0x01, 0x11, 0x67, 0x53, 0x8c, 0xba, 0xbf, 0xf9, 0x8e, 0x7b, 0x1d, 0x16, 0x25, 0xcc, 0x4f, - 0x2f, 0xfb, 0xd1, 0xbb, 0x53, 0x61, 0xd6, 0xf8, 0x83, 0x06, 0xd7, 0x26, 0xed, 0xd8, 0x1b, 0x61, - 0x9f, 0x27, 0xc7, 0x7b, 0x11, 0xb6, 0xc4, 0x0d, 0xaf, 
0x9a, 0x36, 0xbc, 0x9c, 0x7d, 0xb5, 0xbc, - 0x7d, 0xc6, 0x9f, 0xaa, 0x30, 0x9f, 0x09, 0xa0, 0xb2, 0x86, 0x29, 0x00, 0x9f, 0x8c, 0x5b, 0x79, - 0xb1, 0x93, 0x4d, 0xa1, 0x69, 0x66, 0x66, 0xd0, 0x18, 0x80, 0x61, 0x1f, 0x3b, 0x84, 0x13, 0x5f, - 0x54, 0x72, 0x91, 0xf1, 0x0f, 0xa6, 0xaf, 0x2e, 0x47, 0x31, 0x4f, 0x33, 0xc3, 0x5e, 0x20, 0x56, - 0x29, 0x3a, 0x50, 0xf5, 0x5b, 0x8d, 0xd0, 0x97, 0xb0, 0x38, 0xa0, 0x36, 0x39, 0x4a, 0x15, 0x99, - 0x95, 0x8a, 0x3c, 0x9e, 0x5e, 0x91, 0x7b, 0x59, 0xbe, 0x66, 0x41, 0x8c, 0x71, 0x03, 0x5a, 0xc5, - 0x7c, 0x12, 0x4a, 0x52, 0x07, 0x0f, 0x13, 0x6f, 0xa9, 0x91, 0x81, 0xa0, 0x55, 0xcc, 0x1f, 0xe3, - 0x1f, 0x55, 0x58, 0x4d, 0xd8, 0xed, 0xba, 0xae, 0x17, 0xba, 0x96, 0x7c, 0x38, 0x2b, 0x3d, 0x8b, - 0x15, 0x98, 0xe1, 0x94, 0xdb, 0x09, 0xf0, 0x91, 0x03, 0xd1, 0xbb, 0xb8, 0xe7, 0xd9, 0x9c, 0x32, - 0x75, 0xc0, 0xf1, 0x30, 0x3a, 0xfb, 0xe7, 0x21, 0xf5, 0x49, 0x5f, 0x56, 0x82, 0x86, 0x99, 0x8c, - 0xc5, 0x9a, 0x40, 0x35, 0x12, 0xc6, 0x47, 0xce, 0x4c, 0xc6, 0x32, 0xee, 0x3d, 0xdb, 0x26, 0x96, - 0x70, 0x47, 0x06, 0xe8, 0x17, 0x66, 0xe5, 0x05, 0x82, 0xfb, 0xd4, 0x1d, 0x2a, 0x98, 0xaf, 0x46, - 0x42, 0x4f, 0xec, 0xfb, 0xf8, 0x44, 0x6f, 0x48, 0x07, 0x44, 0x03, 0xf4, 0x11, 0xd4, 0x1c, 0xcc, - 0x54, 0xa3, 0xbb, 0x91, 0xab, 0x0e, 0x65, 0x1e, 0xe8, 0x1c, 0x62, 0x16, 0x75, 0x02, 0xb1, 0xad, - 0xfd, 0x01, 0x34, 0xe2, 0x89, 0xaf, 0x05, 0x09, 0xbf, 0x80, 0x4b, 0xb9, 0xe2, 0x83, 0x3e, 0x87, - 0xb5, 0x34, 0xa2, 0xb2, 0x02, 0x15, 0x08, 0x7c, 0xe7, 0x95, 0x9a, 0x99, 0xa7, 0x30, 0x30, 0x9e, - 0xc3, 0xb2, 0x08, 0x19, 0x99, 0xf8, 0x17, 0x74, 0xb5, 0xf9, 0x10, 0x9a, 0x89, 0xc8, 0xd2, 0x98, - 0x69, 0x43, 0xe3, 0x38, 0x7e, 0xd0, 0x8c, 0xee, 0x36, 0xc9, 0xd8, 0xd8, 0x05, 0x94, 0xd5, 0x57, - 0x75, 0xa0, 0x9b, 0x79, 0x50, 0xbc, 0x5a, 0x6c, 0x37, 0x92, 0x3c, 0xc6, 0xc4, 0xbf, 0xaf, 0xc2, - 0xd2, 0x3e, 0x95, 0xaf, 0x1c, 0x17, 0x54, 0xe4, 0x6e, 0x40, 0x2b, 0x08, 0x7b, 0x8e, 0xd7, 0x0f, - 0x6d, 0xa2, 0x40, 0x81, 0xea, 0xf4, 0x13, 0xf3, 0x67, 0x15, 0x3f, 0xe1, 0x2c, 0x86, 0xf9, 0x48, - 0xdd, 0x70, 0xe5, 0x37, 0xfa, 0x08, 0xae, 0x3c, 0x22, 0x5f, 0x2a, 0x7b, 0xf6, 0x6d, 0xaf, 0xd7, - 0xa3, 0xee, 0x30, 0x16, 0x32, 0x23, 0x85, 0x9c, 0x4e, 0x50, 0x06, 0x15, 0x67, 0x4b, 0xa1, 0xa2, - 0xf1, 0x4b, 0x0d, 0x5a, 0xa9, 0xd7, 0x94, 0xdf, 0x6f, 0x47, 0xf9, 0x11, 0x79, 0xfd, 0x5a, 0xd6, - 0xeb, 0x45, 0xd2, 0xff, 0x3c, 0x35, 0x16, 0xb2, 0xa9, 0xf1, 0x2f, 0x0d, 0x56, 0xf7, 0x29, 0x8f, - 0x8b, 0x12, 0xfd, 0x6f, 0x3b, 0xc1, 0x12, 0x7f, 0xd7, 0xcb, 0xfd, 0xdd, 0x81, 0xb5, 0xa2, 0xa1, - 0xca, 0xe9, 0x2b, 0x30, 0x23, 0x4e, 0x3e, 0x7e, 0x0f, 0x88, 0x06, 0x3b, 0x5f, 0x35, 0x61, 0x39, - 0x6d, 0xe8, 0xe2, 0x5f, 0x6a, 0x11, 0xf4, 0x18, 0x5a, 0xfb, 0xea, 0xd7, 0xb3, 0xf8, 0x1d, 0x06, - 0x9d, 0xf5, 0xb0, 0xd9, 0xbe, 0x5a, 0xbe, 0x18, 0x89, 0x36, 0x2a, 0xc8, 0x82, 0x2b, 0x45, 0x86, - 0xe9, 0x1b, 0xea, 0xff, 0x9f, 0xc1, 0x39, 0xa1, 0x7a, 0x95, 0x88, 0x2d, 0x0d, 0x7d, 0x0e, 0x8b, - 0xf9, 0x97, 0x3e, 0x94, 0xab, 0x70, 0xa5, 0x8f, 0x8f, 0x6d, 0xe3, 0x2c, 0x92, 0x44, 0xff, 0x67, - 0x02, 0x4e, 0xe7, 0x9e, 0xbd, 0x90, 0x91, 0x07, 0xfb, 0x65, 0xcf, 0x82, 0xed, 0xff, 0x3b, 0x93, - 0x26, 0xe1, 0xfe, 0x21, 0x34, 0xe2, 0x67, 0xa2, 0xbc, 0x9b, 0x0b, 0x8f, 0x47, 0xed, 0x56, 0x9e, - 0xdf, 0x20, 0x30, 0x2a, 0xe8, 0xe3, 0x68, 0xf3, 0x2e, 0x63, 0x25, 0x9b, 0x33, 0x8f, 0x23, 0xed, - 0xcb, 0x25, 0x0f, 0x12, 0x46, 0x05, 0x7d, 0x17, 0xe6, 0xc5, 0xd7, 0x91, 0xfa, 0xdd, 0x6a, 0xad, - 0x13, 0xfd, 0x4c, 0xda, 0x89, 0x7f, 0x26, 0xed, 0xdc, 0x75, 0x18, 0x3f, 0x69, 0x97, 0xbc, 0x18, - 0x28, 0x06, 0xcf, 0xe0, 0xd2, 0x3e, 0xe1, 0x29, 0xc0, 0x47, 0xd7, 0x5e, 0xeb, 
0x1a, 0xd4, 0x36, - 0x8a, 0x64, 0x93, 0x77, 0x04, 0xa3, 0x82, 0x7e, 0xa3, 0xc1, 0xe5, 0x7d, 0xc2, 0x8b, 0x90, 0x19, - 0xbd, 0x57, 0x2e, 0xe4, 0x14, 0x68, 0xdd, 0x7e, 0x34, 0x6d, 0x66, 0xe7, 0xd9, 0x1a, 0x15, 0xf4, - 0x3b, 0x0d, 0xd6, 0x33, 0x8a, 0x65, 0x31, 0x30, 0xda, 0x3e, 0x5b, 0xb9, 0x12, 0xbc, 0xdc, 0xfe, - 0x6c, 0xca, 0x9f, 0x23, 0x33, 0x2c, 0x8d, 0x0a, 0x3a, 0x92, 0x67, 0x92, 0xb6, 0x3c, 0xf4, 0x76, - 0x69, 0x6f, 0x4b, 0xa4, 0x6f, 0x9c, 0xb6, 0x9c, 0x9c, 0xc3, 0x67, 0x30, 0xbf, 0x4f, 0x78, 0x5c, - 0x9f, 0xf3, 0x91, 0x56, 0x68, 0x8b, 0xf9, 0x54, 0x2d, 0x96, 0x74, 0x19, 0x31, 0xcb, 0x11, 0xaf, - 0x4c, 0x9d, 0xca, 0xe7, 0x6a, 0x69, 0xb1, 0xce, 0x47, 0x4c, 0x79, 0x99, 0x33, 0x2a, 0x9f, 0xec, - 0xfe, 0xed, 0xe5, 0x86, 0xf6, 0xd5, 0xcb, 0x0d, 0xed, 0x9f, 0x2f, 0x37, 0xb4, 0x1f, 0xdd, 0x7a, - 0xc5, 0xdf, 0x10, 0x64, 0xfe, 0x2c, 0x01, 0x33, 0x6a, 0xd9, 0x94, 0xb8, 0xbc, 0x37, 0x2b, 0x83, - 0xff, 0xd6, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x9f, 0xd1, 0x75, 0xb5, 0x20, 0x00, 0x00, + // 2127 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0x5b, 0x6f, 0x1b, 0xc7, + 0xf5, 0xe7, 0x92, 0x94, 0x44, 0x1e, 0xd9, 0x12, 0x35, 0xd6, 0x65, 0xc5, 0x38, 0x82, 0xb2, 0xff, + 0xbf, 0x0d, 0xd5, 0x4e, 0x48, 0x48, 0x46, 0xe2, 0xc2, 0x49, 0x53, 0x28, 0x8a, 0x2d, 0x39, 0xb6, + 0x6c, 0x75, 0xed, 0xb6, 0x48, 0xeb, 0xb6, 0x18, 0x2e, 0x87, 0xe4, 0x86, 0x7b, 0x19, 0xef, 0xce, + 0x2a, 0x90, 0x81, 0x3e, 0x14, 0x2d, 0xfa, 0x11, 0xfa, 0xd0, 0xaf, 0x51, 0x14, 0x7d, 0xec, 0x53, + 0x2f, 0x8f, 0x41, 0xbf, 0x40, 0x0b, 0xbf, 0x14, 0xe8, 0xa7, 0x28, 0xe6, 0xb2, 0x57, 0xae, 0x64, + 0xa7, 0x94, 0x15, 0xb4, 0x2f, 0xf6, 0xce, 0x99, 0x33, 0xe7, 0x9c, 0x39, 0x73, 0x2e, 0xbf, 0x19, + 0x0a, 0xae, 0x07, 0x84, 0xfa, 0x21, 0x09, 0x8e, 0x49, 0xd0, 0x15, 0x9f, 0x36, 0xf3, 0x83, 0x93, + 0xcc, 0x67, 0x87, 0x06, 0x3e, 0xf3, 0x11, 0xa4, 0x94, 0xf6, 0xc3, 0xa1, 0xcd, 0x46, 0x51, 0xaf, + 0x63, 0xf9, 0x6e, 0x17, 0x07, 0x43, 0x9f, 0x06, 0xfe, 0x17, 0xe2, 0xe3, 0x3d, 0xab, 0xdf, 0x3d, + 0xde, 0xe9, 0xd2, 0xf1, 0xb0, 0x8b, 0xa9, 0x1d, 0x76, 0x31, 0xa5, 0x8e, 0x6d, 0x61, 0x66, 0xfb, + 0x5e, 0xf7, 0x78, 0x1b, 0x3b, 0x74, 0x84, 0xb7, 0xbb, 0x43, 0xe2, 0x91, 0x00, 0x33, 0xd2, 0x97, + 0x92, 0xdb, 0x6f, 0x0d, 0x7d, 0x7f, 0xe8, 0x90, 0xae, 0x18, 0xf5, 0xa2, 0x41, 0x97, 0xb8, 0x94, + 0x29, 0xb5, 0xc6, 0xbf, 0x2e, 0xc1, 0xe2, 0x21, 0xf6, 0xec, 0x01, 0x09, 0x99, 0x49, 0x9e, 0x47, + 0x24, 0x64, 0xe8, 0x19, 0xd4, 0xb9, 0x31, 0xba, 0xb6, 0xa9, 0x6d, 0xcd, 0xef, 0x1c, 0x74, 0x52, + 0x6b, 0x3a, 0xb1, 0x35, 0xe2, 0xe3, 0x67, 0x56, 0xbf, 0x73, 0xbc, 0xd3, 0xa1, 0xe3, 0x61, 0x87, + 0x5b, 0xd3, 0xc9, 0x58, 0xd3, 0x89, 0xad, 0xe9, 0x98, 0xc9, 0xb6, 0x4c, 0x21, 0x15, 0xb5, 0xa1, + 0x11, 0x90, 0x63, 0x3b, 0xb4, 0x7d, 0x4f, 0xaf, 0x6e, 0x6a, 0x5b, 0x4d, 0x33, 0x19, 0x23, 0x1d, + 0xe6, 0x3c, 0x7f, 0x0f, 0x5b, 0x23, 0xa2, 0xd7, 0x36, 0xb5, 0xad, 0x86, 0x19, 0x0f, 0xd1, 0x26, + 0xcc, 0x63, 0x4a, 0x1f, 0xe2, 0x1e, 0x71, 0x1e, 0x90, 0x13, 0xbd, 0x2e, 0x16, 0x66, 0x49, 0x7c, + 0x2d, 0xa6, 0xf4, 0x11, 0x76, 0x89, 0x3e, 0x23, 0x66, 0xe3, 0x21, 0xba, 0x0a, 0x4d, 0x0f, 0xbb, + 0x24, 0xa4, 0xd8, 0x22, 0x7a, 0x43, 0xcc, 0xa5, 0x04, 0xf4, 0x73, 0x58, 0xca, 0x18, 0xfe, 0xc4, + 0x8f, 0x02, 0x8b, 0xe8, 0x20, 0xb6, 0xfe, 0x78, 0xba, 0xad, 0xef, 0x16, 0xc5, 0x9a, 0x93, 0x9a, + 0xd0, 0x4f, 0x61, 0x46, 0x9c, 0xbc, 0x3e, 0xbf, 0x59, 0x3b, 0x57, 0x6f, 0x4b, 0xb1, 0xc8, 0x83, + 0x39, 0xea, 0x44, 0x43, 0xdb, 0x0b, 0xf5, 0x4b, 0x42, 0xc3, 0xd3, 0xe9, 0x34, 0xec, 0xf9, 0xde, + 0xc0, 0x1e, 0x1e, 0x62, 0x0f, 0x0f, 0x89, 0x4b, 0x3c, 
0x76, 0x24, 0x84, 0x9b, 0xb1, 0x12, 0xf4, + 0x02, 0x5a, 0xe3, 0x28, 0x64, 0xbe, 0x6b, 0xbf, 0x20, 0x8f, 0x29, 0x5f, 0x1b, 0xea, 0x97, 0x85, + 0x37, 0x1f, 0x4d, 0xa7, 0xf8, 0x41, 0x41, 0xaa, 0x39, 0xa1, 0x87, 0x07, 0xc9, 0x38, 0xea, 0x91, + 0x1f, 0x90, 0x40, 0x44, 0xd7, 0x82, 0x0c, 0x92, 0x0c, 0x49, 0x86, 0x91, 0xad, 0x46, 0xa1, 0xbe, + 0xb8, 0x59, 0x93, 0x61, 0x94, 0x90, 0xd0, 0x16, 0x2c, 0x1e, 0x93, 0xc0, 0x1e, 0x9c, 0x3c, 0xb1, + 0x87, 0x1e, 0x66, 0x51, 0x40, 0xf4, 0x96, 0x08, 0xc5, 0x22, 0x19, 0xb9, 0x70, 0x79, 0x44, 0x1c, + 0x97, 0xbb, 0x7c, 0x2f, 0x20, 0xfd, 0x50, 0x5f, 0x12, 0xfe, 0xdd, 0x9f, 0xfe, 0x04, 0x85, 0x38, + 0x33, 0x2f, 0x9d, 0x1b, 0xe6, 0xf9, 0xa6, 0xca, 0x14, 0x99, 0x23, 0x48, 0x1a, 0x56, 0x20, 0xa3, + 0xeb, 0xb0, 0xc0, 0x02, 0x6c, 0x8d, 0x6d, 0x6f, 0x78, 0x48, 0xd8, 0xc8, 0xef, 0xeb, 0x57, 0x84, + 0x27, 0x0a, 0x54, 0x64, 0x01, 0x22, 0x1e, 0xee, 0x39, 0xa4, 0x2f, 0x63, 0xf1, 0xe9, 0x09, 0x25, + 0xa1, 0xbe, 0x2c, 0x76, 0x71, 0xab, 0x93, 0xa9, 0x50, 0x85, 0x02, 0xd1, 0xb9, 0x3b, 0xb1, 0xea, + 0xae, 0xc7, 0x82, 0x13, 0xb3, 0x44, 0x1c, 0x1a, 0xc3, 0x3c, 0xdf, 0x47, 0x1c, 0x0a, 0x2b, 0x22, + 0x14, 0xee, 0x4f, 0xe7, 0xa3, 0x83, 0x54, 0xa0, 0x99, 0x95, 0x8e, 0x3a, 0x80, 0x46, 0x38, 0x3c, + 0x8c, 0x1c, 0x66, 0x53, 0x87, 0x48, 0x33, 0x42, 0x7d, 0x55, 0xb8, 0xa9, 0x64, 0x06, 0x3d, 0x00, + 0x08, 0xc8, 0x20, 0xe6, 0x5b, 0x13, 0x3b, 0xbf, 0x79, 0xd6, 0xce, 0xcd, 0x84, 0x5b, 0xee, 0x38, + 0xb3, 0x9c, 0x2b, 0xe7, 0xdb, 0x20, 0x16, 0x53, 0xd9, 0x2e, 0xd2, 0x5a, 0x17, 0x21, 0x56, 0x32, + 0xc3, 0x63, 0x51, 0x51, 0x45, 0xd1, 0x5a, 0x97, 0xd1, 0x9a, 0x21, 0xb5, 0xef, 0xc2, 0xda, 0x29, + 0xae, 0x46, 0x2d, 0xa8, 0x8d, 0xc9, 0x89, 0x28, 0xd1, 0x4d, 0x93, 0x7f, 0xa2, 0x65, 0x98, 0x39, + 0xc6, 0x4e, 0x44, 0x44, 0x51, 0x6d, 0x98, 0x72, 0x70, 0xa7, 0xfa, 0x6d, 0xad, 0xfd, 0x6b, 0x0d, + 0x16, 0x0b, 0x86, 0x97, 0xac, 0xff, 0x49, 0x76, 0xfd, 0x39, 0x84, 0xf1, 0xe0, 0x29, 0x0e, 0x86, + 0x84, 0x65, 0x0c, 0x31, 0xfe, 0xa6, 0x81, 0x5e, 0xf0, 0xe8, 0x0f, 0x6d, 0x36, 0xba, 0x67, 0x3b, + 0x24, 0x44, 0xb7, 0x61, 0x2e, 0x90, 0x34, 0xd5, 0x78, 0xde, 0x3a, 0xe3, 0x20, 0x0e, 0x2a, 0x66, + 0xcc, 0x8d, 0x3e, 0x86, 0x86, 0x4b, 0x18, 0xee, 0x63, 0x86, 0x95, 0xed, 0x9b, 0x65, 0x2b, 0xb9, + 0x96, 0x43, 0xc5, 0x77, 0x50, 0x31, 0x93, 0x35, 0xe8, 0x7d, 0x98, 0xb1, 0x46, 0x91, 0x37, 0x16, + 0x2d, 0x67, 0x7e, 0xe7, 0xed, 0xd3, 0x16, 0xef, 0x71, 0xa6, 0x83, 0x8a, 0x29, 0xb9, 0x3f, 0x99, + 0x85, 0x3a, 0xc5, 0x01, 0x33, 0xee, 0xc1, 0x72, 0x99, 0x0a, 0xde, 0xe7, 0xac, 0x11, 0xb1, 0xc6, + 0x61, 0xe4, 0x2a, 0x37, 0x27, 0x63, 0x84, 0xa0, 0x1e, 0xda, 0x2f, 0xa4, 0xab, 0x6b, 0xa6, 0xf8, + 0x36, 0xbe, 0x05, 0x4b, 0x13, 0xda, 0xf8, 0xa1, 0x4a, 0xdb, 0xb8, 0x84, 0x4b, 0x4a, 0xb5, 0x11, + 0xc1, 0xca, 0x53, 0xe1, 0x8b, 0xa4, 0xd8, 0x5f, 0x44, 0xe7, 0x36, 0x0e, 0x60, 0xb5, 0xa8, 0x36, + 0xa4, 0xbe, 0x17, 0x12, 0x1e, 0xfa, 0xa2, 0x3a, 0xda, 0xa4, 0x9f, 0xce, 0x0a, 0x2b, 0x1a, 0x66, + 0xc9, 0x8c, 0xf1, 0x8b, 0x2a, 0xac, 0x9a, 0x24, 0xf4, 0x9d, 0x63, 0x12, 0x97, 0xae, 0x8b, 0x01, + 0x1f, 0x3f, 0x86, 0x1a, 0xa6, 0x54, 0x85, 0xc9, 0xfd, 0x73, 0x6b, 0xef, 0x26, 0x97, 0x8a, 0xde, + 0x85, 0x25, 0xec, 0xf6, 0xec, 0x61, 0xe4, 0x47, 0x61, 0xbc, 0x2d, 0x11, 0x54, 0x4d, 0x73, 0x72, + 0xc2, 0xb0, 0x60, 0x6d, 0xc2, 0x05, 0xca, 0x9d, 0x59, 0x88, 0xa4, 0x15, 0x20, 0x52, 0xa9, 0x92, + 0xea, 0x69, 0x4a, 0xfe, 0xac, 0x41, 0x2b, 0x4d, 0x1d, 0x25, 0xfe, 0x2a, 0x34, 0x5d, 0x45, 0x0b, + 0x75, 0x4d, 0xd4, 0xa7, 0x94, 0x90, 0x47, 0x4b, 0xd5, 0x22, 0x5a, 0x5a, 0x85, 0x59, 0x09, 0x66, + 0xd5, 0xc6, 0xd4, 0x28, 0x67, 0x72, 0xbd, 0x60, 0xf2, 0x06, 0x40, 0x98, 0xd4, 
0x2f, 0x7d, 0x56, + 0xcc, 0x66, 0x28, 0xc8, 0x80, 0x4b, 0xb2, 0xb7, 0x9a, 0x24, 0x8c, 0x1c, 0xa6, 0xcf, 0x09, 0x8e, + 0x1c, 0xcd, 0xf0, 0x61, 0xf1, 0xa1, 0xcd, 0xf7, 0x30, 0x08, 0x2f, 0x26, 0xd8, 0x3f, 0x80, 0x3a, + 0x57, 0xc6, 0x37, 0xd6, 0x0b, 0xb0, 0x67, 0x8d, 0x48, 0xec, 0xab, 0x64, 0xcc, 0xd3, 0x98, 0xe1, + 0x61, 0xa8, 0x57, 0x05, 0x5d, 0x7c, 0x1b, 0x7f, 0xa8, 0x4a, 0x4b, 0x77, 0x29, 0x0d, 0xbf, 0x79, + 0x40, 0x5d, 0xde, 0xe2, 0x6b, 0x93, 0x2d, 0xbe, 0x60, 0xf2, 0xd7, 0x69, 0xf1, 0xe7, 0xd4, 0xa6, + 0x8c, 0x08, 0xe6, 0x76, 0x29, 0xe5, 0x86, 0xa0, 0x6d, 0xa8, 0x63, 0x4a, 0xa5, 0xc3, 0x0b, 0x15, + 0x59, 0xb1, 0xf0, 0xff, 0x95, 0x49, 0x82, 0xb5, 0x7d, 0x1b, 0x9a, 0x09, 0xe9, 0x55, 0x6a, 0x9b, + 0x59, 0xb5, 0x9b, 0x00, 0x12, 0xc3, 0xde, 0xf7, 0x06, 0x3e, 0x3f, 0x52, 0x1e, 0xec, 0x6a, 0xa9, + 0xf8, 0x36, 0xee, 0xc4, 0x1c, 0xc2, 0xb6, 0x77, 0x61, 0xc6, 0x66, 0xc4, 0x8d, 0x8d, 0x5b, 0xcd, + 0x1a, 0x97, 0x0a, 0x32, 0x25, 0x93, 0xf1, 0x97, 0x06, 0xac, 0xf3, 0x13, 0x7b, 0x22, 0xd2, 0x64, + 0x97, 0xd2, 0x4f, 0x09, 0xc3, 0xb6, 0x13, 0x7e, 0x2f, 0x22, 0xc1, 0xc9, 0x1b, 0x0e, 0x8c, 0x21, + 0xcc, 0xca, 0x2c, 0x53, 0xf5, 0xee, 0xdc, 0xaf, 0x33, 0x4a, 0x7c, 0x7a, 0x87, 0xa9, 0xbd, 0x99, + 0x3b, 0x4c, 0xd9, 0x9d, 0xa2, 0x7e, 0x41, 0x77, 0x8a, 0xd3, 0xaf, 0x95, 0x99, 0xcb, 0xea, 0x6c, + 0xfe, 0xb2, 0x5a, 0x02, 0xd5, 0xe7, 0x5e, 0x17, 0xaa, 0x37, 0x4a, 0xa1, 0xba, 0x5b, 0x9a, 0xc7, + 0x4d, 0xe1, 0xee, 0xef, 0x64, 0x23, 0xf0, 0xd4, 0x58, 0x9b, 0x06, 0xb4, 0xc3, 0x1b, 0x05, 0xed, + 0xdf, 0xcf, 0x81, 0x70, 0x79, 0x0d, 0x7e, 0xff, 0xf5, 0xf6, 0x74, 0x06, 0x1c, 0xff, 0x9f, 0x03, + 0xcf, 0xbf, 0x12, 0x98, 0x89, 0xfa, 0xa9, 0x0f, 0x92, 0x86, 0xce, 0xfb, 0x10, 0x6f, 0xad, 0xaa, + 0x68, 0xf1, 0x6f, 0x74, 0x13, 0xea, 0xdc, 0xc9, 0x0a, 0xd4, 0xae, 0x65, 0xfd, 0xc9, 0x4f, 0x62, + 0x97, 0xd2, 0x27, 0x94, 0x58, 0xa6, 0x60, 0x42, 0x77, 0xa0, 0x99, 0x04, 0xbe, 0xca, 0xac, 0xab, + 0xd9, 0x15, 0x49, 0x9e, 0xc4, 0xcb, 0x52, 0x76, 0xbe, 0xb6, 0x6f, 0x07, 0xc4, 0x12, 0x90, 0x6f, + 0x66, 0x72, 0xed, 0xa7, 0xf1, 0x64, 0xb2, 0x36, 0x61, 0x47, 0xdb, 0x30, 0x2b, 0xdf, 0x0d, 0x44, + 0x06, 0xcd, 0xef, 0xac, 0x4f, 0x16, 0xd3, 0x78, 0x95, 0x62, 0x34, 0xfe, 0xa4, 0xc1, 0x3b, 0x69, + 0x40, 0xc4, 0xd9, 0x14, 0xa3, 0xee, 0x6f, 0xbe, 0xe3, 0x5e, 0x87, 0x05, 0x01, 0xf3, 0xd3, 0xe7, + 0x03, 0xf9, 0x92, 0x55, 0xa0, 0x1a, 0xbf, 0xd7, 0xe0, 0xda, 0xe4, 0x3e, 0xf6, 0x46, 0x38, 0x60, + 0xc9, 0xf1, 0x5e, 0xc4, 0x5e, 0xe2, 0x86, 0x57, 0x4d, 0x1b, 0x5e, 0x6e, 0x7f, 0xb5, 0xfc, 0xfe, + 0x8c, 0x3f, 0x56, 0x61, 0x3e, 0x13, 0x40, 0x65, 0x0d, 0x93, 0x03, 0x3e, 0x11, 0xb7, 0xe2, 0x62, + 0x27, 0x9a, 0x42, 0xd3, 0xcc, 0x50, 0xd0, 0x18, 0x80, 0xe2, 0x00, 0xbb, 0x84, 0x91, 0x80, 0x57, + 0x72, 0x9e, 0xf1, 0x0f, 0xa6, 0xaf, 0x2e, 0x47, 0xb1, 0x4c, 0x33, 0x23, 0x9e, 0x23, 0x56, 0xa1, + 0x3a, 0x54, 0xf5, 0x5b, 0x8d, 0xd0, 0x97, 0xb0, 0x30, 0xb0, 0x1d, 0x72, 0x94, 0x1a, 0x32, 0x2b, + 0x0c, 0x79, 0x3c, 0xbd, 0x21, 0xf7, 0xb2, 0x72, 0xcd, 0x82, 0x1a, 0xe3, 0x06, 0xb4, 0x8a, 0xf9, + 0xc4, 0x8d, 0xb4, 0x5d, 0x3c, 0x4c, 0xbc, 0xa5, 0x46, 0x06, 0x82, 0x56, 0x31, 0x7f, 0x8c, 0xbf, + 0x57, 0x61, 0x25, 0x11, 0xb7, 0xeb, 0x79, 0x7e, 0xe4, 0x59, 0xe2, 0x29, 0xae, 0xf4, 0x2c, 0x96, + 0x61, 0x86, 0xd9, 0xcc, 0x49, 0x80, 0x8f, 0x18, 0xf0, 0xde, 0xc5, 0x7c, 0xdf, 0x61, 0x36, 0x55, + 0x07, 0x1c, 0x0f, 0xe5, 0xd9, 0x3f, 0x8f, 0xec, 0x80, 0xf4, 0x45, 0x25, 0x68, 0x98, 0xc9, 0x98, + 0xcf, 0x71, 0x54, 0x23, 0x60, 0xbc, 0x74, 0x66, 0x32, 0x16, 0x71, 0xef, 0x3b, 0x0e, 0xb1, 0xb8, + 0x3b, 0x32, 0x40, 0xbf, 0x40, 0x15, 0x17, 0x08, 0x16, 0xd8, 0xde, 0x50, 0xc1, 0x7c, 0x35, 0xe2, + 0x76, 
0xe2, 0x20, 0xc0, 0x27, 0x7a, 0x43, 0x38, 0x40, 0x0e, 0xd0, 0x47, 0x50, 0x73, 0x31, 0x55, + 0x8d, 0xee, 0x46, 0xae, 0x3a, 0x94, 0x79, 0xa0, 0x73, 0x88, 0xa9, 0xec, 0x04, 0x7c, 0x59, 0xfb, + 0x03, 0x68, 0xc4, 0x84, 0xaf, 0x05, 0x09, 0xbf, 0x80, 0xcb, 0xb9, 0xe2, 0x83, 0x3e, 0x87, 0xd5, + 0x34, 0xa2, 0xb2, 0x0a, 0x15, 0x08, 0x7c, 0xe7, 0x95, 0x96, 0x99, 0xa7, 0x08, 0x30, 0x9e, 0xc3, + 0x12, 0x0f, 0x19, 0x91, 0xf8, 0x17, 0x74, 0xb5, 0xf9, 0x10, 0x9a, 0x89, 0xca, 0xd2, 0x98, 0x69, + 0x43, 0xe3, 0x38, 0x7e, 0x22, 0x95, 0x77, 0x9b, 0x64, 0x6c, 0xec, 0x02, 0xca, 0xda, 0xab, 0x3a, + 0xd0, 0xcd, 0x3c, 0x28, 0x5e, 0x29, 0xb6, 0x1b, 0xc1, 0x1e, 0x63, 0xe2, 0xdf, 0x55, 0x61, 0x71, + 0xdf, 0x16, 0xaf, 0x1c, 0x17, 0x54, 0xe4, 0x6e, 0x40, 0x2b, 0x8c, 0x7a, 0xae, 0xdf, 0x8f, 0x1c, + 0xa2, 0x40, 0x81, 0xea, 0xf4, 0x13, 0xf4, 0xb3, 0x8a, 0x1f, 0x77, 0x16, 0xc5, 0x6c, 0xa4, 0x6e, + 0xb8, 0xe2, 0x1b, 0x7d, 0x04, 0xeb, 0x8f, 0xc8, 0x97, 0x6a, 0x3f, 0xfb, 0x8e, 0xdf, 0xeb, 0xd9, + 0xde, 0x30, 0x56, 0x32, 0x23, 0x94, 0x9c, 0xce, 0x50, 0x06, 0x15, 0x67, 0x4b, 0xa1, 0xa2, 0xf1, + 0x4b, 0x0d, 0x5a, 0xa9, 0xd7, 0x94, 0xdf, 0x6f, 0xcb, 0xfc, 0x90, 0x5e, 0xbf, 0x96, 0xf5, 0x7a, + 0x91, 0xf5, 0x3f, 0x4f, 0x8d, 0x4b, 0xd9, 0xd4, 0xf8, 0xa7, 0x06, 0x2b, 0xfb, 0x36, 0x8b, 0x8b, + 0x92, 0xfd, 0xdf, 0x76, 0x82, 0x25, 0xfe, 0xae, 0x97, 0xfb, 0xbb, 0x03, 0xab, 0xc5, 0x8d, 0x2a, + 0xa7, 0x2f, 0xc3, 0x0c, 0x3f, 0xf9, 0xf8, 0x3d, 0x40, 0x0e, 0x76, 0xbe, 0x6a, 0xc2, 0x52, 0xda, + 0xd0, 0xf9, 0xbf, 0xb6, 0x45, 0xd0, 0x63, 0x68, 0xed, 0xab, 0xdf, 0xe3, 0xe2, 0x77, 0x18, 0x74, + 0xd6, 0xc3, 0x66, 0xfb, 0x6a, 0xf9, 0xa4, 0x54, 0x6d, 0x54, 0x90, 0x05, 0xeb, 0x45, 0x81, 0xe9, + 0x1b, 0xea, 0xff, 0x9f, 0x21, 0x39, 0xe1, 0x7a, 0x95, 0x8a, 0x2d, 0x0d, 0x7d, 0x0e, 0x0b, 0xf9, + 0x97, 0x3e, 0x94, 0xab, 0x70, 0xa5, 0x8f, 0x8f, 0x6d, 0xe3, 0x2c, 0x96, 0xc4, 0xfe, 0x67, 0x1c, + 0x4e, 0xe7, 0x9e, 0xbd, 0x90, 0x91, 0x07, 0xfb, 0x65, 0xcf, 0x82, 0xed, 0xff, 0x3b, 0x93, 0x27, + 0x91, 0xfe, 0x21, 0x34, 0xe2, 0x67, 0xa2, 0xbc, 0x9b, 0x0b, 0x8f, 0x47, 0xed, 0x56, 0x5e, 0xde, + 0x20, 0x34, 0x2a, 0xe8, 0x63, 0xb9, 0x78, 0x97, 0xd2, 0x92, 0xc5, 0x99, 0xc7, 0x91, 0xf6, 0x95, + 0x92, 0x07, 0x09, 0xa3, 0x82, 0xbe, 0x0b, 0xf3, 0xfc, 0xeb, 0x48, 0xfd, 0x12, 0xb6, 0xda, 0x91, + 0x3f, 0xbc, 0x76, 0xe2, 0x1f, 0x5e, 0x3b, 0x77, 0x5d, 0xca, 0x4e, 0xda, 0x25, 0x2f, 0x06, 0x4a, + 0xc0, 0x33, 0xb8, 0xbc, 0x4f, 0x58, 0x0a, 0xf0, 0xd1, 0xb5, 0xd7, 0xba, 0x06, 0xb5, 0x8d, 0x22, + 0xdb, 0xe4, 0x1d, 0xc1, 0xa8, 0xa0, 0xdf, 0x68, 0x70, 0x65, 0x9f, 0xb0, 0x22, 0x64, 0x46, 0xef, + 0x95, 0x2b, 0x39, 0x05, 0x5a, 0xb7, 0x1f, 0x4d, 0x9b, 0xd9, 0x79, 0xb1, 0x46, 0x05, 0xfd, 0x56, + 0x83, 0xb5, 0x8c, 0x61, 0x59, 0x0c, 0x8c, 0xb6, 0xcf, 0x36, 0xae, 0x04, 0x2f, 0xb7, 0x3f, 0x9b, + 0xf2, 0x07, 0xce, 0x8c, 0x48, 0xa3, 0x82, 0x8e, 0xc4, 0x99, 0xa4, 0x2d, 0x0f, 0xbd, 0x5d, 0xda, + 0xdb, 0x12, 0xed, 0x1b, 0xa7, 0x4d, 0x27, 0xe7, 0xf0, 0x19, 0xcc, 0xef, 0x13, 0x16, 0xd7, 0xe7, + 0x7c, 0xa4, 0x15, 0xda, 0x62, 0x3e, 0x55, 0x8b, 0x25, 0x5d, 0x44, 0xcc, 0x92, 0x94, 0x95, 0xa9, + 0x53, 0xf9, 0x5c, 0x2d, 0x2d, 0xd6, 0xf9, 0x88, 0x29, 0x2f, 0x73, 0x46, 0xe5, 0x93, 0xdd, 0xbf, + 0xbe, 0xdc, 0xd0, 0xbe, 0x7a, 0xb9, 0xa1, 0xfd, 0xe3, 0xe5, 0x86, 0xf6, 0xa3, 0x5b, 0xaf, 0xf8, + 0xab, 0x84, 0xcc, 0x1f, 0x3a, 0x60, 0x6a, 0x5b, 0x8e, 0x4d, 0x3c, 0xd6, 0x9b, 0x15, 0xc1, 0x7f, + 0xeb, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x91, 0xe2, 0xd9, 0x07, 0x21, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
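The regenerated repository.pb.go above adds two length-delimited string fields to ManifestRequest, ProjectSourceRepos (field 24, repeated) and ProjectName (field 25), and the hunks that follow hand-write their tag bytes in MarshalToSizedBuffer, which fills the buffer back to front. The magic constants 0xc2/0x1 and 0xca/0x1 are just the varint-encoded protobuf keys, (field_number << 3) | wire_type. A minimal sketch that reproduces them (encodeTag is an illustrative helper written for this note, not code from the patch):

package main

import "fmt"

// encodeTag varint-encodes a protobuf field key: (fieldNumber << 3) | wireType.
// Wire type 2 marks a length-delimited field (strings, bytes, repeated strings).
func encodeTag(fieldNumber, wireType uint64) []byte {
	key := fieldNumber<<3 | wireType
	var out []byte
	for key >= 0x80 {
		out = append(out, byte(key&0x7f)|0x80) // low 7 bits, continuation bit set
		key >>= 7
	}
	return append(out, byte(key))
}

func main() {
	fmt.Printf("field 24: % x\n", encodeTag(24, 2)) // c2 01 -> ProjectSourceRepos
	fmt.Printf("field 25: % x\n", encodeTag(25, 2)) // ca 01 -> ProjectName
}

Because MarshalToSizedBuffer writes backwards, the generated code emits 0x1 first and then 0xc2 (or 0xca), leaving the bytes in the correct "c2 01" / "ca 01" order on the wire; the matching Unmarshal cases 24 and 25 below reject any field whose wireType is not 2.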
@@ -2918,6 +2938,26 @@ func (m *ManifestRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.ProjectName) > 0 { + i -= len(m.ProjectName) + copy(dAtA[i:], m.ProjectName) + i = encodeVarintRepository(dAtA, i, uint64(len(m.ProjectName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if len(m.ProjectSourceRepos) > 0 { + for iNdEx := len(m.ProjectSourceRepos) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ProjectSourceRepos[iNdEx]) + copy(dAtA[i:], m.ProjectSourceRepos[iNdEx]) + i = encodeVarintRepository(dAtA, i, uint64(len(m.ProjectSourceRepos[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } if len(m.RefSources) > 0 { for k := range m.RefSources { v := m.RefSources[k] @@ -4978,6 +5018,16 @@ func (m *ManifestRequest) Size() (n int) { n += mapEntrySize + 2 + sovRepository(uint64(mapEntrySize)) } } + if len(m.ProjectSourceRepos) > 0 { + for _, s := range m.ProjectSourceRepos { + l = len(s) + n += 2 + l + sovRepository(uint64(l)) + } + } + l = len(m.ProjectName) + if l > 0 { + n += 2 + l + sovRepository(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6578,6 +6628,70 @@ func (m *ManifestRequest) Unmarshal(dAtA []byte) error { } m.RefSources[mapkey] = mapvalue iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectSourceRepos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectSourceRepos = append(m.ProjectSourceRepos, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRepository + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRepository + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRepository + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRepository(dAtA[iNdEx:]) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go b/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go index a615f810c4c..d7a42943674 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go @@ -21,3 +21,16 @@ func StringMapsEqual(first map[string]string, second map[string]string) bool { } return reflect.DeepEqual(first, second) } + +func MergeStringMaps(items ...map[string]string) map[string]string { + res := make(map[string]string) + for _, m := range items { + if m == nil { + continue + } + for k, v := range m { + res[k] = v + } + } + return res +} diff --git 
a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go index 4f643a2895a..61df13b0a39 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/config/reader.go @@ -30,13 +30,7 @@ func unmarshalObject(data []byte, obj interface{}) error { if err != nil { return err } - - err = json.Unmarshal(jsonData, &obj) - if err != nil { - return err - } - - return err + return json.Unmarshal(jsonData, &obj) } // MarshalLocalYAMLFile writes JSON or YAML to a file on disk. diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go index b925789453b..d5a8652f7ce 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go @@ -14,14 +14,6 @@ func ensurePrefix(s, prefix string) string { return s } -// removeSuffix idempotently removes a given suffix -func removeSuffix(s, suffix string) string { - if strings.HasSuffix(s, suffix) { - return s[0 : len(s)-len(suffix)] - } - return s -} - var ( commitSHARegex = regexp.MustCompile("^[0-9A-Fa-f]{40}$") sshURLRegex = regexp.MustCompile("^(ssh://)?([^/:]*?)@[^@]+$") @@ -62,7 +54,7 @@ func NormalizeGitURL(repo string) string { repo = ensurePrefix(repo, "ssh://") } } - repo = removeSuffix(repo, ".git") + repo = strings.TrimSuffix(repo, ".git") repoURL, err := url.Parse(repo) if err != nil { return "" diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go index e4021602ea8..8b99cd67c69 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go @@ -318,8 +318,8 @@ func (c *nativeHelmChart) loadRepoIndex(maxIndexSize int64) ([]byte, error) { } tr := &http.Transport{ - Proxy: proxy.GetCallback(c.proxy), - TLSClientConfig: tlsConf, + Proxy: proxy.GetCallback(c.proxy), + TLSClientConfig: tlsConf, DisableKeepAlives: true, } client := http.Client{Transport: tr} @@ -431,8 +431,8 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) return nil, fmt.Errorf("failed setup tlsConfig: %v", err) } client := &http.Client{Transport: &http.Transport{ - Proxy: proxy.GetCallback(c.proxy), - TLSClientConfig: tlsConf, + Proxy: proxy.GetCallback(c.proxy), + TLSClientConfig: tlsConf, DisableKeepAlives: true, }} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go index e61dfa8e7ae..cc2a1388d65 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go @@ -1,6 +1,7 @@ package helm import ( + "errors" "fmt" "os" "os/exec" @@ -90,6 +91,28 @@ func (c *Cmd) RegistryLogin(repo string, creds Creds) (string, error) { args = append(args, "--password", creds.Password) } + if creds.CAPath != "" { + args = append(args, "--ca-file", creds.CAPath) + } + + if len(creds.CertData) > 0 { + filePath, closer, err := writeToTmp(creds.CertData) + if err != nil { + return "", err + } + defer argoio.Close(closer) + args = append(args, "--cert-file", filePath) + } + + if len(creds.KeyData) > 0 { + filePath, closer, err := writeToTmp(creds.KeyData) + if err != nil { + return "", err + } + defer argoio.Close(closer) + args = append(args, "--key-file", filePath) + } + if creds.InsecureSkipVerify { args = append(args, "--insecure") } @@ -237,6 
+260,25 @@ func (c *Cmd) PullOCI(repo string, chart string, version string, destination str if creds.CAPath != "" { args = append(args, "--ca-file", creds.CAPath) } + + if len(creds.CertData) > 0 { + filePath, closer, err := writeToTmp(creds.CertData) + if err != nil { + return "", err + } + defer argoio.Close(closer) + args = append(args, "--cert-file", filePath) + } + + if len(creds.KeyData) > 0 { + filePath, closer, err := writeToTmp(creds.KeyData) + if err != nil { + return "", err + } + defer argoio.Close(closer) + args = append(args, "--key-file", filePath) + } + if creds.InsecureSkipVerify && c.insecureSkipVerifySupported { args = append(args, "--insecure-skip-tls-verify") } @@ -268,7 +310,8 @@ type TemplateOpts struct { } var ( - re = regexp.MustCompile(`([^\\]),`) + re = regexp.MustCompile(`([^\\]),`) + apiVersionsRemover = regexp.MustCompile(`(--api-versions [^ ]+ )+`) ) func cleanSetParameters(val string) string { @@ -315,7 +358,16 @@ func (c *Cmd) template(chartPath string, opts *TemplateOpts) (string, error) { args = append(args, "--include-crds") } - return c.run(args...) + out, err := c.run(args...) + if err != nil { + msg := err.Error() + if strings.Contains(msg, "--api-versions") { + log.Debug(msg) + msg = apiVersionsRemover.ReplaceAllString(msg, " ") + } + return "", errors.New(msg) + } + return out, nil } func (c *Cmd) Freestyle(args ...string) (string, error) { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go index 6bbcaa751d3..741f224c3c8 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/util.go @@ -20,19 +20,22 @@ var RelativeOutOfBoundErr = errors.New("full path does not contain base path") // does not match (example 2). // // Example 1: -// fullPath: /home/test/app/readme.md -// basePath: /home/test -// return: app/readme.md +// +// fullPath: /home/test/app/readme.md +// basePath: /home/test +// return: app/readme.md // // Example 2: -// fullPath: /home/test/app/readme.md -// basePath: /somewhere/else -// return: "", RelativeOutOfBoundErr +// +// fullPath: /home/test/app/readme.md +// basePath: /somewhere/else +// return: "", RelativeOutOfBoundErr // // Example 3: -// fullPath: /home/test/app/readme.md -// basePath: /home/test/app/readme.md -// return: . +// +// fullPath: /home/test/app/readme.md +// basePath: /home/test/app/readme.md +// return: . 
func RelativePath(fullPath, basePath string) (string, error) { fp := filepath.Clean(fullPath) if !strings.HasPrefix(fp, filepath.Clean(basePath)) { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go index 35bab0314b0..5ea4394b726 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/kube.go @@ -23,7 +23,7 @@ func IsValidResourceName(name string) bool { func SetAppInstanceLabel(target *unstructured.Unstructured, key, val string) error { labels, _, err := nestedNullableStringMap(target.Object, "metadata", "labels") if err != nil { - return err + return fmt.Errorf("failed to get labels from target object %s %s/%s: %w", target.GroupVersionKind().String(), target.GetNamespace(), target.GetName(), err) } if labels == nil { labels = make(map[string]string) @@ -129,7 +129,7 @@ func GetAppInstanceAnnotation(un *unstructured.Unstructured, key string) (string func GetAppInstanceLabel(un *unstructured.Unstructured, key string) (string, error) { labels, _, err := nestedNullableStringMap(un.Object, "metadata", "labels") if err != nil { - return "", err + return "", fmt.Errorf("failed to get labels for %s %s/%s: %w", un.GroupVersionKind().String(), un.GetNamespace(), un.GetName(), err) } if labels != nil { return labels[key], nil @@ -141,7 +141,7 @@ func GetAppInstanceLabel(un *unstructured.Unstructured, key string) (string, err func RemoveLabel(un *unstructured.Unstructured, key string) error { labels, _, err := nestedNullableStringMap(un.Object, "metadata", "labels") if err != nil { - return err + return fmt.Errorf("failed to get labels for %s %s/%s: %w", un.GroupVersionKind().String(), un.GetNamespace(), un.GetName(), err) } if labels == nil { return nil diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go b/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go index 1ea6e0fdadb..f08f7832082 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/kube/portforwarder.go @@ -56,7 +56,7 @@ func PortForward(targetPort int, namespace string, overrides *clientcmd.ConfigOv } if pod == nil { - return -1, fmt.Errorf("cannot find pod with selector: %v", podSelectors) + return -1, fmt.Errorf("cannot find pod with selector: %v - use the --{component}-name flag in this command or set the environmental variable (Refer to https://argo-cd.readthedocs.io/en/stable/user-guide/environment-variables), to change the Argo CD component name in the CLI", podSelectors) } url := clientSet.CoreV1().RESTClient().Post(). 
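The kube.go hunks above, like the settings.go hunks that follow, replace bare "return err" with fmt.Errorf("...: %w", err): the message gains the object's GVK, namespace, and name, while %w keeps the original error in the unwrap chain so callers can still match it. A minimal sketch of the pattern, with illustrative names (getLabels, errNoLabels, and the object string are stand-ins, not code from the patch):

package main

import (
	"errors"
	"fmt"
)

var errNoLabels = errors.New("labels field is not a string map")

// getLabels stands in for nestedNullableStringMap and fails for demonstration.
func getLabels() (map[string]string, error) {
	return nil, errNoLabels
}

func main() {
	_, err := getLabels()
	if err != nil {
		// Wrap with object context, as the patched helpers do.
		err = fmt.Errorf("failed to get labels for apps/v1, Kind=Deployment default/my-app: %w", err)
	}
	fmt.Println(err)
	// %w preserves the chain, so sentinel checks still succeed upstream.
	fmt.Println(errors.Is(err, errNoLabels)) // true
}

Had the wrapping used %v instead of %w, errors.Is would report false: the extra context would come at the cost of programmatic error handling.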
diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go b/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go index 86f95cbfc7d..7e656eabba8 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/settings/resources_filter.go @@ -62,7 +62,6 @@ func (rf *ResourcesFilter) isExcludedResource(apiGroup, kind, cluster string) bo // +-------------+-------------+-------------+ // | Present | Present | Not Allowed | // +-------------+-------------+-------------+ -// func (rf *ResourcesFilter) IsExcludedResource(apiGroup, kind, cluster string) bool { // if excluded, do not allow if rf.isExcludedResource(apiGroup, kind, cluster) { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go index e01e950ca2e..ad4238eb122 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/settings/settings.go @@ -17,7 +17,6 @@ import ( "sync" "time" - timeutil "github.com/argoproj/pkg/time" log "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" @@ -39,6 +38,8 @@ import ( "github.com/argoproj/argo-cd/v2/util/kube" "github.com/argoproj/argo-cd/v2/util/password" tlsutil "github.com/argoproj/argo-cd/v2/util/tls" + enginecache "github.com/argoproj/gitops-engine/pkg/cache" + timeutil "github.com/argoproj/pkg/time" ) // ArgoCDSettings holds in-memory runtime configuration options. @@ -71,6 +72,10 @@ type ArgoCDSettings struct { WebhookBitbucketServerSecret string `json:"webhookBitbucketServerSecret,omitempty"` // WebhookGogsSecret holds the shared secret for authenticating Gogs webhook events WebhookGogsSecret string `json:"webhookGogsSecret,omitempty"` + // WebhookAzureDevOpsUsername holds the username for authenticating Azure DevOps webhook events + WebhookAzureDevOpsUsername string `json:"webhookAzureDevOpsUsername,omitempty"` + // WebhookAzureDevOpsPassword holds the password for authenticating Azure DevOps webhook events + WebhookAzureDevOpsPassword string `json:"webhookAzureDevOpsPassword,omitempty"` // Secrets holds all secrets in argocd-secret as a map[string]string Secrets map[string]string `json:"secrets,omitempty"` // KustomizeBuildOptions is a string of kustomize build parameters @@ -411,6 +416,12 @@ const ( settingsWebhookBitbucketServerSecretKey = "webhook.bitbucketserver.secret" // settingsWebhookGogsSecret is the key for Gogs webhook secret settingsWebhookGogsSecretKey = "webhook.gogs.secret" + // settingsWebhookAzureDevOpsUsernameKey is the key for Azure DevOps webhook username + settingsWebhookAzureDevOpsUsernameKey = "webhook.azuredevops.username" + // settingsWebhookAzureDevOpsPasswordKey is the key for Azure DevOps webhook password + settingsWebhookAzureDevOpsPasswordKey = "webhook.azuredevops.password" + // settingsWebhookMaxPayloadSize is the key for the maximum payload size for webhooks in MB + settingsWebhookMaxPayloadSizeMB = "webhook.maxPayloadSizeMB" // settingsApplicationInstanceLabelKey is the key to configure injected app instance label key settingsApplicationInstanceLabelKey = "application.instanceLabelKey" // settingsResourceTrackingMethodKey is the key to configure tracking method for application resources @@ -482,16 +493,23 @@ const ( // ResourceDeepLinks is the resource deep link key ResourceDeepLinks = "resource.links" extensionConfig = "extension.config" + // 
RespectRBAC is the key to configure argocd to respect rbac while watching for resources + RespectRBAC = "resource.respectRBAC" + RespectRBACValueStrict = "strict" + RespectRBACValueNormal = "normal" ) -var ( - sourceTypeToEnableGenerationKey = map[v1alpha1.ApplicationSourceType]string{ - v1alpha1.ApplicationSourceTypeKustomize: "kustomize.enable", - v1alpha1.ApplicationSourceTypeHelm: "helm.enable", - v1alpha1.ApplicationSourceTypeDirectory: "jsonnet.enable", - } +const ( + // default max webhook payload size is 1GB + defaultMaxWebhookPayloadSize = int64(1) * 1024 * 1024 * 1024 ) +var sourceTypeToEnableGenerationKey = map[v1alpha1.ApplicationSourceType]string{ + v1alpha1.ApplicationSourceTypeKustomize: "kustomize.enable", + v1alpha1.ApplicationSourceTypeHelm: "helm.enable", + v1alpha1.ApplicationSourceTypeDirectory: "jsonnet.enable", +} + // SettingsManager holds config info for a new manager with which to access Kubernetes ConfigMaps. type SettingsManager struct { ctx context.Context @@ -545,6 +563,24 @@ func (mgr *SettingsManager) onRepoOrClusterChanged() { } } +func (mgr *SettingsManager) RespectRBAC() (int, error) { + cm, err := mgr.getConfigMap() + if err != nil { + return enginecache.RespectRbacDisabled, err + } + if cm.Data[RespectRBAC] != "" { + switch cm.Data[RespectRBAC] { + case RespectRBACValueNormal: + return enginecache.RespectRbacNormal, nil + case RespectRBACValueStrict: + return enginecache.RespectRbacStrict, nil + default: + return enginecache.RespectRbacDisabled, fmt.Errorf("invalid value for %s: %s", RespectRBAC, cm.Data[RespectRBAC]) + } + } + return enginecache.RespectRbacDisabled, nil +} + func (mgr *SettingsManager) GetSecretsLister() (v1listers.SecretLister, error) { err := mgr.ensureSynced(false) if err != nil { @@ -556,7 +592,7 @@ func (mgr *SettingsManager) GetSecretsLister() (v1listers.SecretLister, error) { func (mgr *SettingsManager) GetSecretsInformer() (cache.SharedIndexInformer, error) { err := mgr.ensureSynced(false) if err != nil { - return nil, err + return nil, fmt.Errorf("error ensuring that the secrets manager is synced: %w", err) } return mgr.secretsInformer, nil } @@ -680,14 +716,14 @@ func (mgr *SettingsManager) GetConfigMapByName(configMapName string) (*apiv1.Con func (mgr *SettingsManager) GetResourcesFilter() (*ResourcesFilter, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-cm: %w", err) } rf := &ResourcesFilter{} if value, ok := argoCDCM.Data[resourceInclusionsKey]; ok { includedResources := make([]FilteredResource, 0) err := yaml.Unmarshal([]byte(value), &includedResources) if err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling included resources %w", err) } rf.ResourceInclusions = includedResources } @@ -696,7 +732,7 @@ func (mgr *SettingsManager) GetResourcesFilter() (*ResourcesFilter, error) { excludedResources := make([]FilteredResource, 0) err := yaml.Unmarshal([]byte(value), &excludedResources) if err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling excluded resources %w", err) } rf.ResourceExclusions = excludedResources } @@ -751,13 +787,13 @@ func (mgr *SettingsManager) GetServerRBACLogEnforceEnable() (bool, error) { func (mgr *SettingsManager) GetDeepLinks(deeplinkType string) ([]DeepLink, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-cm: %w", err) } deepLinks := make([]DeepLink, 0) if value, ok := 
argoCDCM.Data[deeplinkType]; ok { err := yaml.Unmarshal([]byte(value), &deepLinks) if err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling deep links %w", err) } } return deepLinks, nil @@ -820,7 +856,7 @@ func (mgr *SettingsManager) GetIgnoreResourceUpdatesOverrides() (map[string]v1al func (mgr *SettingsManager) GetIsIgnoreResourceUpdatesEnabled() (bool, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return false, err + return false, fmt.Errorf("error retrieving config map: %w", err) } if argoCDCM.Data[resourceIgnoreResourceUpdatesEnabledKey] == "" { @@ -834,7 +870,7 @@ func (mgr *SettingsManager) GetIsIgnoreResourceUpdatesEnabled() (bool, error) { func (mgr *SettingsManager) GetResourceOverrides() (map[string]v1alpha1.ResourceOverride, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving config map: %w", err) } resourceOverrides := map[string]v1alpha1.ResourceOverride{} if value, ok := argoCDCM.Data[resourceCustomizationsKey]; ok && value != "" { @@ -1028,7 +1064,7 @@ func (mgr *SettingsManager) GetHelmSettings() (*v1alpha1.HelmOptions, error) { func (mgr *SettingsManager) GetKustomizeSettings() (*KustomizeSettings, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-cm: %w", err) } kustomizeVersionsMap := map[string]KustomizeVersion{} buildOptions := map[string]string{} @@ -1088,14 +1124,14 @@ func addKustomizeVersion(prefix, name, path string, kvMap map[string]KustomizeVe func (mgr *SettingsManager) GetHelmRepositories() ([]HelmRepoCredentials, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving config map: %w", err) } helmRepositories := make([]HelmRepoCredentials, 0) helmRepositoriesStr := argoCDCM.Data[helmRepositoriesKey] if helmRepositoriesStr != "" { err := yaml.Unmarshal([]byte(helmRepositoriesStr), &helmRepositories) if err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling helm repositories: %w", err) } } return helmRepositories, nil @@ -1173,7 +1209,7 @@ func (mgr *SettingsManager) GetRepositoryCredentials() ([]RepositoryCredentials, // Get the config map outside of the lock argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving config map: %w", err) } mgr.mutex.Lock() @@ -1194,7 +1230,7 @@ func (mgr *SettingsManager) GetRepositoryCredentials() ([]RepositoryCredentials, func (mgr *SettingsManager) GetGoogleAnalytics() (*GoogleAnalytics, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving config map: %w", err) } return &GoogleAnalytics{ TrackingID: argoCDCM.Data[gaTrackingID], @@ -1205,7 +1241,7 @@ func (mgr *SettingsManager) GetGoogleAnalytics() (*GoogleAnalytics, error) { func (mgr *SettingsManager) GetHelp() (*Help, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving config map: %w", err) } chatText, ok := argoCDCM.Data[helpChatText] if !ok { @@ -1230,15 +1266,15 @@ func (mgr *SettingsManager) GetSettings() (*ArgoCDSettings, error) { } argoCDCM, err := mgr.configmaps.ConfigMaps(mgr.namespace).Get(common.ArgoCDConfigMapName) if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-cm: %w", err) } argoCDSecret, err := mgr.secrets.Secrets(mgr.namespace).Get(common.ArgoCDSecretName) 
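The RespectRBAC accessor added in this hunk translates the resource.respectRBAC entry of argocd-cm into the gitops-engine modes (disabled, normal, strict). A stand-alone sketch of that mapping, where parseRespectRBAC is a hypothetical helper and the local constants mirror enginecache.RespectRbac*:

package main

import "fmt"

// Local mirrors of the gitops-engine modes: disabled, normal (trust API
// forbidden/unauthorized responses only), strict (additionally run a
// SelfSubjectAccessReview before dropping a resource from the watch list).
const (
	respectRbacDisabled = iota
	respectRbacNormal
	respectRbacStrict
)

// parseRespectRBAC is a hypothetical stand-in for SettingsManager.RespectRBAC:
// an empty or absent value keeps the feature disabled; anything else must
// match one of the two supported values exactly.
func parseRespectRBAC(value string) (int, error) {
	switch value {
	case "":
		return respectRbacDisabled, nil
	case "normal":
		return respectRbacNormal, nil
	case "strict":
		return respectRbacStrict, nil
	default:
		return respectRbacDisabled, fmt.Errorf("invalid value for resource.respectRBAC: %s", value)
	}
}

func main() {
	mode, err := parseRespectRBAC("strict")
	fmt.Println(mode, err) // 2 <nil>
}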
if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-secret: %w", err) } selector, err := labels.Parse(partOfArgoCDSelector) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing Argo CD selector %w", err) } secrets, err := mgr.secrets.Secrets(mgr.namespace).List(selector) if err != nil { @@ -1457,6 +1493,12 @@ func (mgr *SettingsManager) updateSettingsFromSecret(settings *ArgoCDSettings, a if gogsWebhookSecret := argoCDSecret.Data[settingsWebhookGogsSecretKey]; len(gogsWebhookSecret) > 0 { settings.WebhookGogsSecret = string(gogsWebhookSecret) } + if azureDevOpsUsername := argoCDSecret.Data[settingsWebhookAzureDevOpsUsernameKey]; len(azureDevOpsUsername) > 0 { + settings.WebhookAzureDevOpsUsername = string(azureDevOpsUsername) + } + if azureDevOpsPassword := argoCDSecret.Data[settingsWebhookAzureDevOpsPasswordKey]; len(azureDevOpsPassword) > 0 { + settings.WebhookAzureDevOpsPassword = string(azureDevOpsPassword) + } // The TLS certificate may be externally managed. We try to load it from an // external secret first. If the external secret doesn't exist, we either @@ -1576,6 +1618,12 @@ func (mgr *SettingsManager) SaveSettings(settings *ArgoCDSettings) error { if settings.WebhookGogsSecret != "" { argoCDSecret.Data[settingsWebhookGogsSecretKey] = []byte(settings.WebhookGogsSecret) } + if settings.WebhookAzureDevOpsUsername != "" { + argoCDSecret.Data[settingsWebhookAzureDevOpsUsernameKey] = []byte(settings.WebhookAzureDevOpsUsername) + } + if settings.WebhookAzureDevOpsPassword != "" { + argoCDSecret.Data[settingsWebhookAzureDevOpsPasswordKey] = []byte(settings.WebhookAzureDevOpsPassword) + } // we only write the certificate to the secret if it's not externally // managed. if settings.Certificate != nil && !settings.CertificateIsExternal { @@ -1957,7 +2005,7 @@ func (mgr *SettingsManager) InitializeSettings(insecureModeEnabled bool) (*ArgoC // set JWT signature signature, err := util.MakeSignature(32) if err != nil { - return nil, err + return nil, fmt.Errorf("error setting JWT signature: %w", err) } cdSettings.ServerSignature = signature log.Info("Initialized server signature") @@ -2087,14 +2135,14 @@ func ReplaceStringSecret(val string, secretValues map[string]string) string { func (mgr *SettingsManager) GetGlobalProjectsSettings() ([]GlobalProjectSettings, error) { argoCDCM, err := mgr.getConfigMap() if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving argocd-cm: %w", err) } globalProjectSettings := make([]GlobalProjectSettings, 0) if value, ok := argoCDCM.Data[globalProjectsKey]; ok { if value != "" { err := yaml.Unmarshal([]byte(value), &globalProjectSettings) if err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling global project settings: %w", err) } } } @@ -2116,3 +2164,22 @@ func (mgr *SettingsManager) GetResourceCustomLabels() ([]string, error) { } return []string{}, nil } + +func (mgr *SettingsManager) GetMaxWebhookPayloadSize() int64 { + argoCDCM, err := mgr.getConfigMap() + if err != nil { + return defaultMaxWebhookPayloadSize + } + + if argoCDCM.Data[settingsWebhookMaxPayloadSizeMB] == "" { + return defaultMaxWebhookPayloadSize + } + + maxPayloadSizeMB, err := strconv.ParseInt(argoCDCM.Data[settingsWebhookMaxPayloadSizeMB], 10, 64) + if err != nil { + log.Warnf("Failed to parse '%s' key: %v", settingsWebhookMaxPayloadSizeMB, err) + return defaultMaxWebhookPayloadSize + } + + return maxPayloadSizeMB * 1024 * 1024 +} diff --git 
a/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go b/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go index d963eed55ce..5e18c8eb75c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/tls/tls.go @@ -123,11 +123,11 @@ func tlsVersionsToStr(versions []uint16) []string { func getTLSConfigCustomizer(minVersionStr, maxVersionStr, tlsCiphersStr string) (ConfigCustomizer, error) { minVersion, err := getTLSVersionByString(minVersionStr) if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving TLS version by min version %q: %w", minVersionStr, err) } maxVersion, err := getTLSVersionByString(maxVersionStr) if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving TLS version by max version %q: %w", maxVersionStr, err) } if minVersion > maxVersion { return nil, fmt.Errorf("Minimum TLS version %s must not be higher than maximum TLS version %s", minVersionStr, maxVersionStr) @@ -153,7 +153,7 @@ func getTLSConfigCustomizer(minVersionStr, maxVersionStr, tlsCiphersStr string) if tlsCiphersStr != "" { cipherSuites, err = getTLSCipherSuitesByString(tlsCiphersStr) if err != nil { - return nil, err + return nil, fmt.Errorf("error retrieving TLS cipher suites: %w", err) } } else { cipherSuites = make([]uint16, 0) @@ -309,7 +309,7 @@ func generatePEM(opts CertOptions) ([]byte, []byte, error) { func GenerateX509KeyPair(opts CertOptions) (*tls.Certificate, error) { certpem, keypem, err := generatePEM(opts) if err != nil { - return nil, err + return nil, fmt.Errorf("error generating X509 key pair: %w", err) } cert, err := tls.X509KeyPair(certpem, keypem) if err != nil { @@ -420,7 +420,7 @@ func CreateServerTLSConfig(tlsCertPath, tlsKeyPath string, hosts []string) (*tls IsCA: false, }) if err != nil { - return nil, err + return nil, fmt.Errorf("error generating X509 key pair: %w", err) } cert = c } else { diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go new file mode 100644 index 00000000000..70a365c7a12 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/cluster.go @@ -0,0 +1,1185 @@ +package cache + +import ( + "context" + "fmt" + "runtime/debug" + "sort" + "strings" + "sync" + "time" + + "github.com/go-logr/logr" + "golang.org/x/sync/semaphore" + authorizationv1 "k8s.io/api/authorization/v1" + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/managedfields" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + authType1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/pager" + watchutil "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2/klogr" + "k8s.io/kubectl/pkg/util/openapi" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" +) + +const ( + watchResourcesRetryTimeout = 1 * time.Second + ClusterRetryTimeout = 10 * time.Second + + // default duration before we invalidate entire cluster cache. 
Can be set to 0 to never invalidate cache + defaultClusterResyncTimeout = 24 * time.Hour + + // default duration before restarting individual resource watch + defaultWatchResyncTimeout = 10 * time.Minute + + // Same page size as in k8s.io/client-go/tools/pager/pager.go + defaultListPageSize = 500 + // Prefetch only a single page + defaultListPageBufferSize = 1 + // listSemaphore is used to limit the number of concurrent memory consuming operations on the + // k8s list queries results. + // Limit is required to avoid memory spikes during cache initialization. + // The default limit of 50 is chosen based on experiments. + defaultListSemaphoreWeight = 50 +) + +const ( + // RespectRbacDisabled default value for respectRbac + RespectRbacDisabled = iota + // RespectRbacNormal checks only api response for forbidden/unauthorized errors + RespectRbacNormal + // RespectRbacStrict checks both api response for forbidden/unauthorized errors and SelfSubjectAccessReview + RespectRbacStrict +) + +type apiMeta struct { + namespaced bool + watchCancel context.CancelFunc +} + +// ClusterInfo holds cluster cache stats +type ClusterInfo struct { + // Server holds cluster API server URL + Server string + // K8SVersion holds Kubernetes version + K8SVersion string + // ResourcesCount holds number of observed Kubernetes resources + ResourcesCount int + // APIsCount holds number of observed Kubernetes API count + APIsCount int + // LastCacheSyncTime holds time of most recent cache synchronization + LastCacheSyncTime *time.Time + // SyncError holds most recent cache synchronization error + SyncError error + // APIResources holds list of API resources supported by the cluster + APIResources []kube.APIResourceInfo +} + +// OnEventHandler is a function that handles Kubernetes event +type OnEventHandler func(event watch.EventType, un *unstructured.Unstructured) + +// OnPopulateResourceInfoHandler returns additional resource metadata that should be stored in cache +type OnPopulateResourceInfoHandler func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) + +// OnResourceUpdatedHandler handlers resource update event +type OnResourceUpdatedHandler func(newRes *Resource, oldRes *Resource, namespaceResources map[kube.ResourceKey]*Resource) +type Unsubscribe func() + +type ClusterCache interface { + // EnsureSynced checks cache state and synchronizes it if necessary + EnsureSynced() error + // GetServerVersion returns observed cluster version + GetServerVersion() string + // GetAPIResources returns information about observed API resources + GetAPIResources() []kube.APIResourceInfo + // GetOpenAPISchema returns open API schema of supported API resources + GetOpenAPISchema() openapi.Resources + // GetGVKParser returns a parser able to build a TypedValue used in + // structured merge diffs. + GetGVKParser() *managedfields.GvkParser + // Invalidate cache and executes callback that optionally might update cache settings + Invalidate(opts ...UpdateSettingsFunc) + // FindResources returns resources that matches given list of predicates from specified namespace or everywhere if specified namespace is empty + FindResources(namespace string, predicates ...func(r *Resource) bool) map[kube.ResourceKey]*Resource + // IterateHierarchy iterates resource tree starting from the specified top level resource and executes callback for each resource in the tree. + // The action callback returns true if iteration should continue and false otherwise. 
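Before the remaining interface methods, a usage sketch may help: the methods above compose into a small consumer. This assumes an in-cluster rest.Config and uses only constructors and predicates defined in this vendored package; error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"k8s.io/client-go/rest"
)

func main() {
	config, err := rest.InClusterConfig() // assumes the process runs inside a pod
	if err != nil {
		panic(err)
	}
	clusterCache := cache.NewClusterCache(config)
	// EnsureSynced performs the initial list+watch before first use.
	if err := clusterCache.EnsureSynced(); err != nil {
		panic(err)
	}
	// Find every ownerless resource across all namespaces.
	roots := clusterCache.FindResources("", cache.TopLevelResource)
	fmt.Printf("cluster %s holds %d top-level resources\n",
		clusterCache.GetClusterInfo().Server, len(roots))
}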
+ IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) + // IsNamespaced answers if specified group/kind is a namespaced resource API or not + IsNamespaced(gk schema.GroupKind) (bool, error) + // GetManagedLiveObjs helps finding matching live K8S resources for a given resources list. + // The function returns all resources from cache for those `isManaged` function returns true and resources + // specified in targetObjs list. + GetManagedLiveObjs(targetObjs []*unstructured.Unstructured, isManaged func(r *Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error) + // GetClusterInfo returns cluster cache statistics + GetClusterInfo() ClusterInfo + // OnResourceUpdated register event handler that is executed every time when resource get's updated in the cache + OnResourceUpdated(handler OnResourceUpdatedHandler) Unsubscribe + // OnEvent register event handler that is executed every time when new K8S event received + OnEvent(handler OnEventHandler) Unsubscribe +} + +type WeightedSemaphore interface { + Acquire(ctx context.Context, n int64) error + TryAcquire(n int64) bool + Release(n int64) +} + +type ListRetryFunc func(err error) bool + +// NewClusterCache creates new instance of cluster cache +func NewClusterCache(config *rest.Config, opts ...UpdateSettingsFunc) *clusterCache { + log := klogr.New() + cache := &clusterCache{ + settings: Settings{ResourceHealthOverride: &noopSettings{}, ResourcesFilter: &noopSettings{}}, + apisMeta: make(map[schema.GroupKind]*apiMeta), + listPageSize: defaultListPageSize, + listPageBufferSize: defaultListPageBufferSize, + listSemaphore: semaphore.NewWeighted(defaultListSemaphoreWeight), + resources: make(map[kube.ResourceKey]*Resource), + nsIndex: make(map[string]map[kube.ResourceKey]*Resource), + config: config, + kubectl: &kube.KubectlCmd{ + Log: log, + Tracer: tracing.NopTracer{}, + }, + syncStatus: clusterCacheSync{ + resyncTimeout: defaultClusterResyncTimeout, + syncTime: nil, + }, + watchResyncTimeout: defaultWatchResyncTimeout, + clusterSyncRetryTimeout: ClusterRetryTimeout, + resourceUpdatedHandlers: map[uint64]OnResourceUpdatedHandler{}, + eventHandlers: map[uint64]OnEventHandler{}, + log: log, + listRetryLimit: 1, + listRetryUseBackoff: false, + listRetryFunc: ListRetryFuncNever, + } + for i := range opts { + opts[i](cache) + } + return cache +} + +type clusterCache struct { + syncStatus clusterCacheSync + + apisMeta map[schema.GroupKind]*apiMeta + serverVersion string + apiResources []kube.APIResourceInfo + // namespacedResources is a simple map which indicates a groupKind is namespaced + namespacedResources map[schema.GroupKind]bool + + // maximum time we allow watches to run before relisting the group/kind and restarting the watch + watchResyncTimeout time.Duration + // sync retry timeout for cluster when sync error happens + clusterSyncRetryTimeout time.Duration + + // size of a page for list operations pager. + listPageSize int64 + // number of pages to prefetch for list pager. 
+ listPageBufferSize int32 + listSemaphore WeightedSemaphore + + // retry options for list operations + listRetryLimit int32 + listRetryUseBackoff bool + listRetryFunc ListRetryFunc + + // lock is a rw lock which protects the fields of clusterInfo + lock sync.RWMutex + resources map[kube.ResourceKey]*Resource + nsIndex map[string]map[kube.ResourceKey]*Resource + + kubectl kube.Kubectl + log logr.Logger + config *rest.Config + namespaces []string + clusterResources bool + settings Settings + + handlersLock sync.Mutex + handlerKey uint64 + populateResourceInfoHandler OnPopulateResourceInfoHandler + resourceUpdatedHandlers map[uint64]OnResourceUpdatedHandler + eventHandlers map[uint64]OnEventHandler + openAPISchema openapi.Resources + gvkParser *managedfields.GvkParser + + respectRBAC int +} + +type clusterCacheSync struct { + // When using this struct: + // 1) 'lock' mutex should be acquired when reading/writing from fields of this struct. + // 2) The parent 'clusterCache.lock' does NOT need to be owned to r/w from fields of this struct (if it is owned, that is fine, but see below) + // 3) To prevent deadlocks, do not acquire parent 'clusterCache.lock' after acquiring this lock; if you need both locks, always acquire the parent lock first + lock sync.Mutex + syncTime *time.Time + syncError error + resyncTimeout time.Duration +} + +// ListRetryFuncNever never retries on errors +func ListRetryFuncNever(err error) bool { + return false +} + +// ListRetryFuncAlways always retries on errors +func ListRetryFuncAlways(err error) bool { + return true +} + +// OnResourceUpdated register event handler that is executed every time when resource get's updated in the cache +func (c *clusterCache) OnResourceUpdated(handler OnResourceUpdatedHandler) Unsubscribe { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + key := c.handlerKey + c.handlerKey++ + c.resourceUpdatedHandlers[key] = handler + return func() { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + delete(c.resourceUpdatedHandlers, key) + } +} + +func (c *clusterCache) getResourceUpdatedHandlers() []OnResourceUpdatedHandler { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + var handlers []OnResourceUpdatedHandler + for _, h := range c.resourceUpdatedHandlers { + handlers = append(handlers, h) + } + return handlers +} + +// OnEvent register event handler that is executed every time when new K8S event received +func (c *clusterCache) OnEvent(handler OnEventHandler) Unsubscribe { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + key := c.handlerKey + c.handlerKey++ + c.eventHandlers[key] = handler + return func() { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + delete(c.eventHandlers, key) + } +} + +func (c *clusterCache) getEventHandlers() []OnEventHandler { + c.handlersLock.Lock() + defer c.handlersLock.Unlock() + handlers := make([]OnEventHandler, 0, len(c.eventHandlers)) + for _, h := range c.eventHandlers { + handlers = append(handlers, h) + } + return handlers +} + +// GetServerVersion returns observed cluster version +func (c *clusterCache) GetServerVersion() string { + return c.serverVersion +} + +// GetAPIResources returns information about observed API resources +func (c *clusterCache) GetAPIResources() []kube.APIResourceInfo { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.apiResources +} + +// GetOpenAPISchema returns open API schema of supported API resources +func (c *clusterCache) GetOpenAPISchema() openapi.Resources { + return c.openAPISchema +} + +// GetGVKParser returns a parser 
able to build a TypedValue used in +// structured merge diffs. +func (c *clusterCache) GetGVKParser() *managedfields.GvkParser { + return c.gvkParser +} + +func (c *clusterCache) appendAPIResource(info kube.APIResourceInfo) { + exists := false + for i := range c.apiResources { + if c.apiResources[i].GroupKind == info.GroupKind && c.apiResources[i].GroupVersionResource.Version == info.GroupVersionResource.Version { + exists = true + break + } + } + if !exists { + c.apiResources = append(c.apiResources, info) + } +} + +func (c *clusterCache) deleteAPIResource(info kube.APIResourceInfo) { + for i := range c.apiResources { + if c.apiResources[i].GroupKind == info.GroupKind && c.apiResources[i].GroupVersionResource.Version == info.GroupVersionResource.Version { + c.apiResources[i] = c.apiResources[len(c.apiResources)-1] + c.apiResources = c.apiResources[:len(c.apiResources)-1] + break + } + } +} + +func (c *clusterCache) replaceResourceCache(gk schema.GroupKind, resources []*Resource, ns string) { + objByKey := make(map[kube.ResourceKey]*Resource) + for i := range resources { + objByKey[resources[i].ResourceKey()] = resources[i] + } + + // update existing nodes + for i := range resources { + res := resources[i] + oldRes := c.resources[res.ResourceKey()] + if oldRes == nil || oldRes.ResourceVersion != res.ResourceVersion { + c.onNodeUpdated(oldRes, res) + } + } + + for key := range c.resources { + if key.Kind != gk.Kind || key.Group != gk.Group || ns != "" && key.Namespace != ns { + continue + } + + if _, ok := objByKey[key]; !ok { + c.onNodeRemoved(key) + } + } +} + +func (c *clusterCache) newResource(un *unstructured.Unstructured) *Resource { + ownerRefs, isInferredParentOf := c.resolveResourceReferences(un) + + cacheManifest := false + var info interface{} + if c.populateResourceInfoHandler != nil { + info, cacheManifest = c.populateResourceInfoHandler(un, len(ownerRefs) == 0) + } + var creationTimestamp *metav1.Time + ct := un.GetCreationTimestamp() + if !ct.IsZero() { + creationTimestamp = &ct + } + resource := &Resource{ + ResourceVersion: un.GetResourceVersion(), + Ref: kube.GetObjectRef(un), + OwnerRefs: ownerRefs, + Info: info, + CreationTimestamp: creationTimestamp, + isInferredParentOf: isInferredParentOf, + } + if cacheManifest { + resource.Resource = un + } + + return resource +} + +func (c *clusterCache) setNode(n *Resource) { + key := n.ResourceKey() + c.resources[key] = n + ns, ok := c.nsIndex[key.Namespace] + if !ok { + ns = make(map[kube.ResourceKey]*Resource) + c.nsIndex[key.Namespace] = ns + } + ns[key] = n + + // update inferred parent references + if n.isInferredParentOf != nil || mightHaveInferredOwner(n) { + for k, v := range ns { + // update child resource owner references + if n.isInferredParentOf != nil && mightHaveInferredOwner(v) { + v.setOwnerRef(n.toOwnerRef(), n.isInferredParentOf(k)) + } + if mightHaveInferredOwner(n) && v.isInferredParentOf != nil { + n.setOwnerRef(v.toOwnerRef(), v.isInferredParentOf(n.ResourceKey())) + } + } + } +} + +// Invalidate cache and executes callback that optionally might update cache settings +func (c *clusterCache) Invalidate(opts ...UpdateSettingsFunc) { + c.lock.Lock() + defer c.lock.Unlock() + + c.syncStatus.lock.Lock() + c.syncStatus.syncTime = nil + c.syncStatus.lock.Unlock() + + for i := range c.apisMeta { + c.apisMeta[i].watchCancel() + } + for i := range opts { + opts[i](c) + } + c.apisMeta = nil + c.namespacedResources = nil + c.log.Info("Invalidated cluster") +} + +// clusterCacheSync's lock should be held before calling 
this method +func (syncStatus *clusterCacheSync) synced(clusterRetryTimeout time.Duration) bool { + syncTime := syncStatus.syncTime + + if syncTime == nil { + return false + } + if syncStatus.syncError != nil { + return time.Now().Before(syncTime.Add(clusterRetryTimeout)) + } + if syncStatus.resyncTimeout == 0 { + // cluster resync timeout has been disabled + return true + } + return time.Now().Before(syncTime.Add(syncStatus.resyncTimeout)) +} + +func (c *clusterCache) stopWatching(gk schema.GroupKind, ns string) { + c.lock.Lock() + defer c.lock.Unlock() + if info, ok := c.apisMeta[gk]; ok { + info.watchCancel() + delete(c.apisMeta, gk) + c.replaceResourceCache(gk, nil, ns) + c.log.Info(fmt.Sprintf("Stop watching: %s not found", gk)) + } +} + +// startMissingWatches lists supported cluster resources and start watching for changes unless watch is already running +func (c *clusterCache) startMissingWatches() error { + apis, err := c.kubectl.GetAPIResources(c.config, true, c.settings.ResourcesFilter) + if err != nil { + return err + } + client, err := c.kubectl.NewDynamicClient(c.config) + if err != nil { + return err + } + clientset, err := kubernetes.NewForConfig(c.config) + if err != nil { + return err + } + namespacedResources := make(map[schema.GroupKind]bool) + for i := range apis { + api := apis[i] + namespacedResources[api.GroupKind] = api.Meta.Namespaced + if _, ok := c.apisMeta[api.GroupKind]; !ok { + ctx, cancel := context.WithCancel(context.Background()) + c.apisMeta[api.GroupKind] = &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel} + + err := c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error { + resourceVersion, err := c.loadInitialState(ctx, api, resClient, ns, false) // don't lock here, we are already in a lock before startMissingWatches is called inside watchEvents + if err != nil && c.isRestrictedResource(err) { + keep := false + if c.respectRBAC == RespectRbacStrict { + k, permErr := c.checkPermission(ctx, clientset.AuthorizationV1().SelfSubjectAccessReviews(), api) + if permErr != nil { + return fmt.Errorf("failed to check permissions for resource %s: %w, original error=%v", api.GroupKind.String(), permErr, err.Error()) + } + keep = k + } + // if we are not allowed to list the resource, remove it from the watch list + if !keep { + delete(c.apisMeta, api.GroupKind) + delete(namespacedResources, api.GroupKind) + return nil + } + } + go c.watchEvents(ctx, api, resClient, ns, resourceVersion) + return nil + }) + if err != nil { + return err + } + } + } + c.namespacedResources = namespacedResources + return nil +} + +func runSynced(lock sync.Locker, action func() error) error { + lock.Lock() + defer lock.Unlock() + return action() +} + +// listResources creates list pager and enforces number of concurrent list requests +func (c *clusterCache) listResources(ctx context.Context, resClient dynamic.ResourceInterface, callback func(*pager.ListPager) error) (string, error) { + if err := c.listSemaphore.Acquire(ctx, 1); err != nil { + return "", err + } + defer c.listSemaphore.Release(1) + var retryCount int64 = 0 + resourceVersion := "" + listPager := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + var res *unstructured.UnstructuredList + var listRetry wait.Backoff + + if c.listRetryUseBackoff { + listRetry = retry.DefaultBackoff + } else { + listRetry = retry.DefaultRetry + } + + listRetry.Steps = int(c.listRetryLimit) + err := retry.OnError(listRetry, c.listRetryFunc, func() error { + var ierr 
error + res, ierr = resClient.List(ctx, opts) + if ierr != nil { + // Log out a retry + if c.listRetryLimit > 1 && c.listRetryFunc(ierr) { + retryCount += 1 + c.log.Info(fmt.Sprintf("Error while listing resources: %v (try %d/%d)", ierr, retryCount, c.listRetryLimit)) + } + return ierr + } + resourceVersion = res.GetResourceVersion() + return nil + }) + return res, err + }) + listPager.PageBufferSize = c.listPageBufferSize + listPager.PageSize = c.listPageSize + + return resourceVersion, callback(listPager) +} + +func (c *clusterCache) loadInitialState(ctx context.Context, api kube.APIResourceInfo, resClient dynamic.ResourceInterface, ns string, lock bool) (string, error) { + return c.listResources(ctx, resClient, func(listPager *pager.ListPager) error { + var items []*Resource + err := listPager.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error { + if un, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName()) + } else { + items = append(items, c.newResource(un)) + } + return nil + }) + + if err != nil { + return fmt.Errorf("failed to load initial state of resource %s: %w", api.GroupKind.String(), err) + } + if lock { + return runSynced(&c.lock, func() error { + c.replaceResourceCache(api.GroupKind, items, ns) + return nil + }) + } else { + c.replaceResourceCache(api.GroupKind, items, ns) + return nil + } + }) +} + +func (c *clusterCache) watchEvents(ctx context.Context, api kube.APIResourceInfo, resClient dynamic.ResourceInterface, ns string, resourceVersion string) { + kube.RetryUntilSucceed(ctx, watchResourcesRetryTimeout, fmt.Sprintf("watch %s on %s", api.GroupKind, c.config.Host), c.log, func() (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack()) + } + }() + + // load API initial state if no resource version provided + if resourceVersion == "" { + resourceVersion, err = c.loadInitialState(ctx, api, resClient, ns, true) + if err != nil { + return err + } + } + + w, err := watchutil.NewRetryWatcher(resourceVersion, &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + res, err := resClient.Watch(ctx, options) + if errors.IsNotFound(err) { + c.stopWatching(api.GroupKind, ns) + } + return res, err + }, + }) + if err != nil { + return err + } + + defer func() { + w.Stop() + resourceVersion = "" + }() + + var watchResyncTimeoutCh <-chan time.Time + if c.watchResyncTimeout > 0 { + shouldResync := time.NewTimer(c.watchResyncTimeout) + defer shouldResync.Stop() + watchResyncTimeoutCh = shouldResync.C + } + + for { + select { + // stop watching when parent context got cancelled + case <-ctx.Done(): + return nil + + // re-synchronize API state and restart watch periodically + case <-watchResyncTimeoutCh: + return fmt.Errorf("Resyncing %s on %s due to timeout", api.GroupKind, c.config.Host) + + // re-synchronize API state and restart watch if retry watcher failed to continue watching using provided resource version + case <-w.Done(): + return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.config.Host) + + case event, ok := <-w.ResultChan(): + if !ok { + return fmt.Errorf("Watch %s on %s has closed", api.GroupKind, c.config.Host) + } + + obj, ok := event.Object.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("Failed to convert to *unstructured.Unstructured: %v", event.Object) + } + + c.processEvent(event.Type, obj) + if kube.IsCRD(obj) { + var 
resources []kube.APIResourceInfo + crd := v1.CustomResourceDefinition{} + err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &crd) + if err != nil { + c.log.Error(err, "Failed to extract CRD resources") + } + for _, v := range crd.Spec.Versions { + resources = append(resources, kube.APIResourceInfo{ + GroupKind: schema.GroupKind{ + Group: crd.Spec.Group, Kind: crd.Spec.Names.Kind}, + GroupVersionResource: schema.GroupVersionResource{ + Group: crd.Spec.Group, Version: v.Name, Resource: crd.Spec.Names.Plural}, + Meta: metav1.APIResource{ + Group: crd.Spec.Group, + SingularName: crd.Spec.Names.Singular, + Namespaced: crd.Spec.Scope == v1.NamespaceScoped, + Name: crd.Spec.Names.Plural, + Kind: crd.Spec.Names.Singular, + Version: v.Name, + ShortNames: crd.Spec.Names.ShortNames, + }, + }) + } + + if event.Type == watch.Deleted { + for i := range resources { + c.deleteAPIResource(resources[i]) + } + } else { + // add new CRD's groupkind to c.apigroups + if event.Type == watch.Added { + for i := range resources { + c.appendAPIResource(resources[i]) + } + } + err = runSynced(&c.lock, func() error { + return c.startMissingWatches() + }) + if err != nil { + c.log.Error(err, "Failed to start missing watch") + } + } + err = runSynced(&c.lock, func() error { + openAPISchema, gvkParser, err := c.kubectl.LoadOpenAPISchema(c.config) + if err != nil { + return fmt.Errorf("failed to load open api schema while handling CRD change: %w", err) + } + if gvkParser != nil { + c.gvkParser = gvkParser + } + c.openAPISchema = openAPISchema + return nil + }) + if err != nil { + c.log.Error(err, "Failed to reload open api schema") + } + } + } + } + }) +} + +func (c *clusterCache) processApi(client dynamic.Interface, api kube.APIResourceInfo, callback func(resClient dynamic.ResourceInterface, ns string) error) error { + resClient := client.Resource(api.GroupVersionResource) + switch { + // if manage whole cluster or resource is cluster level and cluster resources enabled + case len(c.namespaces) == 0 || (!api.Meta.Namespaced && c.clusterResources): + return callback(resClient, "") + // if manage some namespaces and resource is namespaced + case len(c.namespaces) != 0 && api.Meta.Namespaced: + for _, ns := range c.namespaces { + err := callback(resClient.Namespace(ns), ns) + if err != nil { + return err + } + } + } + + return nil +} + +// isRestrictedResource checks if the kube api call is unauthorized or forbidden +func (c *clusterCache) isRestrictedResource(err error) bool { + return c.respectRBAC != RespectRbacDisabled && (k8sErrors.IsForbidden(err) || k8sErrors.IsUnauthorized(err)) +} + +// checkPermission runs a self subject access review to check if the controller has permissions to list the resource +func (c *clusterCache) checkPermission(ctx context.Context, reviewInterface authType1.SelfSubjectAccessReviewInterface, api kube.APIResourceInfo) (keep bool, err error) { + sar := &authorizationv1.SelfSubjectAccessReview{ + Spec: authorizationv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Namespace: "*", + Verb: "list", // uses list verb to check for permissions + Resource: api.GroupVersionResource.Resource, + }, + }, + } + + switch { + // if manage whole cluster or resource is cluster level and cluster resources enabled + case len(c.namespaces) == 0 || (!api.Meta.Namespaced && c.clusterResources): + resp, err := reviewInterface.Create(ctx, sar, metav1.CreateOptions{}) + if err != nil { + return false, err + } + if resp != nil && resp.Status.Allowed { 
+ return true, nil + } + // unsupported, remove from watch list + return false, nil + // if manage some namespaces and resource is namespaced + case len(c.namespaces) != 0 && api.Meta.Namespaced: + for _, ns := range c.namespaces { + sar.Spec.ResourceAttributes.Namespace = ns + resp, err := reviewInterface.Create(ctx, sar, metav1.CreateOptions{}) + if err != nil { + return false, err + } + if resp != nil && resp.Status.Allowed { + return true, nil + } else { + // unsupported, remove from watch list + return false, nil + } + } + } + // checkPermission follows the same logic of determining namespace/cluster resource as the processApi function + // so if neither of the cases match it means the controller will not watch for it so it is safe to return true. + return true, nil +} + +func (c *clusterCache) sync() error { + c.log.Info("Start syncing cluster") + + for i := range c.apisMeta { + c.apisMeta[i].watchCancel() + } + c.apisMeta = make(map[schema.GroupKind]*apiMeta) + c.resources = make(map[kube.ResourceKey]*Resource) + c.namespacedResources = make(map[schema.GroupKind]bool) + config := c.config + version, err := c.kubectl.GetServerVersion(config) + + if err != nil { + return err + } + c.serverVersion = version + apiResources, err := c.kubectl.GetAPIResources(config, false, NewNoopSettings()) + if err != nil { + return err + } + c.apiResources = apiResources + + openAPISchema, gvkParser, err := c.kubectl.LoadOpenAPISchema(config) + if err != nil { + return fmt.Errorf("failed to load open api schema while syncing cluster cache: %w", err) + } + + if gvkParser != nil { + c.gvkParser = gvkParser + } + + c.openAPISchema = openAPISchema + + apis, err := c.kubectl.GetAPIResources(c.config, true, c.settings.ResourcesFilter) + + if err != nil { + return err + } + client, err := c.kubectl.NewDynamicClient(c.config) + if err != nil { + return err + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return err + } + lock := sync.Mutex{} + err = kube.RunAllAsync(len(apis), func(i int) error { + api := apis[i] + + lock.Lock() + ctx, cancel := context.WithCancel(context.Background()) + info := &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel} + c.apisMeta[api.GroupKind] = info + c.namespacedResources[api.GroupKind] = api.Meta.Namespaced + lock.Unlock() + + return c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error { + resourceVersion, err := c.listResources(ctx, resClient, func(listPager *pager.ListPager) error { + return listPager.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error { + if un, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("object %s/%s has an unexpected type", un.GroupVersionKind().String(), un.GetName()) + } else { + lock.Lock() + c.setNode(c.newResource(un)) + lock.Unlock() + } + return nil + }) + }) + if err != nil { + if c.isRestrictedResource(err) { + keep := false + if c.respectRBAC == RespectRbacStrict { + k, permErr := c.checkPermission(ctx, clientset.AuthorizationV1().SelfSubjectAccessReviews(), api) + if permErr != nil { + return fmt.Errorf("failed to check permissions for resource %s: %w, original error=%v", api.GroupKind.String(), permErr, err.Error()) + } + keep = k + } + // if we are not allowed to list the resource, remove it from the watch list + if !keep { + lock.Lock() + delete(c.apisMeta, api.GroupKind) + delete(c.namespacedResources, api.GroupKind) + lock.Unlock() + return nil + } + } + return fmt.Errorf("failed to load initial state of resource 
%s: %w", api.GroupKind.String(), err) + } + + go c.watchEvents(ctx, api, resClient, ns, resourceVersion) + + return nil + }) + }) + + if err != nil { + return fmt.Errorf("failed to sync cluster %s: %v", c.config.Host, err) + } + + c.log.Info("Cluster successfully synced") + return nil +} + +// EnsureSynced checks cache state and synchronizes it if necessary +func (c *clusterCache) EnsureSynced() error { + syncStatus := &c.syncStatus + + // first check if cluster is synced *without acquiring the full clusterCache lock* + syncStatus.lock.Lock() + if syncStatus.synced(c.clusterSyncRetryTimeout) { + syncError := syncStatus.syncError + syncStatus.lock.Unlock() + return syncError + } + syncStatus.lock.Unlock() // release the lock, so that we can acquire the parent lock (see struct comment re: lock acquisition ordering) + + c.lock.Lock() + defer c.lock.Unlock() + syncStatus.lock.Lock() + defer syncStatus.lock.Unlock() + + // before doing any work, check once again now that we have the lock, to see if it got + // synced between the first check and now + if syncStatus.synced(c.clusterSyncRetryTimeout) { + return syncStatus.syncError + } + err := c.sync() + syncTime := time.Now() + syncStatus.syncTime = &syncTime + syncStatus.syncError = err + return syncStatus.syncError +} + +func (c *clusterCache) FindResources(namespace string, predicates ...func(r *Resource) bool) map[kube.ResourceKey]*Resource { + c.lock.RLock() + defer c.lock.RUnlock() + result := map[kube.ResourceKey]*Resource{} + resources := map[kube.ResourceKey]*Resource{} + if namespace != "" { + if ns, ok := c.nsIndex[namespace]; ok { + resources = ns + } + } else { + resources = c.resources + } + + for k := range resources { + r := resources[k] + matches := true + for i := range predicates { + if !predicates[i](r) { + matches = false + break + } + } + + if matches { + result[k] = r + } + } + return result +} + +// IterateHierarchy iterates resource tree starting from the specified top level resource and executes callback for each resource in the tree +func (c *clusterCache) IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if res, ok := c.resources[key]; ok { + nsNodes := c.nsIndex[key.Namespace] + if !action(res, nsNodes) { + return + } + childrenByUID := make(map[types.UID][]*Resource) + for _, child := range nsNodes { + if res.isParentOf(child) { + childrenByUID[child.Ref.UID] = append(childrenByUID[child.Ref.UID], child) + } + } + // make sure children has no duplicates + for _, children := range childrenByUID { + if len(children) > 0 { + // The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group). It is ok to pick any object but we need to make sure + // we pick the same child after every refresh. 
+ sort.Slice(children, func(i, j int) bool { + key1 := children[i].ResourceKey() + key2 := children[j].ResourceKey() + return strings.Compare(key1.String(), key2.String()) < 0 + }) + child := children[0] + if action(child, nsNodes) { + child.iterateChildren(nsNodes, map[kube.ResourceKey]bool{res.ResourceKey(): true}, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool { + if err != nil { + c.log.V(2).Info(err.Error()) + return false + } + return action(child, namespaceResources) + }) + } + } + } + } +} + +// IsNamespaced answers if specified group/kind is a namespaced resource API or not +func (c *clusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) { + if isNamespaced, ok := c.namespacedResources[gk]; ok { + return isNamespaced, nil + } + return false, errors.NewNotFound(schema.GroupResource{Group: gk.Group}, "") +} + +func (c *clusterCache) managesNamespace(namespace string) bool { + for _, ns := range c.namespaces { + if ns == namespace { + return true + } + } + return false +} + +// GetManagedLiveObjs helps finding matching live K8S resources for a given resources list. +// The function returns all resources from cache for those `isManaged` function returns true and resources +// specified in targetObjs list. +func (c *clusterCache) GetManagedLiveObjs(targetObjs []*unstructured.Unstructured, isManaged func(r *Resource) bool) (map[kube.ResourceKey]*unstructured.Unstructured, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + for _, o := range targetObjs { + if len(c.namespaces) > 0 { + if o.GetNamespace() == "" && !c.clusterResources { + return nil, fmt.Errorf("Cluster level %s %q can not be managed when in namespaced mode", o.GetKind(), o.GetName()) + } else if o.GetNamespace() != "" && !c.managesNamespace(o.GetNamespace()) { + return nil, fmt.Errorf("Namespace %q for %s %q is not managed", o.GetNamespace(), o.GetKind(), o.GetName()) + } + } + } + + managedObjs := make(map[kube.ResourceKey]*unstructured.Unstructured) + // iterate all objects in live state cache to find ones associated with app + for key, o := range c.resources { + if isManaged(o) && o.Resource != nil && len(o.OwnerRefs) == 0 { + managedObjs[key] = o.Resource + } + } + // but are simply missing our label + lock := &sync.Mutex{} + err := kube.RunAllAsync(len(targetObjs), func(i int) error { + targetObj := targetObjs[i] + key := kube.GetResourceKey(targetObj) + lock.Lock() + managedObj := managedObjs[key] + lock.Unlock() + + if managedObj == nil { + if existingObj, exists := c.resources[key]; exists { + if existingObj.Resource != nil { + managedObj = existingObj.Resource + } else { + var err error + managedObj, err = c.kubectl.GetResource(context.TODO(), c.config, targetObj.GroupVersionKind(), existingObj.Ref.Name, existingObj.Ref.Namespace) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + } + } else if _, watched := c.apisMeta[key.GroupKind()]; !watched { + var err error + managedObj, err = c.kubectl.GetResource(context.TODO(), c.config, targetObj.GroupVersionKind(), targetObj.GetName(), targetObj.GetNamespace()) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + } + } + + if managedObj != nil { + converted, err := c.kubectl.ConvertToVersion(managedObj, targetObj.GroupVersionKind().Group, targetObj.GroupVersionKind().Version) + if err != nil { + // fallback to loading resource from kubernetes if conversion fails + c.log.V(1).Info(fmt.Sprintf("Failed to convert resource: %v", err)) + managedObj, err = 
c.kubectl.GetResource(context.TODO(), c.config, targetObj.GroupVersionKind(), managedObj.GetName(), managedObj.GetNamespace()) + if err != nil { + if errors.IsNotFound(err) { + return nil + } + return err + } + } else { + managedObj = converted + } + lock.Lock() + managedObjs[key] = managedObj + lock.Unlock() + } + return nil + }) + if err != nil { + return nil, err + } + + return managedObjs, nil +} + +func (c *clusterCache) processEvent(event watch.EventType, un *unstructured.Unstructured) { + for _, h := range c.getEventHandlers() { + h(event, un) + } + key := kube.GetResourceKey(un) + if event == watch.Modified && skipAppRequeuing(key) { + return + } + + c.lock.Lock() + defer c.lock.Unlock() + existingNode, exists := c.resources[key] + if event == watch.Deleted { + if exists { + c.onNodeRemoved(key) + } + } else if event != watch.Deleted { + c.onNodeUpdated(existingNode, c.newResource(un)) + } +} + +func (c *clusterCache) onNodeUpdated(oldRes *Resource, newRes *Resource) { + c.setNode(newRes) + for _, h := range c.getResourceUpdatedHandlers() { + h(newRes, oldRes, c.nsIndex[newRes.Ref.Namespace]) + } +} + +func (c *clusterCache) onNodeRemoved(key kube.ResourceKey) { + existing, ok := c.resources[key] + if ok { + delete(c.resources, key) + ns, ok := c.nsIndex[key.Namespace] + if ok { + delete(ns, key) + if len(ns) == 0 { + delete(c.nsIndex, key.Namespace) + } + // remove ownership references from children with inferred references + if existing.isInferredParentOf != nil { + for k, v := range ns { + if mightHaveInferredOwner(v) && existing.isInferredParentOf(k) { + v.setOwnerRef(existing.toOwnerRef(), false) + } + } + } + } + for _, h := range c.getResourceUpdatedHandlers() { + h(nil, existing, ns) + } + } +} + +var ( + ignoredRefreshResources = map[string]bool{ + "/" + kube.EndpointsKind: true, + } +) + +// GetClusterInfo returns cluster cache statistics +func (c *clusterCache) GetClusterInfo() ClusterInfo { + c.lock.RLock() + defer c.lock.RUnlock() + c.syncStatus.lock.Lock() + defer c.syncStatus.lock.Unlock() + + return ClusterInfo{ + APIsCount: len(c.apisMeta), + K8SVersion: c.serverVersion, + ResourcesCount: len(c.resources), + Server: c.config.Host, + LastCacheSyncTime: c.syncStatus.syncTime, + SyncError: c.syncStatus.syncError, + APIResources: c.apiResources, + } +} + +// skipAppRequeuing checks if the object is an API type which we want to skip requeuing against. +// We ignore API types which have a high churn rate, and/or whose updates are irrelevant to the app +func skipAppRequeuing(key kube.ResourceKey) bool { + return ignoredRefreshResources[key.Group+"/"+key.Kind] +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/doc.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/doc.go new file mode 100644 index 00000000000..b45bd352c12 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/doc.go @@ -0,0 +1,9 @@ +/* +Package cache implements lightweight Kubernetes cluster caching that stores only resource references and ownership +references. In addition to references cache might be configured to store custom metadata and whole body of selected +resources. + +The library uses Kubernetes watch API to maintain cache up to date. This approach reduces number of Kubernetes +API requests and provides instant access to the required Kubernetes resources. 
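The watch-driven design described here is consumed through the handler registration shown earlier: OnEvent and OnResourceUpdated return an Unsubscribe closure. A hedged usage sketch (the placeholder rest.Config is illustrative only):

package main

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/rest"
)

func main() {
	config := &rest.Config{Host: "https://example.invalid"} // placeholder, not a real cluster
	clusterCache := cache.NewClusterCache(config)

	// Log every raw watch event; the returned closure removes the handler.
	unsubscribe := clusterCache.OnEvent(func(event watch.EventType, un *unstructured.Unstructured) {
		fmt.Printf("%s %s/%s\n", event, un.GetNamespace(), un.GetName())
	})
	defer unsubscribe()

	_ = clusterCache.EnsureSynced() // starts list+watch; error handling elided in this sketch
}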
+*/ +package cache diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/predicates.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/predicates.go new file mode 100644 index 00000000000..53c05529d73 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/predicates.go @@ -0,0 +1,14 @@ +package cache + +// TopLevelResource returns true if resource has no parents +func TopLevelResource(r *Resource) bool { + return len(r.OwnerRefs) == 0 +} + +// ResourceOfGroupKind returns a predicate that matches resources by the specified group and kind +func ResourceOfGroupKind(group string, kind string) func(r *Resource) bool { + return func(r *Resource) bool { + key := r.ResourceKey() + return key.Group == group && key.Kind == kind + } +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/references.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/references.go new file mode 100644 index 00000000000..21898365557 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/references.go @@ -0,0 +1,109 @@ +package cache + +import ( + "encoding/json" + "fmt" + "strings" + + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" +) + +// mightHaveInferredOwner returns true if the given resource might have inferred owners +func mightHaveInferredOwner(r *Resource) bool { + return r.Ref.GroupVersionKind().Group == "" && r.Ref.Kind == kube.PersistentVolumeClaimKind +} + +func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured) ([]metav1.OwnerReference, func(kube.ResourceKey) bool) { + var isInferredParentOf func(_ kube.ResourceKey) bool + ownerRefs := un.GetOwnerReferences() + gvk := un.GroupVersionKind() + + switch { + + // Special case for endpoint. 
Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed + case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0: + ownerRefs = append(ownerRefs, metav1.OwnerReference{ + Name: un.GetName(), + Kind: kube.ServiceKind, + APIVersion: "v1", + }) + + // Special case for Operator Lifecycle Manager ClusterServiceVersion: + case un.GroupVersionKind().Group == "operators.coreos.com" && un.GetKind() == "ClusterServiceVersion": + if un.GetAnnotations()["olm.operatorGroup"] != "" { + ownerRefs = append(ownerRefs, metav1.OwnerReference{ + Name: un.GetAnnotations()["olm.operatorGroup"], + Kind: "OperatorGroup", + APIVersion: "operators.coreos.com/v1", + }) + } + + // Edge case: consider auto-created service account tokens as a child of service account objects + case un.GetKind() == kube.SecretKind && un.GroupVersionKind().Group == "": + if yes, ref := isServiceAccountTokenSecret(un); yes { + ownerRefs = append(ownerRefs, ref) + } + + case (un.GroupVersionKind().Group == "apps" || un.GroupVersionKind().Group == "extensions") && un.GetKind() == kube.StatefulSetKind: + if refs, err := isStatefulSetChild(un); err != nil { + c.log.Error(err, fmt.Sprintf("Failed to extract StatefulSet %s/%s PVC references", un.GetNamespace(), un.GetName())) + } else { + isInferredParentOf = refs + } + } + + return ownerRefs, isInferredParentOf +} + +func isStatefulSetChild(un *unstructured.Unstructured) (func(kube.ResourceKey) bool, error) { + sts := v1.StatefulSet{} + data, err := json.Marshal(un) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &sts) + if err != nil { + return nil, err + } + + templates := sts.Spec.VolumeClaimTemplates + return func(key kube.ResourceKey) bool { + if key.Kind == kube.PersistentVolumeClaimKind && key.GroupKind().Group == "" { + for _, templ := range templates { + if strings.HasPrefix(key.Name, fmt.Sprintf("%s-%s-", templ.Name, un.GetName())) { + return true + } + } + } + return false + }, nil +} + +func isServiceAccountTokenSecret(un *unstructured.Unstructured) (bool, metav1.OwnerReference) { + ref := metav1.OwnerReference{ + APIVersion: "v1", + Kind: kube.ServiceAccountKind, + } + + if typeVal, ok, err := unstructured.NestedString(un.Object, "type"); !ok || err != nil || typeVal != "kubernetes.io/service-account-token" { + return false, ref + } + + annotations := un.GetAnnotations() + if annotations == nil { + return false, ref + } + + id, okId := annotations["kubernetes.io/service-account.uid"] + name, okName := annotations["kubernetes.io/service-account.name"] + if okId && okName { + ref.Name = name + ref.UID = types.UID(id) + } + return ref.Name != "" && ref.UID != "", ref +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go new file mode 100644 index 00000000000..4097f4dcafc --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/resource.go @@ -0,0 +1,101 @@ +package cache + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/argoproj/gitops-engine/pkg/utils/kube" +) + +// Resource holds the information about Kubernetes resource, ownership references and optional information +type Resource struct { + // ResourceVersion holds most recent observed resource version + ResourceVersion string + // Resource reference + Ref v1.ObjectReference + // References to resource owners + OwnerRefs 
[]metav1.OwnerReference + // Optional creation timestamp of the resource + CreationTimestamp *metav1.Time + // Optional additional information about the resource + Info interface{} + // Optional whole resource manifest + Resource *unstructured.Unstructured + + // answers if resource is inferred parent of provided resource + isInferredParentOf func(key kube.ResourceKey) bool +} + +func (r *Resource) ResourceKey() kube.ResourceKey { + return kube.NewResourceKey(r.Ref.GroupVersionKind().Group, r.Ref.Kind, r.Ref.Namespace, r.Ref.Name) +} + +func (r *Resource) isParentOf(child *Resource) bool { + for i, ownerRef := range child.OwnerRefs { + + // backfill UID of inferred owner child references + if ownerRef.UID == "" && r.Ref.Kind == ownerRef.Kind && r.Ref.APIVersion == ownerRef.APIVersion && r.Ref.Name == ownerRef.Name { + ownerRef.UID = r.Ref.UID + child.OwnerRefs[i] = ownerRef + return true + } + + if r.Ref.UID == ownerRef.UID { + return true + } + } + + return false +} + +// setOwnerRef adds or removes specified owner reference +func (r *Resource) setOwnerRef(ref metav1.OwnerReference, add bool) { + index := -1 + for i, item := range r.OwnerRefs { + if item.UID == ref.UID { + index = i + break + } + } + added := index > -1 + if add != added { + if add { + r.OwnerRefs = append(r.OwnerRefs, ref) + } else { + r.OwnerRefs = append(r.OwnerRefs[:index], r.OwnerRefs[index+1:]...) + } + } +} + +func (r *Resource) toOwnerRef() metav1.OwnerReference { + return metav1.OwnerReference{UID: r.Ref.UID, Name: r.Ref.Name, Kind: r.Ref.Kind, APIVersion: r.Ref.APIVersion} +} + +func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool { + newSet := make(map[kube.ResourceKey]bool) + for k, v := range set { + newSet[k] = v + } + for i := range keys { + newSet[keys[i]] = true + } + return newSet +} + +func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) { + for childKey, child := range ns { + if r.isParentOf(ns[childKey]) { + if parents[childKey] { + key := r.ResourceKey() + _ = action(fmt.Errorf("circular dependency detected. 
%s is child and parent of %s", childKey.String(), key.String()), child, ns) + } else { + if action(nil, child, ns) { + child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action) + } + } + } + } +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/cache/settings.go b/vendor/github.com/argoproj/gitops-engine/pkg/cache/settings.go new file mode 100644 index 00000000000..a7194d0ca40 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/cache/settings.go @@ -0,0 +1,172 @@ +package cache + +import ( + "time" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/rest" + + "github.com/argoproj/gitops-engine/pkg/health" + "github.com/argoproj/gitops-engine/pkg/utils/kube" + "github.com/argoproj/gitops-engine/pkg/utils/tracing" +) + +// NewNoopSettings returns cache settings that have no health customizations and don't filter any resources +func NewNoopSettings() *noopSettings { + return &noopSettings{} +} + +type noopSettings struct { +} + +func (f *noopSettings) GetResourceHealth(_ *unstructured.Unstructured) (*health.HealthStatus, error) { + return nil, nil +} + +func (f *noopSettings) IsExcludedResource(_, _, _ string) bool { + return false +} + +// Settings caching customizations +type Settings struct { + // ResourceHealthOverride contains health assessment overrides + ResourceHealthOverride health.HealthOverride + // ResourcesFilter holds filter that excludes resources + ResourcesFilter kube.ResourceFilter +} + +type UpdateSettingsFunc func(cache *clusterCache) + +// SetKubectl allows overriding the kubectl wrapper implementation +func SetKubectl(kubectl kube.Kubectl) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.kubectl = kubectl + } +} + +// SetPopulateResourceInfoHandler updates handler that populates resource info +func SetPopulateResourceInfoHandler(handler OnPopulateResourceInfoHandler) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.populateResourceInfoHandler = handler + } +} + +// SetSettings updates caching settings +func SetSettings(settings Settings) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.settings = Settings{settings.ResourceHealthOverride, settings.ResourcesFilter} + } +} + +// SetNamespaces updates list of monitored namespaces +func SetNamespaces(namespaces []string) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.namespaces = namespaces + } +} + +// SetClusterResources specifies whether cluster-level resources are included or not. +// The flag is used only if the cluster is changed to namespaced mode using the SetNamespaces setting +func SetClusterResources(val bool) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.clusterResources = val + } +} + +// SetConfig updates cluster rest config +func SetConfig(config *rest.Config) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.config = config + } +} + +// SetListPageSize sets the page size for list pager. +func SetListPageSize(listPageSize int64) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.listPageSize = listPageSize + } +} + +// SetListPageBufferSize sets the number of pages to prefetch for list pager. +func SetListPageBufferSize(listPageBufferSize int32) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.listPageBufferSize = listPageBufferSize + } +} + +// SetListSemaphore sets the semaphore for list operations. +// Taking an object rather than a number allows sharing a semaphore among multiple caches if necessary.
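An illustrative aside, not part of the vendored patch: these UpdateSettingsFunc values follow Go's functional-options pattern, so a consumer composes them when constructing a cluster cache. A minimal sketch, assuming gitops-engine's NewClusterCache applies each option in order, that *semaphore.Weighted from golang.org/x/sync/semaphore satisfies WeightedSemaphore, and with the kubeconfig path invented as a placeholder; the SetListSemaphore definition itself continues right below.

package main

import (
	"time"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"golang.org/x/sync/semaphore"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config however the host application normally would.
	restConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}

	// Each option mutates the clusterCache being constructed; anything not
	// specified keeps its default value.
	clusterCache := cache.NewClusterCache(restConfig,
		cache.SetNamespaces([]string{"default", "kube-system"}),
		cache.SetClusterResources(false),
		cache.SetListSemaphore(semaphore.NewWeighted(10)),
		cache.SetResyncTimeout(12*time.Hour),
	)
	_ = clusterCache
}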
+func SetListSemaphore(listSemaphore WeightedSemaphore) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.listSemaphore = listSemaphore + } +} + +// SetResyncTimeout updates cluster re-sync timeout +func SetResyncTimeout(timeout time.Duration) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.syncStatus.lock.Lock() + defer cache.syncStatus.lock.Unlock() + + cache.syncStatus.resyncTimeout = timeout + } +} + +// SetWatchResyncTimeout updates cluster re-sync timeout +func SetWatchResyncTimeout(timeout time.Duration) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.watchResyncTimeout = timeout + } +} + +// SetClusterSyncRetryTimeout updates cluster sync retry timeout when sync error happens +func SetClusterSyncRetryTimeout(timeout time.Duration) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.clusterSyncRetryTimeout = timeout + } +} + +// SetLogr sets the logger to use. +func SetLogr(log logr.Logger) UpdateSettingsFunc { + return func(cache *clusterCache) { + cache.log = log + if kcmd, ok := cache.kubectl.(*kube.KubectlCmd); ok { + kcmd.Log = log + } + } +} + +// SetTracer sets the tracer to use. +func SetTracer(tracer tracing.Tracer) UpdateSettingsFunc { + return func(cache *clusterCache) { + if kcmd, ok := cache.kubectl.(*kube.KubectlCmd); ok { + kcmd.Tracer = tracer + } + } +} + +// SetRetryOptions sets cluster list retry options +func SetRetryOptions(maxRetries int32, useBackoff bool, retryFunc ListRetryFunc) UpdateSettingsFunc { + return func(cache *clusterCache) { + // Max retries must be at least one + if maxRetries < 1 { + maxRetries = 1 + } + cache.listRetryLimit = maxRetries + cache.listRetryUseBackoff = useBackoff + cache.listRetryFunc = retryFunc + } +} + +// SetRespectRBAC allows to set whether to respect the controller rbac in list/watches +func SetRespectRBAC(respectRBAC int) UpdateSettingsFunc { + return func(cache *clusterCache) { + // if invalid value is provided disable respect rbac + if respectRBAC < RespectRbacDisabled || respectRBAC > RespectRbacStrict { + cache.respectRBAC = RespectRbacDisabled + } else { + cache.respectRBAC = respectRBAC + } + } +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go index 4a93ef51aab..8918150f052 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go @@ -118,20 +118,6 @@ func isSupportedVerb(apiResource *metav1.APIResource, verb string) bool { return false } -type CreateGVKParserError struct { - err error -} - -func NewCreateGVKParserError(err error) *CreateGVKParserError { - return &CreateGVKParserError{ - err: err, - } -} - -func (e *CreateGVKParserError) Error() string { - return fmt.Sprintf("error creating gvk parser: %s", e.err) -} - // LoadOpenAPISchema will load all existing resource schemas from the cluster // and return: // - openapi.Resources: used for getting the proto.Schema from a GVK @@ -148,17 +134,14 @@ func (k *KubectlCmd) LoadOpenAPISchema(config *rest.Config) (openapi.Resources, if err != nil { return nil, nil, fmt.Errorf("error getting openapi resources: %s", err) } - gvkParser, err := newGVKParser(oapiGetter) + gvkParser, err := k.newGVKParser(oapiGetter) if err != nil { - // return a specific error type to allow gracefully handle - // creating GVK Parser bug: - // https://github.com/kubernetes/kubernetes/issues/103597 - return oapiResources, nil, 
NewCreateGVKParserError(err) + return oapiResources, nil, fmt.Errorf("error getting gvk parser: %s", err) } return oapiResources, gvkParser, nil } -func newGVKParser(oapiGetter *openapi.CachedOpenAPIGetter) (*managedfields.GvkParser, error) { +func (k *KubectlCmd) newGVKParser(oapiGetter discovery.OpenAPISchemaInterface) (*managedfields.GvkParser, error) { doc, err := oapiGetter.OpenAPISchema() if err != nil { return nil, fmt.Errorf("error getting openapi schema: %s", err) @@ -167,6 +150,11 @@ func newGVKParser(oapiGetter *openapi.CachedOpenAPIGetter) (*managedfields.GvkPa if err != nil { return nil, fmt.Errorf("error getting openapi data: %s", err) } + var taintedGVKs []schema.GroupVersionKind + models, taintedGVKs = newUniqueModels(models) + if len(taintedGVKs) > 0 { + k.Log.Info("Duplicate GVKs detected in OpenAPI schema. This could cause inaccurate diffs.", "gvks", taintedGVKs) + } gvkParser, err := managedfields.NewGVKParser(models, false) if err != nil { return nil, err diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/testdata/openapi_v2.json b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/testdata/openapi_v2.json new file mode 100644 index 00000000000..77b82460721 --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/testdata/openapi_v2.json @@ -0,0 +1,516 @@ +{ + "definitions": { + "additional_properties": [ + { + "name": "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource", + "value": { + "description": "APIResource specifies the name of a resource and whether it is namespaced.", + "required": [ + "name", + "singularName", + "namespaced", + "kind", + "verbs" + ], + "type": { + "value": [ + "object" + ] + }, + "properties": { + "additional_properties": [ + { + "name": "categories", + "value": { + "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + }, + "vendor_extension": [ + { + "name": "x-kubernetes-list-type", + "value": { + "yaml": "atomic\n" + } + } + ] + } + }, + { + "name": "group", + "value": { + "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "kind", + "value": { + "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "name", + "value": { + "description": "name is the plural name of the resource.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "namespaced", + "value": { + "description": "namespaced indicates if a resource is namespaced or not.", + "type": { + "value": [ + "boolean" + ] + } + } + }, + { + "name": "shortNames", + "value": { + "description": "shortNames is a list of suggested short names of the resource.", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + }, + "vendor_extension": [ + { + "name": "x-kubernetes-list-type", + "value": { + "yaml": "atomic\n" + } + } + ] + } + }, + { + "name": "singularName", + "value": { + "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. 
The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "storageVersionHash", + "value": { + "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "verbs", + "value": { + "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + } + } + }, + { + "name": "version", + "value": { + "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "type": { + "value": [ + "string" + ] + } + } + } + ] + } + } + }, + { + "name": "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList", + "value": { + "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "required": [ + "groupVersion", + "resources" + ], + "type": { + "value": [ + "object" + ] + }, + "properties": { + "additional_properties": [ + { + "name": "apiVersion", + "value": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "groupVersion", + "value": { + "description": "groupVersion is the group and version this APIResourceList is for.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "kind", + "value": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "resources", + "value": { + "description": "resources contains the name of the resources and if they are namespaced.", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "_ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" + } + ] + }, + "vendor_extension": [ + { + "name": "x-kubernetes-list-type", + "value": { + "yaml": "atomic\n" + } + } + ] + } + } + ] + }, + "vendor_extension": [ + { + "name": "x-kubernetes-group-version-kind", + "value": { + "yaml": "- group: \"\"\n kind: APIResourceList\n version: v1\n" + } + } + ] + } + }, + { + "name": "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList_v2", + "value": { + "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "required": [ + "groupVersion", + "resources" + ], + "type": { + "value": [ + "object" + ] + }, + "properties": { + "additional_properties": [ + { + "name": "apiVersion", + "value": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "groupVersion", + "value": { + "description": "groupVersion is the group and version this APIResourceList is for.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "kind", + "value": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "resources", + "value": { + "description": "resources contains the name of the resources and if they are namespaced.", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "_ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource_v2" + } + ] + } + } + } + ] + }, + "vendor_extension": [ + { + "name": "x-kubernetes-group-version-kind", + "value": { + "yaml": "- group: \"\"\n kind: APIResourceList\n version: v1\n" + } + } + ] + } + }, + { + "name": "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource_v2", + "value": { + "description": "APIResource specifies the name of a resource and whether it is namespaced.", + "required": [ + "name", + "singularName", + "namespaced", + "kind", + "verbs" + ], + "type": { + "value": [ + "object" + ] + }, + "properties": { + "additional_properties": [ + { + "name": "categories", + "value": { + "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + } + } + }, + { + "name": "group", + "value": { + "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. 
For subresources, this may have a different value, for example: Scale\".", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "kind", + "value": { + "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "name", + "value": { + "description": "name is the plural name of the resource.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "namespaced", + "value": { + "description": "namespaced indicates if a resource is namespaced or not.", + "type": { + "value": [ + "boolean" + ] + } + } + }, + { + "name": "shortNames", + "value": { + "description": "shortNames is a list of suggested short names of the resource.", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + } + } + }, + { + "name": "singularName", + "value": { + "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "storageVersionHash", + "value": { + "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "type": { + "value": [ + "string" + ] + } + } + }, + { + "name": "verbs", + "value": { + "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "type": { + "value": [ + "array" + ] + }, + "items": { + "schema": [ + { + "type": { + "value": [ + "string" + ] + } + } + ] + } + } + }, + { + "name": "version", + "value": { + "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "type": { + "value": [ + "string" + ] + } + } + } + ] + } + } + } + ] + } +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/uniqueprotomodels.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/uniqueprotomodels.go new file mode 100644 index 00000000000..31d2e98e9eb --- /dev/null +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/uniqueprotomodels.go @@ -0,0 +1,190 @@ +package kube + +import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" + "sort" +) + +/** +The upstream Kubernetes NewGVKParser method causes problems for Argo CD. +https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L73 + +The function fails in instances where it is probably more desirable for Argo CD to simply ignore the error and move on. +But since the upstream implementation doesn't offer the option to ignore the error, we have to mutate the input to the +function to completely avoid the case that can produce the error. 
+ +When encountering the error from NewGVKParser, we used to just set the internal GVKParser instance to nil, log the +error as info, and move on. + +But Argo CD increasingly relies on the GVKParser to produce reliable diffs, especially with server-side diffing. And +we're better off with an incorrectly-initialized GVKParser than no GVKParser at all. + +To understand why NewGVKParser fails, we need to understand how Kubernetes constructs its OpenAPI models. + +Kubernetes contains a built-in OpenAPI document containing the `definitions` for every built-in Kubernetes API. This +document includes shared structs like APIResourceList. Some of these definitions include an +x-kubernetes-group-version-kind extension. + +Aggregated APIs produce their own OpenAPI documents, which are merged with the built-in OpenAPI document. The aggregated +API documents generally include all the definitions of all the structs which are used anywhere by the API. This often +includes some of the same structs as the built-in OpenAPI document. + +So when Kubernetes constructs the complete OpenAPI document (the one served at /openapi/v2), it merges the built-in +OpenAPI document with the aggregated API OpenAPI documents. + +When the aggregator encounters two different definitions for the same struct (as determined by a deep compare) with the +same GVK (as determined by the value in the x-kubernetes-group-version-kind extension), it appends a `_vX` suffix to the +definition name in the OpenAPI document (where X is the count of the number of times the aggregator has seen the same +definition). Basically, it's communicating "different APIs have different opinions about the structure of structs with +this GVK, so I'm going to give them different names and let you sort it out." +https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/aggregator/aggregator.go#L238-L279 + +This behavior is fine from the perspective of a typical Kubernetes API user. They download the OpenAPI document, they +see that there are two different "opinions" about the structure of a struct, and they can choose which one they want to +rely on. + +But Argo CD has to be generic. We need to take the provided OpenAPI document and use it to construct a GVKParser. And +the GVKParser (reasonably) rejects the OpenAPI document if it contains two definitions for the same struct. + +So we have to do some work to make the OpenAPI document palatable to the GVKParser. We have to remove the duplicate +definitions. Specifically, we take the first one and log a warning for each subsequent definition with the same GVK. + +In practice, this probably generally appears when a common aggregated API was built at a time significantly before the +current Kubernetes version. The most common case is that the metrics server is built against an older version of the +Kubernetes libraries, using old versions of the structs. When the metrics server is updated to use the latest version of +the Kubernetes libraries, the problems go away, because the aggregated API and Kubernetes agree about the shape of the +struct. + +Using the first encountered definition is imperfect and could result in unreliable diffs. But it's better than +constructing completely-wrong diffs due to the lack of a GVKParser. +*/ + +// uniqueModels is a model provider that ensures that no two models share the same gvk. Use newUniqueModels to +// initialize it and enforce uniqueness. 
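As an illustrative aside (not part of the vendored file): the intended call pattern mirrors the newGVKParser change in ctl.go above, where the models are deduplicated before being handed to the strict upstream parser. A hedged sketch of that flow; the wrapper function name is invented, and the imports (logr, kube-openapi proto, apimachinery schema and managedfields) match the ones this file and ctl.go already use:

// buildGVKParser sketches how newUniqueModels is meant to be used: strip
// duplicate-GVK definitions, log what was dropped, then build the parser.
func buildGVKParser(log logr.Logger, models proto.Models) (*managedfields.GvkParser, error) {
	var taintedGVKs []schema.GroupVersionKind
	models, taintedGVKs = newUniqueModels(models)
	if len(taintedGVKs) > 0 {
		log.Info("Duplicate GVKs detected in OpenAPI schema. This could cause inaccurate diffs.", "gvks", taintedGVKs)
	}
	return managedfields.NewGVKParser(models, false)
}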
+type uniqueModels struct { + models map[string]proto.Schema +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L322-L326 +func (d *uniqueModels) LookupModel(model string) proto.Schema { + return d.models[model] +} + +// Copied verbatim from here: https://github.com/kubernetes/kube-openapi/blob/b456828f718bab62dc3013d192665eb3d17f8fe9/pkg/util/proto/document.go#L328-L337 +func (d *uniqueModels) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +// newUniqueModels returns a new uniqueModels instance and a list of warnings for models that share the same gvk. +func newUniqueModels(models proto.Models) (proto.Models, []schema.GroupVersionKind) { + var taintedGVKs []schema.GroupVersionKind + gvks := map[schema.GroupVersionKind]string{} + um := &uniqueModels{models: map[string]proto.Schema{}} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic(fmt.Sprintf("ListModels returns a model that can't be looked-up for: %v", modelName)) + } + gvkList := parseGroupVersionKind(model) + gvk, wasProcessed := modelGvkWasAlreadyProcessed(model, gvks) + if !wasProcessed { + um.models[modelName] = model + + // Add GVKs to the map, so we can detect a duplicate GVK later. + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + gvks[gvk] = modelName + } + } + } else { + taintedGVKs = append(taintedGVKs, gvk) + } + } + return um, taintedGVKs +} + +// modelGvkWasAlreadyProcessed inspects a model to determine if it would trigger a duplicate GVK error. The gvks map +// holds the GVKs of all the models that have already been processed. If the model would trigger a duplicate GVK error, +// the function returns the GVK that would trigger the error and true. Otherwise, it returns an empty GVK and false. +func modelGvkWasAlreadyProcessed(model proto.Schema, gvks map[schema.GroupVersionKind]string) (schema.GroupVersionKind, bool) { + gvkList := parseGroupVersionKind(model) + // Not every model has a GVK extension specified. For those models, this loop will be skipped. + for _, gvk := range gvkList { + // The kind length check is copied from managedfields.NewGVKParser. It's unclear what edge case it's handling, + // but the behavior of this function should match NewGVKParser. + if len(gvk.Kind) > 0 { + _, ok := gvks[gvk] + if ok { + // This is the only condition under which NewGVKParser would return a duplicate GVK error. + return gvk, true + } + } + } + return schema.GroupVersionKind{}, false +} + +// groupVersionKindExtensionKey is the key used to lookup the +// GroupVersionKind value for an object definition from the +// definition's "extensions" map. +// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L29-L32 +const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind" + +// parseGroupVersionKind gets and parses GroupVersionKind from the extension. Returns empty if it doesn't have one. 
+// Copied verbatim from: https://github.com/kubernetes/apimachinery/blob/eb26334eeb0f769be8f0c5665ff34713cfdec83e/pkg/util/managedfields/gvkparser.go#L82-L128 +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 00000000000..dd950a286fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. 
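An illustrative aside, not part of the vendored file: TokenProviderFunc lets a plain function act as a TokenProvider, and Token.Expired is driven entirely by CanExpire and Expires. A minimal sketch with an invented token value:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/auth/bearer"
)

func main() {
	// Wrap a plain function as a TokenProvider.
	provider := bearer.TokenProviderFunc(func(aws.Context) (bearer.Token, error) {
		return bearer.Token{
			Value:     "example-access-token", // invented for the example
			CanExpire: true,
			Expires:   time.Now().Add(15 * time.Minute),
		}, nil
	})

	tok, err := provider.RetrieveBearerToken(aws.BackgroundContext())
	if err != nil {
		panic(err)
	}

	// Prints false until the Expires instant passes; a token with
	// CanExpire == false never reports expired.
	fmt.Println(tok.Expired(time.Now()))
}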
+func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go index 6eda2a5557f..4138e725dde 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -4,13 +4,13 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "fmt" "io/ioutil" "path/filepath" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -55,6 +55,19 @@ type Provider struct { // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. + // + // ~/.aws/sso/cache/.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + TokenProvider bearer.TokenProvider } // NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured @@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) { // RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal // by exchanging the accessToken present in ~/.aws/sso/cache. func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err + var accessToken *string + if p.TokenProvider != nil { + token, err := p.TokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return credentials.Value{}, err + } + accessToken = &token.Value + } else { + if p.CachedTokenFilepath == "" { + cachedTokenFilePath, err := getCachedFilePath(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + p.CachedTokenFilepath = cachedTokenFilePath + } + + tokenFile, err := loadTokenFile(p.CachedTokenFilepath) + if err != nil { + return credentials.Value{}, err + } + accessToken = &tokenFile.AccessToken } output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, + AccessToken: accessToken, AccountId: &p.AccountID, RoleName: &p.RoleName, }) @@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Val }, nil } -func getCacheFileName(url string) (string, error) { +func getCachedFilePath(startUrl string) (string, error) { hash := sha1.New() - _, err := hash.Write([]byte(url)) + _, err := hash.Write([]byte(startUrl)) if err != nil { return "", err } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil + return 
filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil } type token struct { @@ -153,13 +165,8 @@ func (t token) Expired() bool { return nowTime().Round(0).After(time.Time(t.ExpiresAt)) } -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) +func loadTokenFile(cachedTokenPath string) (t token, err error) { + fileBytes, err := ioutil.ReadFile(cachedTokenPath) if err != nil { return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 00000000000..f6fa88451af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,237 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var resolvedOsUserHomeDir = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// an error if unable to derive the path. The key will be used to compute a SHA1 +// value that is hex encoded. +// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := resolvedOsUserHomeDir() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type cachedToken struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields +// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal +// This function adds some extra validation to the known fields and captures unknown fields.
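An illustrative aside, not part of the vendored file: since cachedToken is unexported, the round-trip behavior is easiest to see from inside this package. A sketch under that assumption, with the "region" field invented purely as an example of an unknown field; the MarshalJSON it exercises follows directly below.

// exampleRoundTrip shows unknown JSON fields surviving unmarshal + marshal.
func exampleRoundTrip() error {
	raw := []byte(`{"accessToken":"abc","expiresAt":"2030-01-02T15:04:05Z","region":"us-east-1"}`)

	var t cachedToken
	if err := json.Unmarshal(raw, &t); err != nil {
		return err
	}
	// "region" is not a known field, so it is captured:
	// t.UnknownFields["region"] == "us-east-1"

	out, err := json.Marshal(t) // invokes the custom MarshalJSON
	if err != nil {
		return err
	}
	// out again contains accessToken, expiresAt, and region; unknown
	// fields are re-emitted rather than dropped.
	_ = out
	return nil
}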
+func (t cachedToken) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified +// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal +// This function adds some extra validation to the known fields and captures unknown fields. +func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err 
= os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file, %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decodes rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encodes rfc3339 to JSON format +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON marshal to escape and quote the value, making use of JSON's + // quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go new file mode 100644 index 00000000000..7562cd01350 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go @@ -0,0 +1,139 @@ +package ssocreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/service/ssooidc" +) + +// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API +// client for calling CreateToken operation to refresh the SSO token. +type CreateTokenAPIClient interface { + CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error) +} + +// SSOTokenProviderOptions provides the options for configuring the +// SSOTokenProvider. +type SSOTokenProviderOptions struct { + // Client that can be overridden + Client CreateTokenAPIClient + + // The path of the file the cached SSO token will be read from. + // Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter. + CachedTokenFilepath string +} + +// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for +// Bearer Authentication. The SSOTokenProvider can only be used to refresh +// already cached SSO Tokens. This utility cannot perform the initial SSO +// create token. +// +// The initial SSO create token should be performed with the AWS CLI before the +// Go application using the SSOTokenProvider will need to retrieve the SSO +// token. If the AWS CLI has not created the token cache file, this provider +// will return an error when attempting to retrieve the cached token. +// +// This provider will attempt to refresh the cached SSO token periodically if +// needed when RetrieveBearerToken is called. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +type SSOTokenProvider struct { + options SSOTokenProviderOptions +} + +// NewSSOTokenProvider returns an initialized SSOTokenProvider that will +// periodically refresh the cached SSO token stored in the cachedTokenFilepath. +// The cachedTokenFilepath file's content will be rewritten by the token +// provider when the token is refreshed. +// +// The client must be configured for the AWS region the SSO token was created for. +func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired, +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present, an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err) + } + } + + expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) { + if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" { + return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{ + ClientId: &token.ClientID, + ClientSecret: &token.ClientSecret, + RefreshToken: &token.RefreshToken, + GrantType: aws.String("refresh_token"), + }) + if err != nil { + return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err) + } + + expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second) + + token.AccessToken = *createResult.AccessToken + token.ExpiresAt = (*rfc3339)(&expiresAt) + token.RefreshToken = *createResult.RefreshToken + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file, %v", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil { + return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err) + } + + return token, nil +} + +func toTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 9943af744ba..6027df1e184 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -39,6 +39,7 @@ const ( EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). @@ -117,7 +118,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") return reg }(), }, @@ -213,6 +214,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, "me-central-1": region{ Description: "Middle East (UAE)", }, @@ -356,6 +360,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -494,6 +501,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -1439,6 +1449,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -1907,6 +1925,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2247,6 +2268,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2390,6 +2414,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2475,6 +2502,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2617,6 +2647,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -2660,6 +2693,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -2675,12 +2711,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2690,6 +2732,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3375,6 +3420,9 
@@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -3454,6 +3502,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -3481,6 +3538,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ap-southeast-3.api.aws", }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3499,6 +3565,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -3517,6 +3592,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -3580,6 +3664,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3791,6 +3884,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4043,15 +4139,84 @@ var awsPartition = partition{ }, "backupstorage": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, 
endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -4645,6 +4810,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4785,6 +4953,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -4950,6 +5121,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5105,6 +5279,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5327,6 +5504,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -5643,6 +5823,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6761,6 +6944,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -6929,6 +7115,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -8026,6 +8218,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8118,6 +8313,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8269,6 +8467,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8488,6 +8689,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -8497,18 +8701,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -8518,6 +8731,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8651,6 +8867,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -8780,6 +8999,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "local", }: endpoint{ @@ -8985,6 +9207,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9162,6 +9387,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9331,6 +9559,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9509,6 +9740,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9627,6 +9861,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -9796,6 +10033,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -10433,6 +10673,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10601,6 +10844,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10778,6 +11024,9 @@ var awsPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -10811,6 +11060,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -11127,6 +11379,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11308,6 +11563,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -11540,6 +11798,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -12727,6 +12988,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -12736,6 +13000,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12799,6 +13066,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -13808,11 +14078,21 @@ var awsPartition = partition{ }: endpoint{ Hostname: 
"internetmonitor.ap-northeast-2.api.aws", }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{ Hostname: "internetmonitor.ap-south-1.api.aws", }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -13823,16 +14103,37 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.ap-southeast-2.api.aws", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{ Hostname: "internetmonitor.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ Hostname: "internetmonitor.eu-central-1.api.aws", }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "internetmonitor.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -13843,6 +14144,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.eu-south-1.api.aws", }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -13858,6 +14164,16 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -13873,21 +14189,45 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor.us-east-1.api.aws", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{ Hostname: "internetmonitor.us-east-2.api.aws", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{ Hostname: "internetmonitor.us-west-1.api.aws", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{ Hostname: "internetmonitor.us-west-2.api.aws", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.api.aws", + }, }, }, "iot": service{ @@ -14611,12 +14951,140 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: 
"api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -15174,6 +15642,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -15327,6 +15800,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -15882,6 +16358,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, endpointKey{ Region: "il-central-1-fips", }: endpoint{ @@ -15889,6 
+16374,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "me-central-1", @@ -16038,6 +16524,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -16047,6 +16536,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -16110,6 +16602,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -16375,6 +16870,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17010,6 +17514,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17479,6 +17986,9 @@ var awsPartition = partition{ }, "mediaconnect": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -17488,6 +17998,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -17497,6 +18010,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17767,6 +18283,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17816,6 +18335,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17865,6 +18387,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18134,6 +18659,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18219,6 +18747,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -18682,6 +19213,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -19055,6 +19589,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -19064,6 +19601,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19076,12 +19616,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -19296,6 +19842,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20041,6 +20590,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20980,6 +21532,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21140,6 +21695,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21264,6 +21822,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -21704,6 +22265,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22205,6 +22769,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.eu-west-3.api.aws", }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "resource-explorer-2.il-central-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -22330,6 +22899,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22590,6 +23162,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22827,6 +23402,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -23198,6 +23776,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24235,6 +24822,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24770,6 +25360,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -24779,6 +25372,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, 
endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -24791,12 +25387,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24907,7 +25509,7 @@ var awsPartition = partition{ Region: "af-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.af-south-1.amazonaws.com", + Hostname: "servicediscovery.af-south-1.api.aws", }, endpointKey{ Region: "ap-east-1", @@ -24916,7 +25518,7 @@ var awsPartition = partition{ Region: "ap-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-east-1.amazonaws.com", + Hostname: "servicediscovery.ap-east-1.api.aws", }, endpointKey{ Region: "ap-northeast-1", @@ -24925,7 +25527,7 @@ var awsPartition = partition{ Region: "ap-northeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-1.api.aws", }, endpointKey{ Region: "ap-northeast-2", @@ -24934,7 +25536,7 @@ var awsPartition = partition{ Region: "ap-northeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-2.api.aws", }, endpointKey{ Region: "ap-northeast-3", @@ -24943,7 +25545,7 @@ var awsPartition = partition{ Region: "ap-northeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-northeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-northeast-3.api.aws", }, endpointKey{ Region: "ap-south-1", @@ -24952,7 +25554,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-1.amazonaws.com", + Hostname: "servicediscovery.ap-south-1.api.aws", }, endpointKey{ Region: "ap-south-2", @@ -24961,7 +25563,7 @@ var awsPartition = partition{ Region: "ap-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-south-2.amazonaws.com", + Hostname: "servicediscovery.ap-south-2.api.aws", }, endpointKey{ Region: "ap-southeast-1", @@ -24970,7 +25572,7 @@ var awsPartition = partition{ Region: "ap-southeast-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-1.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-1.api.aws", }, endpointKey{ Region: "ap-southeast-2", @@ -24979,7 +25581,7 @@ var awsPartition = partition{ Region: "ap-southeast-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-2.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-2.api.aws", }, endpointKey{ Region: "ap-southeast-3", @@ -24988,7 +25590,7 @@ var awsPartition = partition{ Region: "ap-southeast-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-3.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-3.api.aws", }, endpointKey{ Region: "ap-southeast-4", @@ -24997,7 +25599,7 @@ var awsPartition = partition{ Region: "ap-southeast-4", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.ap-southeast-4.amazonaws.com", + Hostname: "servicediscovery.ap-southeast-4.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -25006,7 +25608,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: 
"servicediscovery.ca-central-1.amazonaws.com", + Hostname: "servicediscovery.ca-central-1.api.aws", }, endpointKey{ Region: "ca-central-1", @@ -25014,6 +25616,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1-fips", }: endpoint{ @@ -25030,7 +25638,7 @@ var awsPartition = partition{ Region: "eu-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-1.amazonaws.com", + Hostname: "servicediscovery.eu-central-1.api.aws", }, endpointKey{ Region: "eu-central-2", @@ -25039,7 +25647,7 @@ var awsPartition = partition{ Region: "eu-central-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-central-2.amazonaws.com", + Hostname: "servicediscovery.eu-central-2.api.aws", }, endpointKey{ Region: "eu-north-1", @@ -25048,7 +25656,7 @@ var awsPartition = partition{ Region: "eu-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-north-1.amazonaws.com", + Hostname: "servicediscovery.eu-north-1.api.aws", }, endpointKey{ Region: "eu-south-1", @@ -25057,7 +25665,7 @@ var awsPartition = partition{ Region: "eu-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-1.amazonaws.com", + Hostname: "servicediscovery.eu-south-1.api.aws", }, endpointKey{ Region: "eu-south-2", @@ -25066,7 +25674,7 @@ var awsPartition = partition{ Region: "eu-south-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-south-2.amazonaws.com", + Hostname: "servicediscovery.eu-south-2.api.aws", }, endpointKey{ Region: "eu-west-1", @@ -25075,7 +25683,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-1.amazonaws.com", + Hostname: "servicediscovery.eu-west-1.api.aws", }, endpointKey{ Region: "eu-west-2", @@ -25084,7 +25692,7 @@ var awsPartition = partition{ Region: "eu-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-2.amazonaws.com", + Hostname: "servicediscovery.eu-west-2.api.aws", }, endpointKey{ Region: "eu-west-3", @@ -25093,7 +25701,16 @@ var awsPartition = partition{ Region: "eu-west-3", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.eu-west-3.amazonaws.com", + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", }, endpointKey{ Region: "me-central-1", @@ -25102,7 +25719,7 @@ var awsPartition = partition{ Region: "me-central-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-central-1.amazonaws.com", + Hostname: "servicediscovery.me-central-1.api.aws", }, endpointKey{ Region: "me-south-1", @@ -25111,7 +25728,7 @@ var awsPartition = partition{ Region: "me-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.me-south-1.amazonaws.com", + Hostname: "servicediscovery.me-south-1.api.aws", }, endpointKey{ Region: "sa-east-1", @@ -25120,34 +25737,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "servicediscovery", - }: 
endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, + Hostname: "servicediscovery.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -25156,7 +25746,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-1.amazonaws.com", + Hostname: "servicediscovery.us-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -25164,6 +25754,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1-fips", }: endpoint{ @@ -25180,7 +25776,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-east-2.amazonaws.com", + Hostname: "servicediscovery.us-east-2.api.aws", }, endpointKey{ Region: "us-east-2", @@ -25188,6 +25784,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2-fips", }: endpoint{ @@ -25204,7 +25806,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-1.amazonaws.com", + Hostname: "servicediscovery.us-west-1.api.aws", }, endpointKey{ Region: "us-west-1", @@ -25212,6 +25814,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1-fips", }: endpoint{ @@ -25228,7 +25836,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.us-west-2.amazonaws.com", + Hostname: "servicediscovery.us-west-2.api.aws", }, endpointKey{ Region: "us-west-2", @@ -25236,6 +25844,12 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2-fips", }: endpoint{ @@ -25611,75 +26225,6 @@ var awsPartition = partition{ }, "sms": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, 
- endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -25689,39 +26234,6 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", - }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -26225,6 +26737,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26343,7 +26858,173 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -26352,7 +27033,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + Hostname: "ssm-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -26361,7 +27042,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + Hostname: "ssm-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -26370,12 +27051,15 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + Hostname: "ssm-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -26387,15 +27071,12 @@ var awsPartition = partition{ }: endpoint{}, endpointKey{ Region: "us-east-1", - }: endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, + }: endpoint{}, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", - SSLCommonName: "queue.{dnsSuffix}", + Hostname: "ssm-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -26404,7 +27085,7 @@ var awsPartition = partition{ Region: 
"us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + Hostname: "ssm-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -26413,7 +27094,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + Hostname: "ssm-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -26422,69 +27103,36 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + Hostname: "ssm-fips.us-west-2.amazonaws.com", }, }, }, - "ssm": service{ + "ssm-contacts": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - }, endpointKey{ Region: "eu-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -26494,19 +27142,10 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -26515,7 +27154,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -26524,7 +27163,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -26533,18 +27172,12 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -26555,7 +27188,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + Hostname: 
"ssm-contacts-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -26564,7 +27197,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -26573,7 +27206,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -26582,7 +27215,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", }, }, }, @@ -26606,6 +27239,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26621,21 +27260,90 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, }, }, "ssm-sap": service{ @@ -26951,6 +27659,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27075,15 +27786,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27238,107 +27940,113 @@ var awsPartition = partition{ Region: "eu-west-3", }: endpoint{}, endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sts": service{ - PartitionEndpoint: "aws-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: 
endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", }: endpoint{}, endpointKey{ Region: "me-central-1", @@ -27547,6 +28255,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27692,6 +28403,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -27801,6 +28515,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -28275,6 +28992,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -29349,6 +30069,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-me-central-1", @@ -29413,6 +30134,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -30063,6 +30801,7 @@ var awsPartition = partition{ CredentialScope: credentialScope{ Region: "il-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ Region: "fips-me-central-1", @@ -30127,6 +30866,23 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-central-1", }: endpoint{ @@ -30641,6 +31397,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -30979,6 +31738,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32162,6 +32931,27 @@ var awscnPartition = partition{ }, }, }, + "savingsplans": service{ 
+ IsRegionalized: boxedTrue, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "savingsplans.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32220,7 +33010,7 @@ var awscnPartition = partition{ Region: "cn-north-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-north-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", }, endpointKey{ Region: "cn-northwest-1", @@ -32229,7 +33019,7 @@ var awscnPartition = partition{ Region: "cn-northwest-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "servicediscovery.cn-northwest-1.amazonaws.com.cn", + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", }, }, }, @@ -32263,9 +33053,6 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, }, }, "snowball": service{ @@ -33395,6 +34182,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -33820,6 +34617,15 @@ var awsusgovPartition = partition{ }, "codepipeline": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -33832,6 +34638,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -35700,6 +36512,38 @@ var awsusgovPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -37575,6 +38419,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: 
"servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-east-1-fips", }: endpoint{ @@ -37599,6 +38449,12 @@ var awsusgovPartition = partition{ }: endpoint{ Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1-fips", }: endpoint{ @@ -37673,15 +38529,6 @@ var awsusgovPartition = partition{ }, "sms": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -37691,15 +38538,6 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", - }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39269,6 +40107,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, @@ -39278,6 +40125,15 @@ var awsisoPartition = partition{ }: endpoint{ Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rds": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 1d3f4c3adc3..ea8e3537658 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" "github.com/aws/aws-sdk-go/service/sts" ) @@ -23,6 +24,10 @@ type CredentialsProviderOptions struct { // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, // such as setting its ExpiryWindow. WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) } func resolveCredentials(cfg *aws.Config, @@ -33,7 +38,7 @@ func resolveCredentials(cfg *aws.Config, switch { case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration + // User explicitly provided a Profile in the session's configuration // so load that profile from shared config first. 
// Github(aws/aws-sdk-go#2727) return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) @@ -134,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config, case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + var optFns []func(*processcreds.ProcessProvider) + if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil { + optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions) + } + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...) default: // Fallback to default credentials provider, include mock errors for @@ -173,8 +182,28 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req return nil, err } + var optFns []func(provider *ssocreds.Provider) cfgCopy := cfg.Copy() - cfgCopy.Region = &sharedCfg.SSORegion + + if sharedCfg.SSOSession != nil { + cfgCopy.Region = &sharedCfg.SSOSession.SSORegion + cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name) + if err != nil { + return nil, err + } + // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials + mySession := Must(NewSession(&aws.Config{ + Credentials: credentials.AnonymousCredentials, + })) + oidcClient := ssooidc.New(mySession, cfgCopy) + tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath) + optFns = append(optFns, func(p *ssocreds.Provider) { + p.TokenProvider = tokenProvider + p.CachedTokenFilepath = cachedPath + }) + } else { + cfgCopy.Region = &sharedCfg.SSORegion + } return ssocreds.NewCredentials( &Session{ @@ -184,6 +213,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req sharedCfg.SSOAccountID, sharedCfg.SSORoleName, sharedCfg.SSOStartURL, + optFns..., ), nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index cbccb60bbe8..8127c99a9a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -37,7 +37,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 424c82b4d34..ea3ac0d0316 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -26,6 +26,13 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. 
+ ssoSectionPrefix = `sso-session ` + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + // AWS Single Sign-On (AWS SSO) group ssoAccountIDKey = "sso_account_id" ssoRegionKey = "sso_region" @@ -99,6 +106,10 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *ssoSession + SSOAccountID string SSORegion string SSORoleName string @@ -186,6 +197,20 @@ type sharedConfigFile struct { IniData ini.Sections } +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type ssoSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *ssoSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURL) +} + // loadSharedConfig retrieves the configuration from the list of files using // the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in @@ -266,13 +291,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s // profile only have credential provider options. cfg.clearAssumeRoleOptions() } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } + profiles[profile] = struct{}{} if err := cfg.validateCredentialType(); err != nil { @@ -308,6 +333,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.SourceProfile = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if cfg.hasSSOTokenProviderConfiguration() { + skippedFiles = 0 + for _, f := range files { + section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + if ok { + var ssoSession ssoSession + ssoSession.setFromIniSection(section) + ssoSession.Name = cfg.SSOSessionName + cfg.SSOSession = &ssoSession + break + } + skippedFiles++ + } + if skippedFiles == len(files) { + // If all files were skipped because the sso session section is not found, return + // the sso section not found error. 
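[Illustration, not part of the patch: a sketch of the shared config layout this sso-session lookup parses, and how a caller would resolve such a profile. All names and values are placeholders.]

    package main

    import "github.com/aws/aws-sdk-go/aws/session"

    func main() {
        // ~/.aws/config layout the code above reads (placeholder values):
        //
        //   [profile dev]
        //   sso_session    = my-sso
        //   sso_account_id = 111122223333
        //   sso_role_name  = SampleRole
        //
        //   [sso-session my-sso]
        //   sso_region    = us-east-1
        //   sso_start_url = https://my-sso-portal.awsapps.com/start
        //
        // Resolving the profile goes through the sso-session token-provider
        // path introduced in this patch.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            Profile:           "dev",
            SharedConfigState: session.SharedConfigEnable,
        }))
        _ = sess
    }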
+ return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName) + } + } + return nil } @@ -363,6 +412,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e cfg.S3UsEast1RegionalEndpoint = sre } + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&cfg.SSOSessionName, section, ssoSessionNameKey) + // AWS Single Sign-On (AWS SSO) updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) updateString(&cfg.SSORegion, section, ssoRegionKey) @@ -461,32 +514,20 @@ func (cfg *sharedConfig) validateCredentialType() error { } func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } return nil } - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } } - return nil } @@ -525,15 +566,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { } func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) } - return true + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 
0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil } func oneOrNone(bs ...bool) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 0240bd0be35..41386bab12a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -8,7 +8,7 @@ // Generally using the signer outside of the SDK should not require any additional // logic when using Go v1.5 or higher. The signer does this by taking advantage // of the URL.EscapedPath method. If your request URI requires additional escaping -// you many need to use the URL.Opaque to define what the raw URI should be sent +// you may need to use the URL.Opaque to define what the raw URI should be sent // to the service as. // // The signer will first check the URL.Opaque field, and use its value if set. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 25fce95a3ef..0e5f95c1c1f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.290" +const SDKVersion = "1.44.317" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 1d273ff0ec6..ecc521f88f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -287,6 +287,10 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) if tag.Get("location") != "header" || tag.Get("enum") == "" { return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) } + if len(value) == 0 { + return "", errValueNotSet + } + buff := &bytes.Buffer{} for i, sv := range value { if sv == nil || len(*sv) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index b98e7076e3c..b4d7de3c3d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -769,7 +769,7 @@ func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleAct // If you finish before the timeout period ends, send a callback by using the // CompleteLifecycleAction API call. // -// For more information, see Amazon EC2 Auto Scaling lifecycle hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) +// For more information, see Complete a lifecycle action (https://docs.aws.amazon.com/autoscaling/ec2/userguide/completing-lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -4167,6 +4167,12 @@ func (c *AutoScaling) DescribeWarmPoolRequest(input *DescribeWarmPoolInput) (req Name: opDescribeWarmPool, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -4229,6 +4235,57 @@ func (c *AutoScaling) DescribeWarmPoolWithContext(ctx aws.Context, input *Descri return out, req.Send() } +// DescribeWarmPoolPages iterates over the pages of a DescribeWarmPool operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeWarmPool method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeWarmPool operation. +// pageNum := 0 +// err := client.DescribeWarmPoolPages(params, +// func(page *autoscaling.DescribeWarmPoolOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *AutoScaling) DescribeWarmPoolPages(input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool) error { + return c.DescribeWarmPoolPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeWarmPoolPagesWithContext same as DescribeWarmPoolPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeWarmPoolPagesWithContext(ctx aws.Context, input *DescribeWarmPoolInput, fn func(*DescribeWarmPoolOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeWarmPoolInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeWarmPoolRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeWarmPoolOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDetachInstances = "DetachInstances" // DetachInstancesRequest generates a "aws/request.Request" representing the @@ -4560,7 +4617,7 @@ func (c *AutoScaling) DetachTrafficSourcesRequest(input *DetachTrafficSourcesInp // // Detaches one or more traffic sources from the specified Auto Scaling group. // -// When you detach a taffic, it enters the Removing state while deregistering +// When you detach a traffic source, it enters the Removing state while deregistering // the instances in the group. When all instances are deregistered, then you // can no longer describe the traffic source using the DescribeTrafficSources // API call. The instances continue to run. @@ -6981,6 +7038,39 @@ func (s *Alarm) SetAlarmName(v string) *Alarm { return s } +// Specifies the CloudWatch alarm specification to use in an instance refresh. +type AlarmSpecification struct { + _ struct{} `type:"structure"` + + // The names of one or more CloudWatch alarms to monitor for the instance refresh. + // You can specify up to 10 alarms. + Alarms []*string `type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AlarmSpecification) GoString() string { + return s.String() +} + +// SetAlarms sets the Alarms field's value. +func (s *AlarmSpecification) SetAlarms(v []*string) *AlarmSpecification { + s.Alarms = v + return s +} + type AttachInstancesInput struct { _ struct{} `type:"structure"` @@ -18014,7 +18104,7 @@ type PutScalingPolicyInput struct { // The amount by which to scale, based on the specified adjustment type. A positive // value adds to the current capacity while a negative number removes from the - // current capacity. For exact capacity, you must specify a positive value. + // current capacity. For exact capacity, you must specify a non-negative value. // // Required if the policy type is SimpleScaling. (Not used with any other policy // type.) @@ -18687,8 +18777,13 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string { type RefreshPreferences struct { _ struct{} `type:"structure"` + // (Optional) The CloudWatch alarm specification. CloudWatch alarms can be used + // to identify any issues and fail the operation if an alarm threshold is met. + AlarmSpecification *AlarmSpecification `type:"structure"` + // (Optional) Indicates whether to roll back the Auto Scaling group to its previous - // configuration if the instance refresh fails. The default is false. + // configuration if the instance refresh fails or a CloudWatch alarm threshold + // is met. The default is false. // // A rollback is not supported in the following situations: // @@ -18700,6 +18795,9 @@ type RefreshPreferences struct { // // * The Auto Scaling group uses the launch template's $Latest or $Default // version. + // + // For more information, see Undo changes with a rollback (https://docs.aws.amazon.com/autoscaling/ec2/userguide/instance-refresh-rollback.html) + // in the Amazon EC2 Auto Scaling User Guide. AutoRollback *bool `type:"boolean"` // (Optional) The amount of time, in seconds, to wait after a checkpoint before @@ -18812,6 +18910,12 @@ func (s RefreshPreferences) GoString() string { return s.String() } +// SetAlarmSpecification sets the AlarmSpecification field's value. +func (s *RefreshPreferences) SetAlarmSpecification(v *AlarmSpecification) *RefreshPreferences { + s.AlarmSpecification = v + return s +} + // SetAutoRollback sets the AutoRollback field's value. func (s *RefreshPreferences) SetAutoRollback(v bool) *RefreshPreferences { s.AutoRollback = &v @@ -18957,7 +19061,9 @@ type RollbackInstanceRefreshInput struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. - AutoScalingGroupName *string `min:"1" type:"string"` + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -18981,6 +19087,9 @@ func (s RollbackInstanceRefreshInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
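[Illustration, not part of the patch: a minimal sketch of starting an instance refresh that uses the AlarmSpecification and AutoRollback preferences added above. Group and alarm names are placeholders.]

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := autoscaling.New(sess)

        // Fail the refresh, and roll back, if the named alarm fires.
        out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
            AutoScalingGroupName: aws.String("my-asg"), // placeholder
            Preferences: &autoscaling.RefreshPreferences{
                AlarmSpecification: &autoscaling.AlarmSpecification{
                    Alarms: []*string{aws.String("my-refresh-alarm")}, // placeholder
                },
                AutoRollback: aws.Bool(true),
            },
        })
        fmt.Println(out, err)
    }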
func (s *RollbackInstanceRefreshInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RollbackInstanceRefreshInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } @@ -19899,6 +20008,8 @@ type StartInstanceRefreshInput struct { // // * Checkpoints // + // * CloudWatch alarms + // // * Skip matching Preferences *RefreshPreferences `type:"structure"` @@ -20052,12 +20163,7 @@ type StepAdjustment struct { // The amount by which to scale, based on the specified adjustment type. A positive // value adds to the current capacity while a negative number removes from the - // current capacity. - // - // The amount by which to scale. The adjustment is based on the value that you - // specified in the AdjustmentType property (either an absolute number or a - // percentage). A positive value adds to the current capacity and a negative - // number subtracts from the current capacity. + // current capacity. For exact capacity, you must specify a non-negative value. // // ScalingAdjustment is a required field ScalingAdjustment *int64 `type:"integer" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 5bb86ce04d9..2882d45568f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -346,8 +346,9 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // to read the entire body. // // The copy request charge is based on the storage class and Region that you -// specify for the destination object. For pricing information, see Amazon S3 -// pricing (http://aws.amazon.com/s3/pricing/). +// specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/). // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -6489,7 +6490,7 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // use request parameters as selection criteria to return metadata about a subset // of all the object versions. // -// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// To use this operation, you must have permission to perform the s3:ListBucketVersions // action. Be aware of the name difference. // // A 200 OK response can contain valid or invalid XML. Make sure to design your @@ -6800,20 +6801,22 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // and handle it appropriately. Objects are returned sorted in an ascending // order of the respective key names in the list. For more information about // listing objects, see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. // // To use this operation, you must have READ access to the bucket. // // To use this action in an Identity and Access Management (IAM) policy, you -// must have permissions to perform the s3:ListBucket action. 
The bucket owner +// must have permission to perform the s3:ListBucket action. The bucket owner // has this permission by default and can grant this permission to others. For // more information about permissions, see Permissions Related to Bucket Subresource // Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // This section describes the latest revision of this action. We recommend that -// you use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). // // To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). // @@ -10706,6 +10709,7 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject } var _ awserr.Error +var _ time.Time // SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. // @@ -11378,7 +11382,7 @@ type AbortMultipartUploadInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -12767,7 +12771,7 @@ type CompleteMultipartUploadInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -13029,7 +13033,7 @@ type CompleteMultipartUploadOutput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
(https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `type:"string"` @@ -13473,7 +13477,7 @@ type CopyObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -14617,7 +14621,7 @@ type CreateMultipartUploadInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -15064,7 +15068,7 @@ type CreateMultipartUploadOutput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Bucket *string `locationName:"Bucket" type:"string"` @@ -17034,7 +17038,7 @@ type DeleteObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -17257,7 +17261,7 @@ type DeleteObjectTaggingInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
// // Bucket is a required field @@ -17424,7 +17428,7 @@ type DeleteObjectsInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -21694,7 +21698,7 @@ type GetObjectAttributesInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -21713,8 +21717,8 @@ type GetObjectAttributesInput struct { // Sets the maximum number of parts to return. MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` - // An XML header that specifies the fields at the root level that you want returned - // in the response. Fields that you do not specify are not returned. + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. // // ObjectAttributes is a required field ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` @@ -22113,7 +22117,7 @@ type GetObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -23310,7 +23314,7 @@ type GetObjectTaggingInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -24008,7 +24012,7 @@ type HeadBucketInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -24139,7 +24143,7 @@ type HeadObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -27051,7 +27055,7 @@ type ListMultipartUploadsInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -27068,8 +27072,8 @@ type ListMultipartUploadsInput struct { Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. @@ -27098,8 +27102,8 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that they will be charged for the request. @@ -27257,9 +27261,9 @@ type ListMultipartUploadsOutput struct { // Encoding type used by Amazon S3 to encode object keys in the response. 
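[Illustration, not part of the patch: a sketch of the encoding-type request parameter described above, asking S3 to URL-encode keys that the XML 1.0 parser cannot represent. Bucket and prefix are placeholders.]

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Keys in the response come back URL-encoded.
        out, err := svc.ListMultipartUploads(&s3.ListMultipartUploadsInput{
            Bucket:       aws.String("my-bucket"), // placeholder
            Prefix:       aws.String("notes/"),
            EncodingType: aws.String(s3.EncodingTypeUrl),
        })
        if err != nil {
            panic(err)
        }
        for _, u := range out.Uploads {
            fmt.Println(aws.StringValue(u.Key))
        }
    }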
// - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: // // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` @@ -27420,8 +27424,8 @@ type ListObjectVersionsInput struct { Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. @@ -27435,7 +27439,7 @@ type ListObjectVersionsInput struct { // Specifies the key to start with when listing objects in a bucket. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. If additional keys satisfy the search criteria, // but were not returned because max-keys was exceeded, the response contains @@ -27443,11 +27447,15 @@ type ListObjectVersionsInput struct { // and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Use this parameter to select only those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different groupings - // of keys. (You can think of using prefix to make groups in the same way you'd - // use a folder in a file system.) You can use prefix with delimiter to roll - // up numerous objects into a single result under CommonPrefixes. + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) You can use prefix with delimiter to + // roll up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Confirms that the requester knows that they will be charged for the request. @@ -27538,6 +27546,12 @@ func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectVersionsInput) SetOptionalObjectAttributes(v []*string) *ListObjectVersionsInput { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. 
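[Illustration, not part of the patch: a sketch of the prefix/delimiter grouping described above, rolling keys under a "folder" up into CommonPrefixes. Bucket and prefix are placeholders.]

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Group keys under notes/ by their next "/" segment, folder-style.
        out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
            Bucket:    aws.String("my-bucket"), // placeholder
            Prefix:    aws.String("notes/"),
            Delimiter: aws.String("/"),
        })
        if err != nil {
            panic(err)
        }
        for _, cp := range out.CommonPrefixes {
            fmt.Println(aws.StringValue(cp.Prefix)) // e.g. notes/summer/
        }
    }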
func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { s.Prefix = &v @@ -27603,16 +27617,16 @@ type ListObjectVersionsOutput struct { // Encoding type used by Amazon S3 to encode object key names in the XML response. // - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: // // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` // A flag that indicates whether Amazon S3 returned all of the results that // satisfied the search criteria. If your results were truncated, you can make - // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // a follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` @@ -27770,18 +27784,18 @@ type ListObjectsInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. @@ -27796,11 +27810,15 @@ type ListObjectsInput struct { // listing after this specified key. Marker can be any key in the bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. 
+ OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Limits the response to keys that begin with the specified prefix. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` @@ -27887,6 +27905,12 @@ func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsInput) SetOptionalObjectAttributes(v []*string) *ListObjectsInput { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { s.Prefix = &v @@ -27940,7 +27964,7 @@ type ListObjectsOutput struct { // CommonPrefixes lists keys that act like subdirectories in the directory specified // by Prefix. // - // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // For example, if the prefix is notes/ and the delimiter is a slash (/), as // in notes/summer/july, the common prefix is notes/summer/. All of the keys // that roll up into a common prefix count as a single return when calculating // the number of returns. @@ -27973,13 +27997,16 @@ type ListObjectsOutput struct { // The bucket name. Name *string `type:"string"` - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMarker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker parameter + // in the subsequent request to get the next set of objects. Amazon S3 lists + // objects in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it + // is truncated, you can use the value of the last Key element in the response + // as the marker parameter in the subsequent request to get the next set of + // object keys. NextMarker *string `type:"string"` // Keys that begin with the indicated prefix. @@ -28091,18 +28118,18 @@ type ListObjectsV2Input struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. 
+ // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. @@ -28113,16 +28140,20 @@ type ListObjectsV2Input struct { // (access denied). ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true. + // The owner field is not present in ListObjectsV2 by default. If you want to + // return the owner field with each key in the result, then set the FetchOwner + // field to true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + // Limits the response to keys that begin with the specified prefix. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` @@ -28219,6 +28250,12 @@ func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { return s } +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsV2Input) SetOptionalObjectAttributes(v []*string) *ListObjectsV2Input { + s.OptionalObjectAttributes = v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { s.Prefix = &v @@ -28312,11 +28349,11 @@ type ListObjectsV2Output struct { IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than or equal to the MaxKeys field. Say you ask for 50 keys, - // your result will include 50 keys or fewer. + // always be less than or equal to the MaxKeys field. For example, if you ask + // for 50 keys, your result will include 50 keys or fewer. KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. By default the + // Sets the maximum number of keys returned in the response. By default, the // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `type:"integer"` @@ -28335,7 +28372,7 @@ type ListObjectsV2Output struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. 
For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. Name *string `type:"string"` @@ -28469,7 +28506,7 @@ type ListPartsInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -29095,10 +29132,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` - // Name of the Object. + // Name of the object. Name *string `type:"string"` - // Value of the Object. + // Value of the object. Value *string `type:"string"` } @@ -29848,6 +29885,13 @@ type Object struct { // The owner of the object Owner *Owner `type:"structure"` + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` @@ -29903,6 +29947,12 @@ func (s *Object) SetOwner(v *Owner) *Object { return s } +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *Object) SetRestoreStatus(v *RestoreStatus) *Object { + s.RestoreStatus = v + return s +} + // SetSize sets the Size field's value. func (s *Object) SetSize(v int64) *Object { s.Size = &v @@ -30251,6 +30301,13 @@ type ObjectVersion struct { // Specifies the owner of the object. Owner *Owner `type:"structure"` + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus `type:"structure"` + // Size in bytes of the object. Size *int64 `type:"integer"` @@ -30315,6 +30372,12 @@ func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { return s } +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *ObjectVersion) SetRestoreStatus(v *RestoreStatus) *ObjectVersion { + s.RestoreStatus = v + return s +} + // SetSize sets the Size field's value. func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { s.Size = &v @@ -34254,7 +34317,7 @@ type PutObjectAclInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Key is a required field @@ -34486,7 +34549,7 @@ type PutObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -35845,7 +35908,7 @@ type PutObjectTaggingInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -37232,7 +37295,7 @@ type RestoreObjectInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -37558,6 +37621,64 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is currently being restored. If the object restoration + // is in progress, the header returns the value TRUE. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE. 
+ // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. + IsRestoreInProgress *bool `type:"boolean"` + + // Indicates when the restored copy will expire. This value is populated only + // if the object has already been restored. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) GoString() string { + return s.String() +} + +// SetIsRestoreInProgress sets the IsRestoreInProgress field's value. +func (s *RestoreStatus) SetIsRestoreInProgress(v bool) *RestoreStatus { + s.IsRestoreInProgress = &v + return s +} + +// SetRestoreExpiryDate sets the RestoreExpiryDate field's value. +func (s *RestoreStatus) SetRestoreExpiryDate(v time.Time) *RestoreStatus { + s.RestoreExpiryDate = &v + return s +} + // Specifies the redirect behavior and when a redirect is applied. For more // information about routing rules, see Configuring advanced conditional redirects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) @@ -39477,7 +39598,7 @@ type UploadPartCopyInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field @@ -39947,7 +40068,7 @@ type UploadPartInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. 
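[Illustration, not part of the patch: a minimal sketch combining the new OptionalObjectAttributes request field with the RestoreStatus shape defined above, to see restore state for archived objects while listing. The bucket name is a placeholder.]

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := s3.New(sess)

        // Ask S3 to include restore status alongside each listed key.
        out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
            Bucket: aws.String("my-archive-bucket"), // placeholder
            OptionalObjectAttributes: []*string{
                aws.String(s3.OptionalObjectAttributesRestoreStatus),
            },
        })
        if err != nil {
            panic(err)
        }
        for _, obj := range out.Contents {
            if rs := obj.RestoreStatus; rs != nil {
                fmt.Println(aws.StringValue(obj.Key),
                    aws.BoolValue(rs.IsRestoreInProgress), rs.RestoreExpiryDate)
            }
        }
    }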
// // Bucket is a required field @@ -41233,6 +41354,12 @@ const ( // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth2 = "ap-south-2" + + // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth2 = "eu-south-2" ) // BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum @@ -41264,6 +41391,8 @@ func BucketLocationConstraint_Values() []string { BucketLocationConstraintUsGovWest1, BucketLocationConstraintUsWest1, BucketLocationConstraintUsWest2, + BucketLocationConstraintApSouth2, + BucketLocationConstraintEuSouth2, } } @@ -41376,8 +41505,8 @@ func DeleteMarkerReplicationStatus_Values() []string { } // Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters +// the encoding method to use. An object key can contain any Unicode character; +// however, the XML 1.0 parser cannot parse some characters, such as characters // with an ASCII value from 0 to 10. For characters that are not supported in // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. @@ -41713,6 +41842,12 @@ const ( // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" + + // InventoryOptionalFieldObjectAccessControlList is a InventoryOptionalField enum value + InventoryOptionalFieldObjectAccessControlList = "ObjectAccessControlList" + + // InventoryOptionalFieldObjectOwner is a InventoryOptionalField enum value + InventoryOptionalFieldObjectOwner = "ObjectOwner" ) // InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum @@ -41731,6 +41866,8 @@ func InventoryOptionalField_Values() []string { InventoryOptionalFieldIntelligentTieringAccessTier, InventoryOptionalFieldBucketKeyStatus, InventoryOptionalFieldChecksumAlgorithm, + InventoryOptionalFieldObjectAccessControlList, + InventoryOptionalFieldObjectOwner, } } @@ -42032,6 +42169,18 @@ func ObjectVersionStorageClass_Values() []string { } } +const ( + // OptionalObjectAttributesRestoreStatus is a OptionalObjectAttributes enum value + OptionalObjectAttributesRestoreStatus = "RestoreStatus" +) + +// OptionalObjectAttributes_Values returns all elements of the OptionalObjectAttributes enum +func OptionalObjectAttributes_Values() []string { + return []string{ + OptionalObjectAttributesRestoreStatus, + } +} + const ( // OwnerOverrideDestination is a OwnerOverride enum value OwnerOverrideDestination = "Destination" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index deeee90c7e5..0086334985d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -45,7 +45,7 @@ type UploadInput struct { // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When // you use this action with S3 on Outposts through the Amazon Web Services SDKs, // you provide the Outposts access point ARN in place of the bucket name. 
For - // more information about S3 on Outposts ARNs, see What is S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // more information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // Bucket is a required field diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 00000000000..c743913c572 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,1682 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns an access token for the authorized client. The access +// token issued will be used to fetch short-term credentials for the assigned +// roles in the AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. 
This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. 
+// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for each client. This value should come from + // the persisted result of the RegisterClient API. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a + // token. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from + // an in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports grant types for the authorization code, refresh token, and device + // code request. For device code requests, specify the following value: + // + // urn:ietf:params:oauth:grant-type:device_code + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // The location of the application that will receive the authorization code. + // Users authorize the service to send the request to this location. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported. For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The token used to obtain an access token in the event that the access token + // is invalid or expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // The list of scopes that is defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // An opaque token to access IAM Identity Center resources assigned to a user. + AccessToken *string `locationName:"accessToken" type:"string"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Currently, idToken is not yet implemented and is not supported. For more + // information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // The identifier of the user that is associated with the access token, if present. + IdToken *string `locationName:"idToken" type:"string"` + + // Currently, refreshToken is not yet implemented and is not supported.
For + // more information about the features and limitations of the current IAM Identity + // Center OIDC implementation, see Considerations for Using this Guide in the + // IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string `locationName:"refreshToken" type:"string"` + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. 
+func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. 
Anything + // other than public will be rejected by the service. + // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + ClientSecret *string `locationName:"clientSecret" type:"string"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // The endpoint where the client can get an access token. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true"` + + // The URL for the AWS access portal. For more information, see Using the AWS + // access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. 
This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Error_ *string `locationName:"error" type:"string"` + + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UnauthorizedClientException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error {
+	return &UnauthorizedClientException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnauthorizedClientException) Code() string {
+	return "UnauthorizedClientException"
+}
+
+// Message returns the exception's message.
+func (s *UnauthorizedClientException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnauthorizedClientException) OrigErr() error {
+	return nil
+}
+
+func (s *UnauthorizedClientException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthorizedClientException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedClientException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
+
+// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct {
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
+
+	Error_ *string `locationName:"error" type:"string"`
+
+	Error_description *string `locationName:"error_description" type:"string"`
+
+	Message_ *string `locationName:"message" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s UnsupportedGrantTypeException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error {
+	return &UnsupportedGrantTypeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnsupportedGrantTypeException) Code() string {
+	return "UnsupportedGrantTypeException"
+}
+
+// Message returns the exception's message.
+func (s *UnsupportedGrantTypeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnsupportedGrantTypeException) OrigErr() error {
+	return nil
+}
+
+func (s *UnsupportedGrantTypeException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnsupportedGrantTypeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnsupportedGrantTypeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
new file mode 100644
index 00000000000..8b5ee6019af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -0,0 +1,66 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssooidc provides the client and types for making API
+// requests to AWS SSO OIDC.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) OpenID Connect
+// (OIDC) is a web service that enables a client (such as AWS CLI or a native
+// application) to register with IAM Identity Center. The service also enables
+// the client to fetch the user’s access token upon successful authentication
+// and authorization with IAM Identity Center.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
+// will continue to retain their original name for backward compatibility purposes.
+// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+//   - The IAM Identity Center OIDC service currently implements only the portions
+//     of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628)
+//     that are necessary to enable single sign-on authentication with the AWS CLI.
+//     Support for other OIDC flows frequently needed for native applications, such
+//     as Authorization Code Flow (+ PKCE), will be addressed in future releases.
+//
+//   - The service emits only OIDC access tokens, such that obtaining a new
+//     token (for example, token refresh) requires explicit user re-authentication.
+//
+//   - The access tokens provided by this service grant access to all AWS account
+//     entitlements assigned to an IAM Identity Center user, not just a particular
+//     application.
+//
+//   - The documentation in this guide does not describe the mechanism to convert
+//     the access token into AWS Auth (“sigv4”) credentials for use with
+//     IAM-protected AWS service endpoints. For more information, see GetRoleCredentials
+//     (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
+//     in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see What is IAM Identity
+// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the IAM Identity Center User Guide.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service.
+//
+// See ssooidc package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/
+//
+// # Using the Client
+//
+// To contact AWS SSO OIDC with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 00000000000..69837701268 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Indicates that an error from the service occurred while trying to process + // a request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeInvalidClientException for service response error code + // "InvalidClientException". + // + // Indicates that the clientId or clientSecret in the request is invalid. For + // example, this can occur when a client sends an incorrect clientId or an expired + // clientSecret. + ErrCodeInvalidClientException = "InvalidClientException" + + // ErrCodeInvalidClientMetadataException for service response error code + // "InvalidClientMetadataException". + // + // Indicates that the client information sent in the request during registration + // is invalid. + ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException" + + // ErrCodeInvalidGrantException for service response error code + // "InvalidGrantException". + // + // Indicates that a request contains an invalid grant. This can occur if a client + // makes a CreateToken request with an invalid grant type. + ErrCodeInvalidGrantException = "InvalidGrantException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that something is wrong with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeInvalidScopeException for service response error code + // "InvalidScopeException". + // + // Indicates that the scope provided in the request is invalid. + ErrCodeInvalidScopeException = "InvalidScopeException" + + // ErrCodeSlowDownException for service response error code + // "SlowDownException". + // + // Indicates that the client is making the request too frequently and is more + // than the service can handle. 
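+	//
+	// A handling sketch (illustrative only; it assumes this SDK's awserr
+	// package and a caller-chosen retryDelay):
+	//
+	//	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeSlowDownException {
+	//		time.Sleep(retryDelay) // back off before polling again, per RFC 8628
+	//	}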
+	ErrCodeSlowDownException = "SlowDownException"
+
+	// ErrCodeUnauthorizedClientException for service response error code
+	// "UnauthorizedClientException".
+	//
+	// Indicates that the client is not currently authorized to make the request.
+	// This can happen when a clientId is not issued for a public client.
+	ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
+
+	// ErrCodeUnsupportedGrantTypeException for service response error code
+	// "UnsupportedGrantTypeException".
+	//
+	// Indicates that the grant type in the request is not supported by the service.
+	ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"AccessDeniedException":          newErrorAccessDeniedException,
+	"AuthorizationPendingException":  newErrorAuthorizationPendingException,
+	"ExpiredTokenException":          newErrorExpiredTokenException,
+	"InternalServerException":        newErrorInternalServerException,
+	"InvalidClientException":         newErrorInvalidClientException,
+	"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
+	"InvalidGrantException":          newErrorInvalidGrantException,
+	"InvalidRequestException":        newErrorInvalidRequestException,
+	"InvalidScopeException":          newErrorInvalidScopeException,
+	"SlowDownException":              newErrorSlowDownException,
+	"UnauthorizedClientException":    newErrorUnauthorizedClientException,
+	"UnsupportedGrantTypeException":  newErrorUnsupportedGrantTypeException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
new file mode 100644
index 00000000000..969f33c37b8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// modify or mutate any of the struct's properties though.
+type SSOOIDC struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO OIDC" // Name of service.
+	EndpointsID = "oidc"     // ID to lookup a service endpoint with.
+	ServiceID   = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSOOIDC client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a SSOOIDC client from just a session.
+// svc := ssooidc.New(mySession) +// +// // Create a SSOOIDC client with additional configuration +// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssooidc" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 7ac6b93f442..11af63b4d8b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1460,6 +1460,9 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` + // Reserved for future use. + ProvidedContexts []*ProvidedContext `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1633,6 +1636,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1674,6 +1687,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn return s } +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -2266,7 +2285,8 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. 
Your application must get this token by authenticating // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. // // WebIdentityToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's @@ -3385,6 +3405,63 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } +// Reserved for future use. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + ContextAssertion *string `min:"4" type:"string"` + + // Reserved for future use. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags // to control access to resources. For more information, see Tagging Amazon diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go index 317de76fc10..a67510c1c6d 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/appsTransport.go @@ -106,6 +106,11 @@ func (t *AppsTransport) RoundTrip(req *http.Request) (*http.Response, error) { return resp, err } +// AppID returns the appID of the transport +func (t *AppsTransport) AppID() int64 { + return t.appID +} + type AppsTransportOption func(*AppsTransport) // WithSigner configures the AppsTransport to use the given Signer for generating JWT tokens. 
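The AppID accessor added to AppsTransport above can be read back once the
transport is constructed. A minimal sketch (it assumes ghinstallation's
existing NewAppsTransportKeyFromFile constructor; the app ID and key path are
illustrative):

	package main

	import (
		"log"
		"net/http"

		"github.com/bradleyfalzon/ghinstallation/v2"
	)

	func main() {
		// Sketch: construct a GitHub App transport and read back its app ID.
		atr, err := ghinstallation.NewAppsTransportKeyFromFile(http.DefaultTransport, 12345, "app.private-key.pem")
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("authenticated as GitHub App %d", atr.AppID())
	}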
diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go index 015ebe4b656..84012a00dfc 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go @@ -191,6 +191,16 @@ func (t *Transport) Expiry() (expiresAt time.Time, refreshAt time.Time, err erro return t.token.ExpiresAt, t.token.getRefreshTime(), nil } +// AppID returns the app ID associated with the transport +func (t *Transport) AppID() int64 { + return t.appID +} + +// InstallationID returns the installation ID associated with the transport +func (t *Transport) InstallationID() int64 { + return t.installationID +} + func (t *Transport) refreshToken(ctx context.Context) error { // Convert InstallationTokenOptions into a ReadWriter to pass as an argument to http.NewRequest. body, err := GetReadWriter(t.InstallationTokenOptions) diff --git a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index dc32dadbdad..1c018920e89 100644 --- a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -16,6 +16,40 @@ package bean +import ( + "encoding/base64" + "encoding/json" + "github.com/docker/cli/cli/config/types" +) + const ( - YamlSeparator string = "---\n" + YamlSeparator string = "---\n" + RegistryTypeGcr = "gcr" + RegistryTypeEcr = "ecr" + GcrRegistryUsername = "oauth2accesstoken" + GcrRegistryScope = "https://www.googleapis.com/auth/cloud-platform" ) + +type DockerAuthConfig struct { + RegistryType string // can be ecr, gcr, docker-hub, harbor etc. + Username string + Password string + AccessKeyEcr string // used for pulling from private ecr registry + SecretAccessKeyEcr string // used for pulling from private ecr registry + EcrRegion string // used for pulling from private ecr registry + CredentialFileJsonGcr string // used for pulling from private gcr registry + IsRegistryPrivate bool +} + +func (r *DockerAuthConfig) GetEncodedRegistryAuth() (string, error) { + // Create and encode the auth config + authConfig := types.AuthConfig{ + Username: r.Username, + Password: r.Password, + } + encodedJSON, err := json.Marshal(authConfig) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(encodedJSON), nil +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 00000000000..483743c9921 --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,852 @@ +# File @generated by scripts/docs/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See scripts/docs/generate-authors.sh to make modifications. + +Aanand Prasad +Aaron L. 
Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Alberto Roura +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Couture-Beil +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alexey Igrychev +Alexis Couvreur +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andres G. Aragoneses +Andres Leon Rangel +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arko Dasgupta +Arnaud Porterie +Arnaud Rebillout +Arthur Peka +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bodenmiller +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benjamin Böhmke +Benjamin Nater +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Bishal Das +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Brad Baker +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Wieder +Bruno Sousa +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +Charlotte Mach +ChaYoung You +Chee Hau Lim +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Couzens +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Vermilion +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christopher Svensson +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Conner Crosby +Corey Farrell +Corey Quon +Cory Bennet +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Alvarez +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Karlsson +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Des Preston +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitriy Fishman +Dmitry Gusev +Dmitry Smirnov +Dmitry V. 
Krivenok +Dominik Braun +Don Kjer +Dong Chen +DongGeon Lee +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric Engestrom +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik Humphrey +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Geyer +Felix Hupfeld +Felix Rabe +fezzik1620 +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Gore +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Gio d'Amelio +Gleb Stsenov +Goksu Toprak +Gou Rao +Govind Rai +Grant Reaber +Greg Pflaum +Gsealy +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +Guillaume Tardif +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Iain Samuel McLean Elder +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Grund +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Hall +Jason Heiss +Jason Plum +Jay Kamat +Jean Lecordier +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jennings Zhang +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jim Lin +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jon Zeolla +Jonatas Baldin +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonathan Warriss-Simmons +Jonh Wendell +Jordan Jennings +Jorge Vallecillo +Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Kelton Bassingthwaite +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Alvarez +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Kyle Mitofsky +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Leonid Skorospelov +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marc Cornellà +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Mathieu Rollet +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Maximillian Fan Xavier +Mei ChunTao +Metal <2466052+tedhexaflow@users.noreply.github.com> +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Dalton +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Morten Linderud +Moysés Borges +Mozi <29089388+pzhlkj6612@users.noreply.github.com> +Mrunal Patel +muicoder +Murukesh Mohanan +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nick Santos +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +Odin Ugedal +ohmystack +OKA Naoya +Oliver Pomeroy +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Weaver +Pavel Pospisil +Paweł Gronowski +Paweł Pokrywka +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Dave Hello +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +Phong Tran +pidster +Pieter E Smit +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Rahul Kadyan +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Chen Zheng <58443436+rchenzheng@users.noreply.github.com> +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Roch Feuillade +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Sam Thibault +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Sandro Jäckel +Santhosh Manohar +Sargun Dhillon +Saswat Bhattacharya +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +Spring Lee +squeegels +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Stoica-Marcu Floris-Andrei +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Takeshi Koenuma +Takuya Noguchi +Taylor Jones +Teiva Harsanyi +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Bäckman +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Vardan Pogosian +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zeel B Patel +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +Александр Менщиков <__Singleton__@hackerdom.ru> +徐俊杰 diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 00000000000..9c8e20ab85c --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 00000000000..58b19b6d15b --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 00000000000..056af6b8425 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. 
+	Email string `json:"email,omitempty"`
+
+	ServerAddress string `json:"serveraddress,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
diff --git a/vendor/github.com/google/go-github/v53/github/actions_runners.go b/vendor/github.com/google/go-github/v53/github/actions_runners.go
index 40c6be3a92c..3990a5a90f7 100644
--- a/vendor/github.com/google/go-github/v53/github/actions_runners.go
+++ b/vendor/github.com/google/go-github/v53/github/actions_runners.go
@@ -45,6 +45,60 @@ func (s *ActionsService) ListRunnerApplicationDownloads(ctx context.Context, own
 	return rads, resp, nil
 }
 
+// GenerateJITConfigRequest specifies body parameters to GenerateRepoJITConfig.
+type GenerateJITConfigRequest struct {
+	Name          string  `json:"name"`
+	RunnerGroupID int64   `json:"runner_group_id"`
+	WorkFolder    *string `json:"work_folder,omitempty"`
+
+	// Labels represents the names of the custom labels to add to the runner.
+	// Minimum items: 1. Maximum items: 100.
+	Labels []string `json:"labels"`
+}
+
+// JITRunnerConfig represents encoded JIT configuration that can be used to bootstrap a self-hosted runner.
+type JITRunnerConfig struct {
+	EncodedJITConfig *string `json:"encoded_jit_config,omitempty"`
+}
+
+// GenerateOrgJITConfig generates a just-in-time configuration for an organization.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-an-organization
+func (s *ActionsService) GenerateOrgJITConfig(ctx context.Context, owner string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/actions/runners/generate-jitconfig", owner)
+	req, err := s.client.NewRequest("POST", u, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jitConfig := new(JITRunnerConfig)
+	resp, err := s.client.Do(ctx, req, jitConfig)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jitConfig, resp, nil
+}
+
+// GenerateRepoJITConfig generates a just-in-time configuration for a repository.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners?apiVersion=2022-11-28#create-configuration-for-a-just-in-time-runner-for-a-repository
+func (s *ActionsService) GenerateRepoJITConfig(ctx context.Context, owner, repo string, request *GenerateJITConfigRequest) (*JITRunnerConfig, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/runners/generate-jitconfig", owner, repo)
+	req, err := s.client.NewRequest("POST", u, request)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	jitConfig := new(JITRunnerConfig)
+	resp, err := s.client.Do(ctx, req, jitConfig)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return jitConfig, resp, nil
+}
+
 // RegistrationToken represents a token that can be used to add a self-hosted runner to a repository.
 type RegistrationToken struct {
 	Token *string `json:"token,omitempty"`
diff --git a/vendor/github.com/google/go-github/v53/github/code-scanning.go b/vendor/github.com/google/go-github/v53/github/code-scanning.go
index 6717348ed73..e4a6abeba37 100644
--- a/vendor/github.com/google/go-github/v53/github/code-scanning.go
+++ b/vendor/github.com/google/go-github/v53/github/code-scanning.go
@@ -378,3 +378,76 @@ func (s *CodeScanningService) GetAnalysis(ctx context.Context, owner, repo strin
 
 	return analysis, resp, nil
 }
+
+// DefaultSetupConfiguration represents a code scanning default setup configuration.
+type DefaultSetupConfiguration struct {
+	State      *string    `json:"state,omitempty"`
+	Languages  []string   `json:"languages,omitempty"`
+	QuerySuite *string    `json:"query_suite,omitempty"`
+	UpdatedAt  *Timestamp `json:"updated_at,omitempty"`
+}
+
+// GetDefaultSetupConfiguration gets a code scanning default setup configuration.
+//
+// You must use an access token with the repo scope to use this
+// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write
+// permission to use this endpoint.
+//
+// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-default-setup-configuration
+func (s *CodeScanningService) GetDefaultSetupConfiguration(ctx context.Context, owner, repo string) (*DefaultSetupConfiguration, *Response, error) {
+	u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cfg := new(DefaultSetupConfiguration)
+	resp, err := s.client.Do(ctx, req, cfg)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return cfg, resp, nil
+}
+
+// UpdateDefaultSetupConfigurationOptions specifies parameters to the CodeScanningService.UpdateDefaultSetupConfiguration
+// method.
+type UpdateDefaultSetupConfigurationOptions struct {
+	State      string   `json:"state"`
+	QuerySuite *string  `json:"query_suite,omitempty"`
+	Languages  []string `json:"languages,omitempty"`
+}
+
+// UpdateDefaultSetupConfigurationResponse represents a response from updating a code scanning default setup configuration.
+type UpdateDefaultSetupConfigurationResponse struct {
+	RunID  *int64  `json:"run_id,omitempty"`
+	RunURL *string `json:"run_url,omitempty"`
+}
+
+// UpdateDefaultSetupConfiguration updates a code scanning default setup configuration.
+//
+// You must use an access token with the repo scope to use this
+// endpoint with private repos or the public_repo scope for public repos. GitHub Apps must have the repo write
+// permission to use this endpoint.
+//
+// This method might return an AcceptedError and a status code of 202. This is the status that GitHub returns
+// to signify that it has scheduled the update of the default setup configuration in a background task.
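+//
+// A minimal call sketch (illustrative only; it assumes a configured
+// *github.Client named client and stand-in owner/repo values):
+//
+//	opts := &github.UpdateDefaultSetupConfigurationOptions{State: "configured"}
+//	res, _, err := client.CodeScanning.UpdateDefaultSetupConfiguration(ctx, "octo-org", "octo-repo", opts)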
+// +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#update-a-code-scanning-default-setup-configuration +func (s *CodeScanningService) UpdateDefaultSetupConfiguration(ctx context.Context, owner, repo string, options *UpdateDefaultSetupConfigurationOptions) (*UpdateDefaultSetupConfigurationResponse, *Response, error) { + u := fmt.Sprintf("repos/%s/%s/code-scanning/default-setup", owner, repo) + + req, err := s.client.NewRequest("PATCH", u, options) + if err != nil { + return nil, nil, err + } + + a := new(UpdateDefaultSetupConfigurationResponse) + resp, err := s.client.Do(ctx, req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, nil +} diff --git a/vendor/github.com/google/go-github/v53/github/codespaces.go b/vendor/github.com/google/go-github/v53/github/codespaces.go new file mode 100644 index 00000000000..a260c227de7 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/codespaces.go @@ -0,0 +1,254 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// CodespacesService handles communication with the Codespaces related +// methods of the GitHub API. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/ +type CodespacesService service + +// Codespace represents a codespace. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces +type Codespace struct { + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + DisplayName *string `json:"display_name,omitempty"` + EnvironmentID *string `json:"environment_id,omitempty"` + Owner *User `json:"owner,omitempty"` + BillableOwner *User `json:"billable_owner,omitempty"` + Repository *Repository `json:"repository,omitempty"` + Machine *CodespacesMachine `json:"machine,omitempty"` + DevcontainerPath *string `json:"devcontainer_path,omitempty"` + Prebuild *bool `json:"prebuild,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + LastUsedAt *Timestamp `json:"last_used_at,omitempty"` + State *string `json:"state,omitempty"` + URL *string `json:"url,omitempty"` + GitStatus *CodespacesGitStatus `json:"git_status,omitempty"` + Location *string `json:"location,omitempty"` + IdleTimeoutMinutes *int `json:"idle_timeout_minutes,omitempty"` + WebURL *string `json:"web_url,omitempty"` + MachinesURL *string `json:"machines_url,omitempty"` + StartURL *string `json:"start_url,omitempty"` + StopURL *string `json:"stop_url,omitempty"` + PullsURL *string `json:"pulls_url,omitempty"` + RecentFolders []string `json:"recent_folders,omitempty"` + RuntimeConstraints *CodespacesRuntimeConstraints `json:"runtime_constraints,omitempty"` + PendingOperation *bool `json:"pending_operation,omitempty"` + PendingOperationDisabledReason *string `json:"pending_operation_disabled_reason,omitempty"` + IdleTimeoutNotice *string `json:"idle_timeout_notice,omitempty"` + RetentionPeriodMinutes *int `json:"retention_period_minutes,omitempty"` + RetentionExpiresAt *Timestamp `json:"retention_expires_at,omitempty"` + LastKnownStopNotice *string `json:"last_known_stop_notice,omitempty"` +} + +// CodespacesGitStatus represents the git status of a codespace. 
+type CodespacesGitStatus struct { + Ahead *int `json:"ahead,omitempty"` + Behind *int `json:"behind,omitempty"` + HasUnpushedChanges *bool `json:"has_unpushed_changes,omitempty"` + HasUncommittedChanges *bool `json:"has_uncommitted_changes,omitempty"` + Ref *string `json:"ref,omitempty"` +} + +// CodespacesMachine represents the machine type of a codespace. +type CodespacesMachine struct { + Name *string `json:"name,omitempty"` + DisplayName *string `json:"display_name,omitempty"` + OperatingSystem *string `json:"operating_system,omitempty"` + StorageInBytes *int64 `json:"storage_in_bytes,omitempty"` + MemoryInBytes *int64 `json:"memory_in_bytes,omitempty"` + CPUs *int `json:"cpus,omitempty"` + PrebuildAvailability *string `json:"prebuild_availability,omitempty"` +} + +// CodespacesRuntimeConstraints represents the runtime constraints of a codespace. +type CodespacesRuntimeConstraints struct { + AllowedPortPrivacySettings []string `json:"allowed_port_privacy_settings,omitempty"` +} + +// ListCodespaces represents the response from the list codespaces endpoints. +type ListCodespaces struct { + TotalCount *int `json:"total_count,omitempty"` + Codespaces []*Codespace `json:"codespaces"` +} + +// ListInRepo lists codespaces for a user in a repository. +// +// Lists the codespaces associated with a specified repository and the authenticated user. +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have read access to the codespaces repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-in-a-repository-for-the-authenticated-user +func (s *CodespacesService) ListInRepo(ctx context.Context, owner, repo string, opts *ListOptions) (*ListCodespaces, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var codespaces *ListCodespaces + resp, err := s.client.Do(ctx, req, &codespaces) + if err != nil { + return nil, resp, err + } + + return codespaces, resp, nil +} + +// ListCodespacesOptions represents the options for listing codespaces for a user. +type ListCodespacesOptions struct { + ListOptions + RepositoryID int64 `url:"repository_id,omitempty"` +} + +// List lists codespaces for an authenticated user. +// +// Lists the authenticated user's codespaces. +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have read access to the codespaces repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#list-codespaces-for-the-authenticated-user +func (s *CodespacesService) List(ctx context.Context, opts *ListCodespacesOptions) (*ListCodespaces, *Response, error) { + u := "user/codespaces" + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var codespaces *ListCodespaces + resp, err := s.client.Do(ctx, req, &codespaces) + if err != nil { + return nil, resp, err + } + + return codespaces, resp, nil +} + +// CreateCodespaceOptions represents options for the creation of a codespace in a repository.
+type CreateCodespaceOptions struct { + Ref *string `json:"ref,omitempty"` + // Geo represents the geographic area for this codespace. + // If not specified, the value is assigned by IP. + // This property replaces location, which is being deprecated. + // Geo can be one of: `EuropeWest`, `SoutheastAsia`, `UsEast`, `UsWest`. + Geo *string `json:"geo,omitempty"` + ClientIP *string `json:"client_ip,omitempty"` + Machine *string `json:"machine,omitempty"` + DevcontainerPath *string `json:"devcontainer_path,omitempty"` + MultiRepoPermissionsOptOut *bool `json:"multi_repo_permissions_opt_out,omitempty"` + WorkingDirectory *string `json:"working_directory,omitempty"` + IdleTimeoutMinutes *int `json:"idle_timeout_minutes,omitempty"` + DisplayName *string `json:"display_name,omitempty"` + // RetentionPeriodMinutes represents the duration in minutes after codespace has gone idle in which it will be deleted. + // Must be integer minutes between 0 and 43200 (30 days). + RetentionPeriodMinutes *int `json:"retention_period_minutes,omitempty"` +} + +// CreateInRepo creates a codespace in a repository. +// +// Creates a codespace owned by the authenticated user in the specified repository. +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have write access to the codespaces repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#create-a-codespace-in-a-repository +func (s *CodespacesService) CreateInRepo(ctx context.Context, owner, repo string, request *CreateCodespaceOptions) (*Codespace, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces", owner, repo) + + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, nil, err + } + + var codespace *Codespace + resp, err := s.client.Do(ctx, req, &codespace) + if err != nil { + return nil, resp, err + } + + return codespace, resp, nil +} + +// Start starts a codespace. +// +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#start-a-codespace-for-the-authenticated-user +func (s *CodespacesService) Start(ctx context.Context, codespaceName string) (*Codespace, *Response, error) { + u := fmt.Sprintf("user/codespaces/%v/start", codespaceName) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, nil, err + } + + var codespace *Codespace + resp, err := s.client.Do(ctx, req, &codespace) + if err != nil { + return nil, resp, err + } + + return codespace, resp, nil +} + +// Stop stops a codespace. +// +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have write access to the codespaces_lifecycle_admin repository permission to use this endpoint. 
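+//
+// A hedged usage sketch (client and ctx are an assumed *github.Client and
+// context.Context; the codespace name is a placeholder):
+//
+//	cs, _, err := client.Codespaces.Stop(ctx, "legendary-disco-12345")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(cs.GetState())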
+// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#stop-a-codespace-for-the-authenticated-user +func (s *CodespacesService) Stop(ctx context.Context, codespaceName string) (*Codespace, *Response, error) { + u := fmt.Sprintf("user/codespaces/%v/stop", codespaceName) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, nil, err + } + + var codespace *Codespace + resp, err := s.client.Do(ctx, req, &codespace) + if err != nil { + return nil, resp, err + } + + return codespace, resp, nil +} + +// Delete deletes a codespace. +// +// You must authenticate using an access token with the codespace scope to use this endpoint. +// GitHub Apps must have write access to the codespaces repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/codespaces?apiVersion=2022-11-28#delete-a-codespace-for-the-authenticated-user +func (s *CodespacesService) Delete(ctx context.Context, codespaceName string) (*Response, error) { + u := fmt.Sprintf("user/codespaces/%v", codespaceName) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go b/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go new file mode 100644 index 00000000000..e11c679c668 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/codespaces_secrets.go @@ -0,0 +1,405 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// ListUserSecrets lists all secrets available for a user's codespaces. +// +// Lists all secrets available for a user's Codespaces without revealing their encrypted values. +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-secrets-for-the-authenticated-user +func (s *CodespacesService) ListUserSecrets(ctx context.Context, opts *ListOptions) (*Secrets, *Response, error) { + u, err := addOptions("user/codespaces/secrets", opts) + if err != nil { + return nil, nil, err + } + return s.listSecrets(ctx, u) +} + +// ListOrgSecrets lists all secrets available to an org. +// +// Lists all Codespaces secrets available at the organization-level without revealing their encrypted values. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-organization-secrets +func (s *CodespacesService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + return s.listSecrets(ctx, u) +} + +// ListRepoSecrets lists all secrets available to a repo. +// +// Lists all secrets available in a repository without revealing their encrypted values. You must authenticate using an access token with the repo scope to use this endpoint.
GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#list-repository-secrets +func (s *CodespacesService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces/secrets", owner, repo) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + return s.listSecrets(ctx, u) +} + +func (s *CodespacesService) listSecrets(ctx context.Context, url string) (*Secrets, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + var secrets *Secrets + resp, err := s.client.Do(ctx, req, &secrets) + if err != nil { + return nil, resp, err + } + + return secrets, resp, nil +} + +// GetUserPublicKey gets the user's public key for encrypting codespace secrets. +// +// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets. +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-public-key-for-the-authenticated-user +func (s *CodespacesService) GetUserPublicKey(ctx context.Context) (*PublicKey, *Response, error) { + return s.getPublicKey(ctx, "user/codespaces/secrets/public-key") +} + +// GetOrgPublicKey gets the org's public key for encrypting codespace secrets. +// +// Gets a public key for an organization, which is required in order to encrypt secrets. You need to encrypt the value of a secret before you can create or update secrets. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-public-key +func (s *CodespacesService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) { + return s.getPublicKey(ctx, fmt.Sprintf("orgs/%v/codespaces/secrets/public-key", org)) +} + +// GetRepoPublicKey gets the repo's public key for encrypting codespace secrets. +// +// Gets your public key, which you need to encrypt secrets. You need to encrypt a secret before you can create or update secrets. Anyone with read access to the repository can use this endpoint. If the repository is private you must use an access token with the repo scope. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint.
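+//
+// A hedged sketch of the typical flow (client/ctx assumed; names are placeholders):
+//
+//	key, _, err := client.Codespaces.GetRepoPublicKey(ctx, "octocat", "hello-world")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// key.GetKeyID() and key.GetKey() feed the LibSodium sealed-box encryption
+//	// performed before calling CreateOrUpdateRepoSecret.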
+// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-public-key +func (s *CodespacesService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) { + return s.getPublicKey(ctx, fmt.Sprintf("repos/%v/%v/codespaces/secrets/public-key", owner, repo)) +} + +func (s *CodespacesService) getPublicKey(ctx context.Context, url string) (*PublicKey, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + var publicKey *PublicKey + resp, err := s.client.Do(ctx, req, &publicKey) + if err != nil { + return nil, resp, err + } + + return publicKey, resp, nil +} + +// GetUserSecret gets a user's codespace secret. +// +// Gets a secret available to a user's codespaces without revealing its encrypted value. +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have read access to the codespaces_user_secrets user permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#get-a-secret-for-the-authenticated-user +func (s *CodespacesService) GetUserSecret(ctx context.Context, name string) (*Secret, *Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v", name) + return s.getSecret(ctx, u) +} + +// GetOrgSecret gets an org's codespace secret. +// +// Gets an organization secret without revealing its encrypted value. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#get-an-organization-secret +func (s *CodespacesService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name) + return s.getSecret(ctx, u) +} + +// GetRepoSecret gets a repo's codespace secret. +// +// Gets a single repository secret without revealing its encrypted value. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#get-a-repository-secret +func (s *CodespacesService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name) + return s.getSecret(ctx, u) +} + +func (s *CodespacesService) getSecret(ctx context.Context, url string) (*Secret, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + var secret *Secret + resp, err := s.client.Do(ctx, req, &secret) + if err != nil { + return nil, resp, err + } + + return secret, resp, nil +} + +// CreateOrUpdateUserSecret creates or updates a user's codespace secret. +// +// Creates or updates a secret for a user's codespace with an encrypted value. Encrypt your secret using LibSodium. +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must also have Codespaces access to use this endpoint.
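+//
+// A hedged sketch, assuming client/ctx as above and that encryptedValue was
+// produced with LibSodium against the key returned by GetUserPublicKey:
+//
+//	eSecret := &github.EncryptedSecret{
+//		Name:           "MY_TOKEN",
+//		KeyID:          key.GetKeyID(),
+//		EncryptedValue: encryptedValue,
+//	}
+//	_, err := client.Codespaces.CreateOrUpdateUserSecret(ctx, eSecret)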
+// GitHub Apps must have write access to the codespaces_user_secrets user permission and codespaces_secrets repository permission on all referenced repositories to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#create-or-update-a-secret-for-the-authenticated-user +func (s *CodespacesService) CreateOrUpdateUserSecret(ctx context.Context, eSecret *EncryptedSecret) (*Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v", eSecret.Name) + return s.createOrUpdateSecret(ctx, u, eSecret) +} + +// CreateOrUpdateOrgSecret creates or updates an org's codespace secret. +// +// Creates or updates an organization secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#create-or-update-an-organization-secret +func (s *CodespacesService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, eSecret.Name) + return s.createOrUpdateSecret(ctx, u, eSecret) +} + +// CreateOrUpdateRepoSecret creates or updates a repo's codespace secret. +// +// Creates or updates a repository secret with an encrypted value. Encrypt your secret using LibSodium. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#create-or-update-a-repository-secret +func (s *CodespacesService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, eSecret.Name) + return s.createOrUpdateSecret(ctx, u, eSecret) +} + +func (s *CodespacesService) createOrUpdateSecret(ctx context.Context, url string, eSecret *EncryptedSecret) (*Response, error) { + req, err := s.client.NewRequest("PUT", url, eSecret) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// DeleteUserSecret deletes a user's codespace secret. +// +// Deletes a secret from a user's codespaces using the secret name. Deleting the secret will remove access from all codespaces that were allowed to access the secret. +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#delete-a-secret-for-the-authenticated-user +func (s *CodespacesService) DeleteUserSecret(ctx context.Context, name string) (*Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v", name) + return s.deleteSecret(ctx, u) +} + +// DeleteOrgSecret deletes an org's codespace secret. +// +// Deletes an organization secret using the secret name. You must authenticate using an access token with the admin:org scope to use this endpoint.
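+//
+// A hedged sketch (client/ctx assumed; the org and secret names are placeholders):
+//
+//	_, err := client.Codespaces.DeleteOrgSecret(ctx, "my-org", "MY_TOKEN")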
+// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#delete-an-organization-secret +func (s *CodespacesService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v", org, name) + return s.deleteSecret(ctx, u) +} + +// DeleteRepoSecret deletes a repo's codespace secret. +// +// Deletes a secret in a repository using the secret name. You must authenticate using an access token with the repo scope to use this endpoint. GitHub Apps must have write access to the codespaces_secrets repository permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/repository-secrets?apiVersion=2022-11-28#delete-a-repository-secret +func (s *CodespacesService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/codespaces/secrets/%v", owner, repo, name) + return s.deleteSecret(ctx, u) +} + +func (s *CodespacesService) deleteSecret(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// ListSelectedReposForUserSecret lists the repositories that have been granted the ability to use a user's codespace secret. +// +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have read access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#list-selected-repositories-for-a-user-secret +func (s *CodespacesService) ListSelectedReposForUserSecret(ctx context.Context, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + return s.listSelectedReposForSecret(ctx, u) +} + +// ListSelectedReposForOrgSecret lists the repositories that have been granted the ability to use an organization's codespace secret. +// +// Lists all repositories that have been selected when the visibility for repository access to a secret is set to selected. You must authenticate using an access token with the admin:org scope to use this endpoint.
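+//
+// A hedged sketch (client/ctx assumed; names are placeholders):
+//
+//	repos, _, err := client.Codespaces.ListSelectedReposForOrgSecret(ctx, "my-org", "MY_TOKEN", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, r := range repos.Repositories {
+//		fmt.Println(r.GetFullName())
+//	}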
+// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#list-selected-repositories-for-an-organization-secret +func (s *CodespacesService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + return s.listSelectedReposForSecret(ctx, u) +} + +func (s *CodespacesService) listSelectedReposForSecret(ctx context.Context, url string) (*SelectedReposList, *Response, error) { + req, err := s.client.NewRequest("GET", url, nil) + if err != nil { + return nil, nil, err + } + + var repositories *SelectedReposList + resp, err := s.client.Do(ctx, req, &repositories) + if err != nil { + return nil, resp, err + } + + return repositories, resp, nil +} + +// SetSelectedReposForUserSecret sets the repositories that have been granted the ability to use a user's codespace secret. +// +// You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. +// GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on all referenced repositories to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#set-selected-repositories-for-a-user-secret +func (s *CodespacesService) SetSelectedReposForUserSecret(ctx context.Context, name string, ids SelectedRepoIDs) (*Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v/repositories", name) + return s.setSelectedRepoForSecret(ctx, u, ids) +} + +// SetSelectedReposForOrgSecret sets the repositories that have been granted the ability to use an organization's codespace secret. +// +// Replaces all repositories for an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#set-selected-repositories-for-an-organization-secret +func (s *CodespacesService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories", org, name) + return s.setSelectedRepoForSecret(ctx, u, ids) +} + +func (s *CodespacesService) setSelectedRepoForSecret(ctx context.Context, url string, ids SelectedRepoIDs) (*Response, error) { + type repoIDs struct { + SelectedIDs SelectedRepoIDs `json:"selected_repository_ids"` + } + + req, err := s.client.NewRequest("PUT", url, repoIDs{SelectedIDs: ids}) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// AddSelectedRepoToUserSecret adds a repository to the list of repositories that have been granted the ability to use a user's codespace secret. +// +// Adds a repository to the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint.
GitHub Apps must have write access to the codespaces_user_secrets user permission and write access to the codespaces_secrets repository permission on the referenced repository to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#add-a-selected-repository-to-a-user-secret +func (s *CodespacesService) AddSelectedRepoToUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID) + return s.addSelectedRepoToSecret(ctx, u) +} + +// AddSelectedRepoToOrgSecret adds a repository to the list of repositories that have been granted the ability to use an organization's codespace secret. +// +// Adds a repository to an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#add-selected-repository-to-an-organization-secret +func (s *CodespacesService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID) + return s.addSelectedRepoToSecret(ctx, u) +} + +func (s *CodespacesService) addSelectedRepoToSecret(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("PUT", url, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// RemoveSelectedRepoFromUserSecret removes a repository from the list of repositories that have been granted the ability to use a user's codespace secret. +// +// Removes a repository from the selected repositories for a user's codespace secret. You must authenticate using an access token with the codespace or codespace:secrets scope to use this endpoint. User must have Codespaces access to use this endpoint. GitHub Apps must have write access to the codespaces_user_secrets user permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/secrets?apiVersion=2022-11-28#remove-a-selected-repository-from-a-user-secret +func (s *CodespacesService) RemoveSelectedRepoFromUserSecret(ctx context.Context, name string, repo *Repository) (*Response, error) { + u := fmt.Sprintf("user/codespaces/secrets/%v/repositories/%v", name, *repo.ID) + return s.removeSelectedRepoFromSecret(ctx, u) +} + +// RemoveSelectedRepoFromOrgSecret removes a repository from the list of repositories that have been granted the ability to use an organization's codespace secret. +// +// Removes a repository from an organization secret when the visibility for repository access is set to selected. The visibility is set when you Create or update an organization secret. You must authenticate using an access token with the admin:org scope to use this endpoint.
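+//
+// A hedged sketch (client/ctx assumed; the *Repository is typically fetched
+// first, because these helpers dereference repo.ID):
+//
+//	repo, _, err := client.Repositories.Get(ctx, "octocat", "hello-world")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_, err = client.Codespaces.RemoveSelectedRepoFromOrgSecret(ctx, "my-org", "MY_TOKEN", repo)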
+// +// GitHub API docs: https://docs.github.com/en/rest/codespaces/organization-secrets?apiVersion=2022-11-28#remove-selected-repository-from-an-organization-secret +func (s *CodespacesService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { + u := fmt.Sprintf("orgs/%v/codespaces/secrets/%v/repositories/%v", org, name, *repo.ID) + return s.removeSelectedRepoFromSecret(ctx, u) +} + +func (s *CodespacesService) removeSelectedRepoFromSecret(ctx context.Context, url string) (*Response, error) { + req, err := s.client.NewRequest("DELETE", url, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} diff --git a/vendor/github.com/google/go-github/v53/github/event.go b/vendor/github.com/google/go-github/v53/github/event.go index 20907a99321..4ee25603a83 100644 --- a/vendor/github.com/google/go-github/v53/github/event.go +++ b/vendor/github.com/google/go-github/v53/github/event.go @@ -127,6 +127,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &RepositoryVulnerabilityAlertEvent{} case "SecretScanningAlertEvent": payload = &SecretScanningAlertEvent{} + case "SecurityAdvisoryEvent": + payload = &SecurityAdvisoryEvent{} case "StarEvent": payload = &StarEvent{} case "StatusEvent": diff --git a/vendor/github.com/google/go-github/v53/github/github-accessors.go b/vendor/github.com/google/go-github/v53/github/github-accessors.go index a9aaee814ab..8acb72b0420 100644 --- a/vendor/github.com/google/go-github/v53/github/github-accessors.go +++ b/vendor/github.com/google/go-github/v53/github/github-accessors.go @@ -2222,6 +2222,22 @@ func (b *BranchProtectionRuleEvent) GetSender() *User { return b.Sender } +// GetActorID returns the ActorID field if it's non-nil, zero value otherwise. +func (b *BypassActor) GetActorID() int64 { + if b == nil || b.ActorID == nil { + return 0 + } + return *b.ActorID +} + +// GetActorType returns the ActorType field if it's non-nil, zero value otherwise. +func (b *BypassActor) GetActorType() string { + if b == nil || b.ActorType == nil { + return "" + } + return *b.ActorType +} + // GetApp returns the App field. func (c *CheckRun) GetApp() *App { if c == nil { @@ -2886,6 +2902,342 @@ func (c *CodeSearchResult) GetTotal() int { return *c.Total } +// GetBillableOwner returns the BillableOwner field. +func (c *Codespace) GetBillableOwner() *User { + if c == nil { + return nil + } + return c.BillableOwner +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (c *Codespace) GetCreatedAt() Timestamp { + if c == nil || c.CreatedAt == nil { + return Timestamp{} + } + return *c.CreatedAt +} + +// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise. +func (c *Codespace) GetDevcontainerPath() string { + if c == nil || c.DevcontainerPath == nil { + return "" + } + return *c.DevcontainerPath +} + +// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. +func (c *Codespace) GetDisplayName() string { + if c == nil || c.DisplayName == nil { + return "" + } + return *c.DisplayName +} + +// GetEnvironmentID returns the EnvironmentID field if it's non-nil, zero value otherwise. +func (c *Codespace) GetEnvironmentID() string { + if c == nil || c.EnvironmentID == nil { + return "" + } + return *c.EnvironmentID +} + +// GetGitStatus returns the GitStatus field.
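+//
+// These generated accessors are nil-safe, so chained reads do not panic; a
+// hedged illustration, assuming cs is a *Codespace that may be nil or sparse:
+//
+//	ahead := cs.GetGitStatus().GetAhead() // 0 when cs or its GitStatus is nil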
+func (c *Codespace) GetGitStatus() *CodespacesGitStatus { + if c == nil { + return nil + } + return c.GitStatus +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (c *Codespace) GetID() int64 { + if c == nil || c.ID == nil { + return 0 + } + return *c.ID +} + +// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise. +func (c *Codespace) GetIdleTimeoutMinutes() int { + if c == nil || c.IdleTimeoutMinutes == nil { + return 0 + } + return *c.IdleTimeoutMinutes +} + +// GetIdleTimeoutNotice returns the IdleTimeoutNotice field if it's non-nil, zero value otherwise. +func (c *Codespace) GetIdleTimeoutNotice() string { + if c == nil || c.IdleTimeoutNotice == nil { + return "" + } + return *c.IdleTimeoutNotice +} + +// GetLastKnownStopNotice returns the LastKnownStopNotice field if it's non-nil, zero value otherwise. +func (c *Codespace) GetLastKnownStopNotice() string { + if c == nil || c.LastKnownStopNotice == nil { + return "" + } + return *c.LastKnownStopNotice +} + +// GetLastUsedAt returns the LastUsedAt field if it's non-nil, zero value otherwise. +func (c *Codespace) GetLastUsedAt() Timestamp { + if c == nil || c.LastUsedAt == nil { + return Timestamp{} + } + return *c.LastUsedAt +} + +// GetLocation returns the Location field if it's non-nil, zero value otherwise. +func (c *Codespace) GetLocation() string { + if c == nil || c.Location == nil { + return "" + } + return *c.Location +} + +// GetMachine returns the Machine field. +func (c *Codespace) GetMachine() *CodespacesMachine { + if c == nil { + return nil + } + return c.Machine +} + +// GetMachinesURL returns the MachinesURL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetMachinesURL() string { + if c == nil || c.MachinesURL == nil { + return "" + } + return *c.MachinesURL +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *Codespace) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetOwner returns the Owner field. +func (c *Codespace) GetOwner() *User { + if c == nil { + return nil + } + return c.Owner +} + +// GetPendingOperation returns the PendingOperation field if it's non-nil, zero value otherwise. +func (c *Codespace) GetPendingOperation() bool { + if c == nil || c.PendingOperation == nil { + return false + } + return *c.PendingOperation +} + +// GetPendingOperationDisabledReason returns the PendingOperationDisabledReason field if it's non-nil, zero value otherwise. +func (c *Codespace) GetPendingOperationDisabledReason() string { + if c == nil || c.PendingOperationDisabledReason == nil { + return "" + } + return *c.PendingOperationDisabledReason +} + +// GetPrebuild returns the Prebuild field if it's non-nil, zero value otherwise. +func (c *Codespace) GetPrebuild() bool { + if c == nil || c.Prebuild == nil { + return false + } + return *c.Prebuild +} + +// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetPullsURL() string { + if c == nil || c.PullsURL == nil { + return "" + } + return *c.PullsURL +} + +// GetRepository returns the Repository field. +func (c *Codespace) GetRepository() *Repository { + if c == nil { + return nil + } + return c.Repository +} + +// GetRetentionExpiresAt returns the RetentionExpiresAt field if it's non-nil, zero value otherwise. 
+func (c *Codespace) GetRetentionExpiresAt() Timestamp { + if c == nil || c.RetentionExpiresAt == nil { + return Timestamp{} + } + return *c.RetentionExpiresAt +} + +// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise. +func (c *Codespace) GetRetentionPeriodMinutes() int { + if c == nil || c.RetentionPeriodMinutes == nil { + return 0 + } + return *c.RetentionPeriodMinutes +} + +// GetRuntimeConstraints returns the RuntimeConstraints field. +func (c *Codespace) GetRuntimeConstraints() *CodespacesRuntimeConstraints { + if c == nil { + return nil + } + return c.RuntimeConstraints +} + +// GetStartURL returns the StartURL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetStartURL() string { + if c == nil || c.StartURL == nil { + return "" + } + return *c.StartURL +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (c *Codespace) GetState() string { + if c == nil || c.State == nil { + return "" + } + return *c.State +} + +// GetStopURL returns the StopURL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetStopURL() string { + if c == nil || c.StopURL == nil { + return "" + } + return *c.StopURL +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (c *Codespace) GetUpdatedAt() Timestamp { + if c == nil || c.UpdatedAt == nil { + return Timestamp{} + } + return *c.UpdatedAt +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetURL() string { + if c == nil || c.URL == nil { + return "" + } + return *c.URL +} + +// GetWebURL returns the WebURL field if it's non-nil, zero value otherwise. +func (c *Codespace) GetWebURL() string { + if c == nil || c.WebURL == nil { + return "" + } + return *c.WebURL +} + +// GetAhead returns the Ahead field if it's non-nil, zero value otherwise. +func (c *CodespacesGitStatus) GetAhead() int { + if c == nil || c.Ahead == nil { + return 0 + } + return *c.Ahead +} + +// GetBehind returns the Behind field if it's non-nil, zero value otherwise. +func (c *CodespacesGitStatus) GetBehind() int { + if c == nil || c.Behind == nil { + return 0 + } + return *c.Behind +} + +// GetHasUncommittedChanges returns the HasUncommittedChanges field if it's non-nil, zero value otherwise. +func (c *CodespacesGitStatus) GetHasUncommittedChanges() bool { + if c == nil || c.HasUncommittedChanges == nil { + return false + } + return *c.HasUncommittedChanges +} + +// GetHasUnpushedChanges returns the HasUnpushedChanges field if it's non-nil, zero value otherwise. +func (c *CodespacesGitStatus) GetHasUnpushedChanges() bool { + if c == nil || c.HasUnpushedChanges == nil { + return false + } + return *c.HasUnpushedChanges +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (c *CodespacesGitStatus) GetRef() string { + if c == nil || c.Ref == nil { + return "" + } + return *c.Ref +} + +// GetCPUs returns the CPUs field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetCPUs() int { + if c == nil || c.CPUs == nil { + return 0 + } + return *c.CPUs +} + +// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetDisplayName() string { + if c == nil || c.DisplayName == nil { + return "" + } + return *c.DisplayName +} + +// GetMemoryInBytes returns the MemoryInBytes field if it's non-nil, zero value otherwise. 
+func (c *CodespacesMachine) GetMemoryInBytes() int64 { + if c == nil || c.MemoryInBytes == nil { + return 0 + } + return *c.MemoryInBytes +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetOperatingSystem returns the OperatingSystem field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetOperatingSystem() string { + if c == nil || c.OperatingSystem == nil { + return "" + } + return *c.OperatingSystem +} + +// GetPrebuildAvailability returns the PrebuildAvailability field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetPrebuildAvailability() string { + if c == nil || c.PrebuildAvailability == nil { + return "" + } + return *c.PrebuildAvailability +} + +// GetStorageInBytes returns the StorageInBytes field if it's non-nil, zero value otherwise. +func (c *CodespacesMachine) GetStorageInBytes() int64 { + if c == nil || c.StorageInBytes == nil { + return 0 + } + return *c.StorageInBytes +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (c *CollaboratorInvitation) GetCreatedAt() Timestamp { if c == nil || c.CreatedAt == nil { @@ -4006,6 +4358,86 @@ func (c *CreateCheckSuiteOptions) GetHeadBranch() string { return *c.HeadBranch } +// GetClientIP returns the ClientIP field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetClientIP() string { + if c == nil || c.ClientIP == nil { + return "" + } + return *c.ClientIP +} + +// GetDevcontainerPath returns the DevcontainerPath field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetDevcontainerPath() string { + if c == nil || c.DevcontainerPath == nil { + return "" + } + return *c.DevcontainerPath +} + +// GetDisplayName returns the DisplayName field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetDisplayName() string { + if c == nil || c.DisplayName == nil { + return "" + } + return *c.DisplayName +} + +// GetGeo returns the Geo field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetGeo() string { + if c == nil || c.Geo == nil { + return "" + } + return *c.Geo +} + +// GetIdleTimeoutMinutes returns the IdleTimeoutMinutes field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetIdleTimeoutMinutes() int { + if c == nil || c.IdleTimeoutMinutes == nil { + return 0 + } + return *c.IdleTimeoutMinutes +} + +// GetMachine returns the Machine field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetMachine() string { + if c == nil || c.Machine == nil { + return "" + } + return *c.Machine +} + +// GetMultiRepoPermissionsOptOut returns the MultiRepoPermissionsOptOut field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetMultiRepoPermissionsOptOut() bool { + if c == nil || c.MultiRepoPermissionsOptOut == nil { + return false + } + return *c.MultiRepoPermissionsOptOut +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetRef() string { + if c == nil || c.Ref == nil { + return "" + } + return *c.Ref +} + +// GetRetentionPeriodMinutes returns the RetentionPeriodMinutes field if it's non-nil, zero value otherwise. 
+func (c *CreateCodespaceOptions) GetRetentionPeriodMinutes() int { + if c == nil || c.RetentionPeriodMinutes == nil { + return 0 + } + return *c.RetentionPeriodMinutes +} + +// GetWorkingDirectory returns the WorkingDirectory field if it's non-nil, zero value otherwise. +func (c *CreateCodespaceOptions) GetWorkingDirectory() string { + if c == nil || c.WorkingDirectory == nil { + return "" + } + return *c.WorkingDirectory +} + // GetDescription returns the Description field if it's non-nil, zero value otherwise. func (c *CreateEvent) GetDescription() string { if c == nil || c.Description == nil { @@ -4262,6 +4694,30 @@ func (c *CustomRepoRoles) GetName() string { return *c.Name } +// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise. +func (d *DefaultSetupConfiguration) GetQuerySuite() string { + if d == nil || d.QuerySuite == nil { + return "" + } + return *d.QuerySuite +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (d *DefaultSetupConfiguration) GetState() string { + if d == nil || d.State == nil { + return "" + } + return *d.State +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (d *DefaultSetupConfiguration) GetUpdatedAt() Timestamp { + if d == nil || d.UpdatedAt == nil { + return Timestamp{} + } + return *d.UpdatedAt +} + // GetInstallation returns the Installation field. func (d *DeleteEvent) GetInstallation() *Installation { if d == nil { @@ -6310,6 +6766,14 @@ func (f *ForkEvent) GetSender() *User { return f.Sender } +// GetWorkFolder returns the WorkFolder field if it's non-nil, zero value otherwise. +func (g *GenerateJITConfigRequest) GetWorkFolder() string { + if g == nil || g.WorkFolder == nil { + return "" + } + return *g.WorkFolder +} + // GetPreviousTagName returns the PreviousTagName field if it's non-nil, zero value otherwise. func (g *GenerateNotesOptions) GetPreviousTagName() string { if g == nil || g.PreviousTagName == nil { @@ -8934,6 +9398,14 @@ func (i *IssueStats) GetTotalIssues() int { return *i.TotalIssues } +// GetEncodedJITConfig returns the EncodedJITConfig field if it's non-nil, zero value otherwise. +func (j *JITRunnerConfig) GetEncodedJITConfig() string { + if j == nil || j.EncodedJITConfig == nil { + return "" + } + return *j.EncodedJITConfig +} + // GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. func (j *Jobs) GetTotalCount() int { if j == nil || j.TotalCount == nil { @@ -9454,6 +9926,14 @@ func (l *ListCheckSuiteResults) GetTotal() int { return *l.Total } +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (l *ListCodespaces) GetTotalCount() int { + if l == nil || l.TotalCount == nil { + return 0 + } + return *l.TotalCount +} + // GetAffiliation returns the Affiliation field if it's non-nil, zero value otherwise. func (l *ListCollaboratorOptions) GetAffiliation() string { if l == nil || l.Affiliation == nil { @@ -17918,6 +18398,14 @@ func (r *RepositoryRelease) GetZipballURL() string { return *r.ZipballURL } +// GetParameters returns the Parameters field if it's non-nil, zero value otherwise. +func (r *RepositoryRule) GetParameters() json.RawMessage { + if r == nil || r.Parameters == nil { + return json.RawMessage{} + } + return *r.Parameters +} + // GetCommit returns the Commit field. 
func (r *RepositoryTag) GetCommit() *Commit { if r == nil { @@ -18366,6 +18854,118 @@ func (r *Rule) GetSeverity() string { return *r.Severity } +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (r *RulePatternParameters) GetName() string { + if r == nil || r.Name == nil { + return "" + } + return *r.Name +} + +// GetNegate returns the Negate field if it's non-nil, zero value otherwise. +func (r *RulePatternParameters) GetNegate() bool { + if r == nil || r.Negate == nil { + return false + } + return *r.Negate +} + +// GetIntegrationID returns the IntegrationID field if it's non-nil, zero value otherwise. +func (r *RuleRequiredStatusChecks) GetIntegrationID() int64 { + if r == nil || r.IntegrationID == nil { + return 0 + } + return *r.IntegrationID +} + +// GetBypassMode returns the BypassMode field if it's non-nil, zero value otherwise. +func (r *Ruleset) GetBypassMode() string { + if r == nil || r.BypassMode == nil { + return "" + } + return *r.BypassMode +} + +// GetConditions returns the Conditions field. +func (r *Ruleset) GetConditions() *RulesetConditions { + if r == nil { + return nil + } + return r.Conditions +} + +// GetLinks returns the Links field. +func (r *Ruleset) GetLinks() *RulesetLinks { + if r == nil { + return nil + } + return r.Links +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (r *Ruleset) GetNodeID() string { + if r == nil || r.NodeID == nil { + return "" + } + return *r.NodeID +} + +// GetSourceType returns the SourceType field if it's non-nil, zero value otherwise. +func (r *Ruleset) GetSourceType() string { + if r == nil || r.SourceType == nil { + return "" + } + return *r.SourceType +} + +// GetTarget returns the Target field if it's non-nil, zero value otherwise. +func (r *Ruleset) GetTarget() string { + if r == nil || r.Target == nil { + return "" + } + return *r.Target +} + +// GetRefName returns the RefName field. +func (r *RulesetConditions) GetRefName() *RulesetRefConditionParameters { + if r == nil { + return nil + } + return r.RefName +} + +// GetRepositoryName returns the RepositoryName field. +func (r *RulesetConditions) GetRepositoryName() *RulesetRepositoryConditionParameters { + if r == nil { + return nil + } + return r.RepositoryName +} + +// GetHRef returns the HRef field if it's non-nil, zero value otherwise. +func (r *RulesetLink) GetHRef() string { + if r == nil || r.HRef == nil { + return "" + } + return *r.HRef +} + +// GetSelf returns the Self field. +func (r *RulesetLinks) GetSelf() *RulesetLink { + if r == nil { + return nil + } + return r.Self +} + +// GetProtected returns the Protected field if it's non-nil, zero value otherwise. +func (r *RulesetRepositoryConditionParameters) GetProtected() bool { + if r == nil || r.Protected == nil { + return false + } + return *r.Protected +} + // GetBusy returns the Busy field if it's non-nil, zero value otherwise. func (r *Runner) GetBusy() bool { if r == nil || r.Busy == nil { @@ -20998,6 +21598,30 @@ func (u *UpdateCheckRunOptions) GetStatus() string { return *u.Status } +// GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise. +func (u *UpdateDefaultSetupConfigurationOptions) GetQuerySuite() string { + if u == nil || u.QuerySuite == nil { + return "" + } + return *u.QuerySuite +} + +// GetRunID returns the RunID field if it's non-nil, zero value otherwise. 
+func (u *UpdateDefaultSetupConfigurationResponse) GetRunID() int64 { + if u == nil || u.RunID == nil { + return 0 + } + return *u.RunID +} + +// GetRunURL returns the RunURL field if it's non-nil, zero value otherwise. +func (u *UpdateDefaultSetupConfigurationResponse) GetRunURL() string { + if u == nil || u.RunURL == nil { + return "" + } + return *u.RunURL +} + // GetAllowsPublicRepositories returns the AllowsPublicRepositories field if it's non-nil, zero value otherwise. func (u *UpdateRunnerGroupRequest) GetAllowsPublicRepositories() bool { if u == nil || u.AllowsPublicRepositories == nil { diff --git a/vendor/github.com/google/go-github/v53/github/github.go b/vendor/github.com/google/go-github/v53/github/github.go index 7d8aef53022..34a27282f0f 100644 --- a/vendor/github.com/google/go-github/v53/github/github.go +++ b/vendor/github.com/google/go-github/v53/github/github.go @@ -28,7 +28,7 @@ import ( ) const ( - Version = "v53.0.0" + Version = "v53.2.0" defaultAPIVersion = "2022-11-28" defaultBaseURL = "https://api.github.com/" @@ -187,6 +187,7 @@ type Client struct { Billing *BillingService Checks *ChecksService CodeScanning *CodeScanningService + Codespaces *CodespacesService Dependabot *DependabotService Enterprise *EnterpriseService Gists *GistsService @@ -325,6 +326,7 @@ func NewClient(httpClient *http.Client) *Client { c.Billing = (*BillingService)(&c.common) c.Checks = (*ChecksService)(&c.common) c.CodeScanning = (*CodeScanningService)(&c.common) + c.Codespaces = (*CodespacesService)(&c.common) c.Dependabot = (*DependabotService)(&c.common) c.Enterprise = (*EnterpriseService)(&c.common) c.Gists = (*GistsService)(&c.common) diff --git a/vendor/github.com/google/go-github/v53/github/messages.go b/vendor/github.com/google/go-github/v53/github/messages.go index 8547b8810f8..bb5ae3f3895 100644 --- a/vendor/github.com/google/go-github/v53/github/messages.go +++ b/vendor/github.com/google/go-github/v53/github/messages.go @@ -93,6 +93,7 @@ var ( "repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent", "release": "ReleaseEvent", "secret_scanning_alert": "SecretScanningAlertEvent", + "security_advisory": "SecurityAdvisoryEvent", "star": "StarEvent", "status": "StatusEvent", "team": "TeamEvent", diff --git a/vendor/github.com/google/go-github/v53/github/orgs_rules.go b/vendor/github.com/google/go-github/v53/github/orgs_rules.go new file mode 100644 index 00000000000..a3905af8fb3 --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/orgs_rules.go @@ -0,0 +1,105 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// GetAllOrganizationRulesets gets all the rulesets for the specified organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-all-organization-repository-rulesets +func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) { + u := fmt.Sprintf("orgs/%v/rulesets", org) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var rulesets []*Ruleset + resp, err := s.client.Do(ctx, req, &rulesets) + if err != nil { + return nil, resp, err + } + + return rulesets, resp, nil +} + +// CreateOrganizationRuleset creates a ruleset for the specified organization. 
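+//
+// A hedged sketch (client/ctx assumed; field values are placeholders, and the
+// rule constructors are defined in repos_rules.go of this package):
+//
+//	rs := &github.Ruleset{
+//		Name:        "block-force-pushes",
+//		Target:      github.String("branch"),
+//		Enforcement: "active",
+//		Rules:       []*github.RepositoryRule{github.NewNonFastForwardRule()},
+//	}
+//	created, _, err := client.Organizations.CreateOrganizationRuleset(ctx, "my-org", rs)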
+// +// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#create-an-organization-repository-ruleset +func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) { + u := fmt.Sprintf("orgs/%v/rulesets", org) + + req, err := s.client.NewRequest("POST", u, rs) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// GetOrganizationRuleset gets a ruleset from the specified organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#get-an-organization-repository-ruleset +func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) { + u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// UpdateOrganizationRuleset updates a ruleset from the specified organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#update-an-organization-repository-ruleset +func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) { + u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) + + req, err := s.client.NewRequest("PUT", u, rs) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// DeleteOrganizationRuleset deletes a ruleset from the specified organization. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/rules#delete-an-organization-repository-ruleset +func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/google/go-github/v53/github/repos_contents.go b/vendor/github.com/google/go-github/v53/github/repos_contents.go index be58fd52f66..874a3277283 100644 --- a/vendor/github.com/google/go-github/v53/github/repos_contents.go +++ b/vendor/github.com/google/go-github/v53/github/repos_contents.go @@ -192,8 +192,15 @@ func (s *RepositoriesService) DownloadContentsWithMeta(ctx context.Context, owne // as possible, both result types will be returned but only one will contain a // value and the other will be nil. // +// Due to an auth vulnerability issue in the GitHub v3 API, ".." is not allowed +// to appear anywhere in the "path" or this method will return an error. +// // GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-repository-content func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opts *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) { + if strings.Contains(path, "..") { + return nil, nil, nil, errors.New("path must not contain '..' 
due to auth vulnerability issue") + } + + escapedPath := (&url.URL{Path: strings.TrimSuffix(path, "/")}).String() u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath) u, err = addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v53/github/repos_rules.go b/vendor/github.com/google/go-github/v53/github/repos_rules.go new file mode 100644 index 00000000000..9299d3e7f3d --- /dev/null +++ b/vendor/github.com/google/go-github/v53/github/repos_rules.go @@ -0,0 +1,447 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "encoding/json" + "fmt" +) + +// BypassActor represents the bypass actors from a ruleset. +type BypassActor struct { + ActorID *int64 `json:"actor_id,omitempty"` + // Possible values for ActorType are: Team, Integration + ActorType *string `json:"actor_type,omitempty"` +} + +// RulesetLink represents a single link object from GitHub ruleset request _links. +type RulesetLink struct { + HRef *string `json:"href,omitempty"` +} + +// RulesetLinks represents the "_links" object in a Ruleset. +type RulesetLinks struct { + Self *RulesetLink `json:"self,omitempty"` +} + +// RulesetRefConditionParameters represents the conditions object for ref_names. +type RulesetRefConditionParameters struct { + Include []string `json:"include"` + Exclude []string `json:"exclude"` +} + +// RulesetRepositoryConditionParameters represents the conditions object for repository_names. +type RulesetRepositoryConditionParameters struct { + Include []string `json:"include,omitempty"` + Exclude []string `json:"exclude,omitempty"` + Protected *bool `json:"protected,omitempty"` +} + +// RulesetConditions represents the conditions object in a ruleset. +type RulesetConditions struct { + RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"` + RepositoryName *RulesetRepositoryConditionParameters `json:"repository_name,omitempty"` +} + +// RulePatternParameters represents the rule pattern parameters. +type RulePatternParameters struct { + Name *string `json:"name,omitempty"` + // If Negate is true, the rule will fail if the pattern matches. + Negate *bool `json:"negate,omitempty"` + // Possible values for Operator are: starts_with, ends_with, contains, regex + Operator string `json:"operator"` + Pattern string `json:"pattern"` +} + +// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters. +type UpdateAllowsFetchAndMergeRuleParameters struct { + UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"` +} + +// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters. +type RequiredDeploymentEnvironmentsRuleParameters struct { + RequiredDeploymentEnvironments []string `json:"required_deployment_environments"` +} + +// PullRequestRuleParameters represents the pull_request rule parameters. +type PullRequestRuleParameters struct { + DismissStaleReviewsOnPush bool `json:"dismiss_stale_reviews_on_push"` + RequireCodeOwnerReview bool `json:"require_code_owner_review"` + RequireLastPushApproval bool `json:"require_last_push_approval"` + RequiredApprovingReviewCount int `json:"required_approving_review_count"` + RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"` +} + +// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
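+//
+// A hedged sketch of building the corresponding rule with the constructor
+// defined later in this file (values are placeholders):
+//
+//	rule := github.NewRequiredStatusChecksRule(&github.RequiredStatusChecksRuleParameters{
+//		RequiredStatusChecks: []github.RuleRequiredStatusChecks{
+//			{Context: "ci/build", IntegrationID: github.Int64(12345)},
+//		},
+//		StrictRequiredStatusChecksPolicy: true,
+//	})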
+type RuleRequiredStatusChecks struct { + Context string `json:"context"` + IntegrationID *int64 `json:"integration_id,omitempty"` +} + +// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters. +type RequiredStatusChecksRuleParameters struct { + RequiredStatusChecks []RuleRequiredStatusChecks `json:"required_status_checks"` + StrictRequiredStatusChecksPolicy bool `json:"strict_required_status_checks_policy"` +} + +// RepositoryRule represents a GitHub Rule. +type RepositoryRule struct { + Type string `json:"type"` + Parameters *json.RawMessage `json:"parameters,omitempty"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// This helps us handle the fact that RepositoryRule parameter field can be of numerous types. +func (r *RepositoryRule) UnmarshalJSON(data []byte) error { + type rule RepositoryRule + var RepositoryRule rule + if err := json.Unmarshal(data, &RepositoryRule); err != nil { + return err + } + + r.Type = RepositoryRule.Type + + switch RepositoryRule.Type { + case "creation", "deletion", "required_linear_history", "required_signatures", "non_fast_forward": + r.Parameters = nil + case "update": + params := UpdateAllowsFetchAndMergeRuleParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "required_deployments": + params := RequiredDeploymentEnvironmentsRuleParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern": + params := RulePatternParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "pull_request": + params := PullRequestRuleParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "required_status_checks": + params := RequiredStatusChecksRuleParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + default: + r.Type = "" + r.Parameters = nil + return fmt.Errorf("RepositoryRule.Type %T is not yet implemented, unable to unmarshal", RepositoryRule.Type) + } + + return nil +} + +// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs. +func NewCreationRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "creation", + } +} + +// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs. +func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "update", + Parameters: &rawParams, + } +} + +// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs. 
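A short sketch of what the UnmarshalJSON logic above does for a typed rule; the JSON payload is invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	// Invented payload matching the "update" case handled by UnmarshalJSON.
	data := []byte(`{"type":"update","parameters":{"update_allows_fetch_and_merge":true}}`)

	var rule github.RepositoryRule
	if err := json.Unmarshal(data, &rule); err != nil {
		panic(err)
	}

	// For the "update" type, Parameters is re-marshaled into a RawMessage,
	// so it can be decoded again into the concrete parameter struct.
	if rule.Parameters != nil {
		var params github.UpdateAllowsFetchAndMergeRuleParameters
		if err := json.Unmarshal(*rule.Parameters, &params); err != nil {
			panic(err)
		}
		fmt.Println(rule.Type, params.UpdateAllowsFetchAndMerge) // update true
	}
}
```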
+func NewDeletionRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "deletion", + } +} + +// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches. +func NewRequiredLinearHistoryRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "required_linear_history", + } +} + +// NewRequiredDeploymentsRule creates a rule to require environments to be successfully deployed before they can be merged into the matching branches. +func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "required_deployments", + Parameters: &rawParams, + } +} + +// NewRequiredSignaturesRule creates a rule a to require commits pushed to matching branches to have verified signatures. +func NewRequiredSignaturesRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "required_signatures", + } +} + +// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged. +func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "pull_request", + Parameters: &rawParams, + } +} + +// NewRequiredStatusChecksRule creates a rule to require which status checks must pass before branches can be merged into a branch rule. +func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "required_status_checks", + Parameters: &rawParams, + } +} + +// NewNonFastForwardRule creates a rule as part to prevent users with push access from force pushing to matching branches. +func NewNonFastForwardRule() (rule *RepositoryRule) { + return &RepositoryRule{ + Type: "non_fast_forward", + } +} + +// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches. +func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "commit_message_pattern", + Parameters: &rawParams, + } +} + +// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches. +func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "commit_author_email_pattern", + Parameters: &rawParams, + } +} + +// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches. +func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "committer_email_pattern", + Parameters: &rawParams, + } +} + +// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches. 
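A hedged sketch of the pattern-rule constructors in use; the pattern and name are illustrative, and "starts_with" is one of the operators listed on RulePatternParameters:

```go
package main

import (
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	// Illustrative commit-message policy.
	rule := github.NewCommitMessagePatternRule(&github.RulePatternParameters{
		Name:     github.String("require ticket prefix"),
		Operator: "starts_with",
		Pattern:  "JIRA-",
	})
	fmt.Println(rule.Type) // "commit_message_pattern"
}
```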
+func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "branch_name_pattern", + Parameters: &rawParams, + } +} + +// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches. +func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "tag_name_pattern", + Parameters: &rawParams, + } +} + +// Ruleset represents a GitHub ruleset object. +type Ruleset struct { + ID int64 `json:"id"` + Name string `json:"name"` + // Possible values for Target are branch, tag + Target *string `json:"target,omitempty"` + // Possible values for SourceType are: Repository, Organization + SourceType *string `json:"source_type,omitempty"` + Source string `json:"source"` + // Possible values for Enforcement are: disabled, active, evaluate + Enforcement string `json:"enforcement"` + // Possible values for BypassMode are: none, repository, organization + BypassMode *string `json:"bypass_mode,omitempty"` + BypassActors []*BypassActor `json:"bypass_actors,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Links *RulesetLinks `json:"_links,omitempty"` + Conditions *RulesetConditions `json:"conditions,omitempty"` + Rules []*RepositoryRule `json:"rules,omitempty"` +} + +// GetRulesForBranch gets all the rules that apply to the specified branch. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-rules-for-a-branch +func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var rules []*RepositoryRule + resp, err := s.client.Do(ctx, req, &rules) + if err != nil { + return nil, resp, err + } + + return rules, resp, nil +} + +// GetAllRulesets gets all the rules that apply to the specified repository. +// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-all-repository-rulesets +func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var ruleset []*Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// CreateRuleset creates a ruleset for the specified repository. 
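A minimal sketch of calling GetRulesForBranch; "octocat", "hello-world", and "main" are placeholders, and an unauthenticated client is used only to keep the example short:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	client := github.NewClient(nil) // real use would supply an authenticated http.Client

	rules, _, err := client.Repositories.GetRulesForBranch(context.Background(), "octocat", "hello-world", "main")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	for _, r := range rules {
		fmt.Println(r.Type)
	}
}
```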
+// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#create-a-repository-ruleset +func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo) + + req, err := s.client.NewRequest("POST", u, rs) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// GetRuleset gets a ruleset for the specified repository. +// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#get-a-repository-ruleset +func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// UpdateRuleset updates a ruleset for the specified repository. +// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#update-a-repository-ruleset +func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID) + + req, err := s.client.NewRequest("PUT", u, rs) + if err != nil { + return nil, nil, err + } + + var ruleset *Ruleset + resp, err := s.client.Do(ctx, req, &ruleset) + if err != nil { + return nil, resp, err + } + + return ruleset, resp, nil +} + +// DeleteRuleset deletes a ruleset for the specified repository. 
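A minimal sketch of CreateRuleset, assuming an authenticated client in real use; the ruleset name, target, and enforcement level are illustrative:

```go
package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/v53/github"
)

func main() {
	client := github.NewClient(nil) // placeholder; a real call needs credentials

	// Minimal ruleset blocking force pushes; values are illustrative.
	rs := &github.Ruleset{
		Name:        "block-force-pushes",
		Target:      github.String("branch"),
		Enforcement: "active",
		Rules:       []*github.RepositoryRule{github.NewNonFastForwardRule()},
	}
	created, _, err := client.Repositories.CreateRuleset(context.Background(), "octocat", "hello-world", rs)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(created.ID)
}
```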
+// +// GitHub API docs: https://docs.github.com/en/rest/repos/rules#delete-a-repository-ruleset +func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md index 00b4c9621b0..c8db11f147c 100644 --- a/vendor/github.com/tidwall/gjson/README.md +++ b/vendor/github.com/tidwall/gjson/README.md @@ -176,7 +176,7 @@ The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, ```go result.Int() int64 // -9223372036854775808 to 9223372036854775807 -result.Uint() int64 // 0 to 18446744073709551615 +result.Uint() uint64 // 0 to 18446744073709551615 ``` ## Modifiers and path chaining diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go index 330218d4ae4..53cbd2363fe 100644 --- a/vendor/github.com/tidwall/gjson/gjson.go +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -1009,8 +1009,8 @@ func parseObjectPath(path string) (r objectPathResult) { r.piped = true } else { r.path = path[i+1:] + r.more = true } - r.more = true return } else if path[i] == '|' { r.part = string(epart) diff --git a/vendor/modules.txt b/vendor/modules.txt index b184ab403e4..8d25722e995 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -107,7 +107,7 @@ github.com/apparentlymart/go-textseg/textseg # github.com/apparentlymart/go-textseg/v13 v13.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v13/textseg -# github.com/argoproj/argo-cd/v2 v2.8.19 +# github.com/argoproj/argo-cd/v2 v2.9.21 ## explicit; go 1.19 github.com/argoproj/argo-cd/v2/common github.com/argoproj/argo-cd/v2/pkg/apiclient/account @@ -160,10 +160,11 @@ github.com/argoproj/argo-workflows/v3/util/slice github.com/argoproj/argo-workflows/v3/util/wait github.com/argoproj/argo-workflows/v3/workflow/common github.com/argoproj/argo-workflows/v3/workflow/util -# github.com/argoproj/gitops-engine v0.7.1-0.20231013183858-f15cf615b814 -## explicit; go 1.19 +# github.com/argoproj/gitops-engine v0.7.1-0.20240715141028-c68bce0f979c +## explicit; go 1.17 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/api/v1/endpoints github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/util/hash +github.com/argoproj/gitops-engine/pkg/cache github.com/argoproj/gitops-engine/pkg/diff github.com/argoproj/gitops-engine/pkg/diff/internal/fieldmanager github.com/argoproj/gitops-engine/pkg/health @@ -182,10 +183,11 @@ github.com/argoproj/pkg/grpc/http github.com/argoproj/pkg/rand github.com/argoproj/pkg/sync github.com/argoproj/pkg/time -# github.com/aws/aws-sdk-go v1.44.290 +# github.com/aws/aws-sdk-go v1.44.317 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn +github.com/aws/aws-sdk-go/aws/auth/bearer github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client @@ -235,6 +237,7 @@ github.com/aws/aws-sdk-go/service/s3/s3iface github.com/aws/aws-sdk-go/service/s3/s3manager github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface +github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 @@ -256,7 
+259,7 @@ github.com/bmatcuk/doublestar/v4 # github.com/bombsimon/logrusr/v2 v2.0.1 ## explicit; go 1.13 github.com/bombsimon/logrusr/v2 -# github.com/bradleyfalzon/ghinstallation/v2 v2.5.0 +# github.com/bradleyfalzon/ghinstallation/v2 v2.6.0 ## explicit; go 1.13 github.com/bradleyfalzon/ghinstallation/v2 # github.com/caarlos0/env v3.5.0+incompatible @@ -348,7 +351,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.0.25-0.20240812113340-f14be466613d +# github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 ## explicit; go 1.21 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/blob-storage @@ -382,6 +385,9 @@ github.com/devtron-labs/protos/gitSensor # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f ## explicit github.com/dgryski/go-rendezvous +# github.com/docker/cli v24.0.6+incompatible +## explicit +github.com/docker/cli/cli/config/types # github.com/docker/distribution v2.8.2+incompatible ## explicit github.com/docker/distribution/digestset @@ -618,7 +624,7 @@ github.com/google/go-cmp/cmp/internal/value # github.com/google/go-github v17.0.0+incompatible ## explicit github.com/google/go-github/github -# github.com/google/go-github/v53 v53.0.0 +# github.com/google/go-github/v53 v53.2.0 ## explicit; go 1.17 github.com/google/go-github/v53/github # github.com/google/go-querystring v1.1.0 @@ -964,7 +970,7 @@ github.com/syndtr/goleveldb/leveldb/opt github.com/syndtr/goleveldb/leveldb/storage github.com/syndtr/goleveldb/leveldb/table github.com/syndtr/goleveldb/leveldb/util -# github.com/tidwall/gjson v1.14.3 +# github.com/tidwall/gjson v1.14.4 ## explicit; go 1.12 github.com/tidwall/gjson # github.com/tidwall/match v1.1.1 @@ -2046,7 +2052,7 @@ k8s.io/utils/trace # mellium.im/sasl v0.3.1 ## explicit; go 1.18 mellium.im/sasl -# oras.land/oras-go/v2 v2.2.0 +# oras.land/oras-go/v2 v2.3.0 ## explicit; go 1.19 oras.land/oras-go/v2/content oras.land/oras-go/v2/errdef diff --git a/vendor/oras.land/oras-go/v2/content/graph.go b/vendor/oras.land/oras-go/v2/content/graph.go index fa2f9efe785..9ae837285e2 100644 --- a/vendor/oras.land/oras-go/v2/content/graph.go +++ b/vendor/oras.land/oras-go/v2/content/graph.go @@ -75,18 +75,33 @@ func Successors(ctx context.Context, fetcher Fetcher, node ocispec.Descriptor) ( } nodes = append(nodes, manifest.Config) return append(nodes, manifest.Layers...), nil - case docker.MediaTypeManifestList, ocispec.MediaTypeImageIndex: + case docker.MediaTypeManifestList: content, err := FetchAll(ctx, fetcher, node) if err != nil { return nil, err } - // docker manifest list and oci index are equivalent for successors. 
+ // OCI manifest index schema can be used to marshal docker manifest list var index ocispec.Index if err := json.Unmarshal(content, &index); err != nil { return nil, err } return index.Manifests, nil + case ocispec.MediaTypeImageIndex: + content, err := FetchAll(ctx, fetcher, node) + if err != nil { + return nil, err + } + + var index ocispec.Index + if err := json.Unmarshal(content, &index); err != nil { + return nil, err + } + var nodes []ocispec.Descriptor + if index.Subject != nil { + nodes = append(nodes, *index.Subject) + } + return append(nodes, index.Manifests...), nil case spec.MediaTypeArtifactManifest: content, err := FetchAll(ctx, fetcher, node) if err != nil { diff --git a/vendor/oras.land/oras-go/v2/content/reader.go b/vendor/oras.land/oras-go/v2/content/reader.go index 11d27b236eb..e575378e313 100644 --- a/vendor/oras.land/oras-go/v2/content/reader.go +++ b/vendor/oras.land/oras-go/v2/content/reader.go @@ -70,7 +70,7 @@ func (vr *VerifyReader) Read(p []byte) (n int, err error) { return } -// Verify verifies the read content against the size and the digest. +// Verify checks for remaining unread content and verifies the read content against the digest func (vr *VerifyReader) Verify() error { if vr.verified { return nil @@ -120,7 +120,10 @@ func ReadAll(r io.Reader, desc ocispec.Descriptor) ([]byte, error) { buf := make([]byte, desc.Size) vr := NewVerifyReader(r, desc) - if _, err := io.ReadFull(vr, buf); err != nil { + if n, err := io.ReadFull(vr, buf); err != nil { + if errors.Is(err, io.ErrUnexpectedEOF) { + return nil, fmt.Errorf("read failed: expected content size of %d, got %d, for digest %s: %w", desc.Size, n, desc.Digest.String(), err) + } return nil, fmt.Errorf("read failed: %w", err) } if err := vr.Verify(); err != nil { diff --git a/vendor/oras.land/oras-go/v2/content/storage.go b/vendor/oras.land/oras-go/v2/content/storage.go index 971142cbf9d..47c95d87693 100644 --- a/vendor/oras.land/oras-go/v2/content/storage.go +++ b/vendor/oras.land/oras-go/v2/content/storage.go @@ -31,7 +31,7 @@ type Fetcher interface { // Pusher pushes content. type Pusher interface { // Push pushes the content, matching the expected descriptor. - // Reader is perferred to Writer so that the suitable buffer size can be + // Reader is preferred to Writer so that the suitable buffer size can be // chosen by the underlying implementation. Furthermore, the implementation // can also do reflection on the Reader for more advanced I/O optimization. 
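The ReadAll change above fails fast with io.ErrUnexpectedEOF details when content is shorter than the descriptor claims; a self-contained sketch (descriptor values computed locally):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"oras.land/oras-go/v2/content"
)

func main() {
	blob := []byte("hello")
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
	}

	// Matching size and digest: ReadAll succeeds.
	got, err := content.ReadAll(bytes.NewReader(blob), desc)
	fmt.Println(string(got), err)

	// Overstating the size triggers the new expected/got error message.
	desc.Size = int64(len(blob)) + 1
	_, err = content.ReadAll(bytes.NewReader(blob), desc)
	fmt.Println(err)
}
```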
Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error diff --git a/vendor/oras.land/oras-go/v2/errdef/errors.go b/vendor/oras.land/oras-go/v2/errdef/errors.go index 030360edb71..7adb44b173f 100644 --- a/vendor/oras.land/oras-go/v2/errdef/errors.go +++ b/vendor/oras.land/oras-go/v2/errdef/errors.go @@ -22,6 +22,7 @@ var ( ErrAlreadyExists = errors.New("already exists") ErrInvalidDigest = errors.New("invalid digest") ErrInvalidReference = errors.New("invalid reference") + ErrInvalidMediaType = errors.New("invalid media type") ErrMissingReference = errors.New("missing reference") ErrNotFound = errors.New("not found") ErrSizeExceedsLimit = errors.New("size exceeds limit") diff --git a/vendor/oras.land/oras-go/v2/internal/spec/artifact.go b/vendor/oras.land/oras-go/v2/internal/spec/artifact.go index 8aa8e79ecca..7f801fd9caf 100644 --- a/vendor/oras.land/oras-go/v2/internal/spec/artifact.go +++ b/vendor/oras.land/oras-go/v2/internal/spec/artifact.go @@ -17,8 +17,16 @@ package spec import ocispec "github.com/opencontainers/image-spec/specs-go/v1" -// AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. -const AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +const ( + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. + AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) // MediaTypeArtifactManifest specifies the media type for a content descriptor. const MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" diff --git a/vendor/oras.land/oras-go/v2/registry/reference.go b/vendor/oras.land/oras-go/v2/registry/reference.go index cea579a141e..7661a162bd0 100644 --- a/vendor/oras.land/oras-go/v2/registry/reference.go +++ b/vendor/oras.land/oras-go/v2/registry/reference.go @@ -31,14 +31,16 @@ var ( // repository name set under OCI distribution spec is a subset of the docker // spec. For maximum compatability, the docker spec is verified client-side. // Further checks are left to the server-side. + // // References: - // - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53 - // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pulling-manifests + // - https://github.com/distribution/distribution/blob/v2.7.1/reference/regexp.go#L53 + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pulling-manifests repositoryRegexp = regexp.MustCompile(`^[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*(?:/[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*)*$`) // tagRegexp checks the tag name. // The docker and OCI spec have the same regular expression. 
- // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pulling-manifests + // + // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pulling-manifests tagRegexp = regexp.MustCompile(`^[\w][\w.-]{0,127}$`) ) diff --git a/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go b/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go index cf0018a018d..fb192aa8a3f 100644 --- a/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go +++ b/vendor/oras.land/oras-go/v2/registry/remote/errcode/errors.go @@ -24,7 +24,7 @@ import ( ) // References: -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#error-codes +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#error-codes // - https://docs.docker.com/registry/spec/api/#errors-2 const ( ErrorCodeBlobUnknown = "BLOB_UNKNOWN" @@ -45,7 +45,7 @@ const ( // Error represents a response inner error returned by the remote // registry. // References: -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#error-codes +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#error-codes // - https://docs.docker.com/registry/spec/api/#errors-2 type Error struct { Code string `json:"code"` @@ -73,7 +73,7 @@ func (e Error) Error() string { // Errors represents a list of response inner errors returned by the remote // server. // References: -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#error-codes +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#error-codes // - https://docs.docker.com/registry/spec/api/#errors-2 type Errors []Error diff --git a/vendor/oras.land/oras-go/v2/registry/remote/referrers.go b/vendor/oras.land/oras-go/v2/registry/remote/referrers.go index a3ed08ca5b8..191db9d179d 100644 --- a/vendor/oras.land/oras-go/v2/registry/remote/referrers.go +++ b/vendor/oras.land/oras-go/v2/registry/remote/referrers.go @@ -22,7 +22,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "oras.land/oras-go/v2/content" "oras.land/oras-go/v2/internal/descriptor" - "oras.land/oras-go/v2/internal/spec" ) // zeroDigest represents a digest that consists of zeros. zeroDigest is used @@ -103,17 +102,15 @@ func (e *ReferrersError) IsReferrersIndexDelete() bool { // buildReferrersTag builds the referrers tag for the given manifest descriptor. // Format: - -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#unavailable-referrers-api +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#unavailable-referrers-api func buildReferrersTag(desc ocispec.Descriptor) string { alg := desc.Digest.Algorithm().String() encoded := desc.Digest.Encoded() return alg + "-" + encoded } -// isReferrersFilterApplied checks annotations to see if requested is in the -// applied filter list. -func isReferrersFilterApplied(annotations map[string]string, requested string) bool { - applied := annotations[spec.AnnotationReferrersFiltersApplied] +// isReferrersFilterApplied checks if requsted is in the applied filter list. 
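buildReferrersTag above derives the fallback tag as `<algorithm>-<encoded>`; a standalone sketch of the same scheme (it does not call the unexported helper):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Same scheme as buildReferrersTag: "<algorithm>-<hex>".
	d := digest.FromString("example subject manifest")
	tag := d.Algorithm().String() + "-" + d.Encoded()
	fmt.Println(tag) // e.g. sha256-a1b2...
}
```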
+func isReferrersFilterApplied(applied, requested string) bool { if applied == "" || requested == "" { return false } diff --git a/vendor/oras.land/oras-go/v2/registry/remote/registry.go b/vendor/oras.land/oras-go/v2/registry/remote/registry.go index c8c414f17f2..8ae538d964f 100644 --- a/vendor/oras.land/oras-go/v2/registry/remote/registry.go +++ b/vendor/oras.land/oras-go/v2/registry/remote/registry.go @@ -73,13 +73,28 @@ func (r *Registry) client() Client { return r.Client } +// do sends an HTTP request and returns an HTTP response using the HTTP client +// returned by r.client(). +func (r *Registry) do(req *http.Request) (*http.Response, error) { + if r.HandleWarning == nil { + return r.client().Do(req) + } + + resp, err := r.client().Do(req) + if err != nil { + return nil, err + } + handleWarningHeaders(resp.Header.Values(headerWarning), r.HandleWarning) + return resp, nil +} + // Ping checks whether or not the registry implement Docker Registry API V2 or // OCI Distribution Specification. // Ping can be used to check authentication when an auth client is configured. // // References: // - https://docs.docker.com/registry/spec/api/#base -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#api +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#api func (r *Registry) Ping(ctx context.Context) error { url := buildRegistryBaseURL(r.PlainHTTP, r.Reference) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) @@ -87,7 +102,7 @@ func (r *Registry) Ping(ctx context.Context) error { return err } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return err } @@ -142,7 +157,7 @@ func (r *Registry) repositories(ctx context.Context, last string, fn func(repos } req.URL.RawQuery = q.Encode() } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return "", err } diff --git a/vendor/oras.land/oras-go/v2/registry/remote/repository.go b/vendor/oras.land/oras-go/v2/registry/remote/repository.go index 32ac347d630..0f8c6acd21a 100644 --- a/vendor/oras.land/oras-go/v2/registry/remote/repository.go +++ b/vendor/oras.land/oras-go/v2/registry/remote/repository.go @@ -47,11 +47,37 @@ import ( "oras.land/oras-go/v2/registry/remote/internal/errutil" ) -// dockerContentDigestHeader - The Docker-Content-Digest header, if present -// on the response, returns the canonical digest of the uploaded blob. -// See https://docs.docker.com/registry/spec/api/#digest-header -// See https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pull -const dockerContentDigestHeader = "Docker-Content-Digest" +const ( + // headerDockerContentDigest is the "Docker-Content-Digest" header. + // If present on the response, it contains the canonical digest of the + // uploaded blob. + // + // References: + // - https://docs.docker.com/registry/spec/api/#digest-header + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pull + headerDockerContentDigest = "Docker-Content-Digest" + + // headerOCIFiltersApplied is the "OCI-Filters-Applied" header. + // If present on the response, it contains a comma-separated list of the + // applied filters. + // + // Reference: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers + headerOCIFiltersApplied = "OCI-Filters-Applied" + + // headerOCISubject is the "OCI-Subject" header. 
+ // If present on the response, it contains the digest of the subject, + // indicating that Referrers API is supported by the registry. + headerOCISubject = "OCI-Subject" +) + +// filterTypeArtifactType is the "artifactType" filter applied on the list of +// referrers. +// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +const filterTypeArtifactType = "artifactType" // Client is an interface for a HTTP client. type Client interface { @@ -93,7 +119,7 @@ type Repository struct { // ReferrerListPageSize specifies the page size when invoking the Referrers // API. // If zero, the page size is determined by the remote registry. - // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers + // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers ReferrerListPageSize int // MaxMetadataBytes specifies a limit on how many response bytes are allowed @@ -102,6 +128,25 @@ type Repository struct { // If less than or equal to zero, a default (currently 4MiB) is used. MaxMetadataBytes int64 + // SkipReferrersGC specifies whether to delete the dangling referrers + // index when referrers tag schema is utilized. + // - If false, the old referrers index will be deleted after the new one + // is successfully uploaded. + // - If true, the old referrers index is kept. + // By default, it is disabled (set to false). See also: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#referrers-tag-schema + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pushing-manifests-with-subject + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#deleting-manifests + SkipReferrersGC bool + + // HandleWarning handles the warning returned by the remote server. + // Callers SHOULD deduplicate warnings from multiple associated responses. + // + // References: + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#warnings + // - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + HandleWarning func(warning Warning) + // NOTE: Must keep fields in sync with newRepositoryWithOptions function. // referrersState represents that if the repository supports Referrers API. @@ -145,6 +190,7 @@ func newRepositoryWithOptions(ref registry.Reference, opts *RepositoryOptions) ( Client: opts.Client, Reference: ref, PlainHTTP: opts.PlainHTTP, + SkipReferrersGC: opts.SkipReferrersGC, ManifestMediaTypes: slices.Clone(opts.ManifestMediaTypes), TagListPageSize: opts.TagListPageSize, ReferrerListPageSize: opts.ReferrerListPageSize, @@ -159,9 +205,9 @@ func newRepositoryWithOptions(ref registry.Reference, opts *RepositoryOptions) ( // SetReferrersCapability returns ErrReferrersCapabilityAlreadySet if the // Referrers API capability has been already set. // - When the capability is set to true, the Referrers() function will always -// request the Referrers API. Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +// request the Referrers API. Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers // - When the capability is set to false, the Referrers() function will always -// request the Referrers Tag. 
Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#referrers-tag-schema +// request the Referrers Tag. Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#referrers-tag-schema // - When the capability is not set, the Referrers() function will automatically // determine which API to use. func (r *Repository) SetReferrersCapability(capable bool) error { @@ -196,6 +242,21 @@ func (r *Repository) client() Client { return r.Client } +// do sends an HTTP request and returns an HTTP response using the HTTP client +// returned by r.client(). +func (r *Repository) do(req *http.Request) (*http.Response, error) { + if r.HandleWarning == nil { + return r.client().Do(req) + } + + resp, err := r.client().Do(req) + if err != nil { + return nil, err + } + handleWarningHeaders(resp.Header.Values(headerWarning), r.HandleWarning) + return resp, nil +} + // blobStore detects the blob store for the given descriptor. func (r *Repository) blobStore(desc ocispec.Descriptor) registry.BlobStore { if isManifest(r.ManifestMediaTypes, desc) { @@ -320,7 +381,7 @@ func (r *Repository) ParseReference(reference string) (registry.Reference, error // of the Tags list. // // References: -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#content-discovery +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#content-discovery // - https://docs.docker.com/registry/spec/api/#tags func (r *Repository) Tags(ctx context.Context, last string, fn func(tags []string) error) error { ctx = registryutil.WithScopeHint(ctx, r.Reference, auth.ActionPull) @@ -353,7 +414,7 @@ func (r *Repository) tags(ctx context.Context, last string, fn func(tags []strin } req.URL.RawQuery = q.Encode() } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return "", err } @@ -379,7 +440,7 @@ func (r *Repository) tags(ctx context.Context, last string, fn func(tags []strin // Predecessors returns the descriptors of image or artifact manifests directly // referencing the given manifest descriptor. // Predecessors internally leverages Referrers. -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers func (r *Repository) Predecessors(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { var res []ocispec.Descriptor if err := r.Referrers(ctx, desc, "", func(referrers []ocispec.Descriptor) error { @@ -398,7 +459,7 @@ func (r *Repository) Predecessors(ctx context.Context, desc ocispec.Descriptor) // If artifactType is not empty, only referrers of the same artifact type are // fed to fn. 
// -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers func (r *Repository) Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { state := r.loadReferrersState() if state == referrersStateUnsupported { @@ -470,7 +531,7 @@ func (r *Repository) referrersPageByAPI(ctx context.Context, artifactType string req.URL.RawQuery = q.Encode() } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return "", err } @@ -485,10 +546,19 @@ func (r *Repository) referrersPageByAPI(ctx context.Context, artifactType string if err := json.NewDecoder(lr).Decode(&index); err != nil { return "", fmt.Errorf("%s %q: failed to decode response: %w", resp.Request.Method, resp.Request.URL, err) } + referrers := index.Manifests - if artifactType != "" && !isReferrersFilterApplied(index.Annotations, "artifactType") { - // perform client side filtering if the filter is not applied on the server side - referrers = filterReferrers(referrers, artifactType) + if artifactType != "" { + // check both filters header and filters annotations for compatibility + // latest spec for filters header: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers + // older spec for filters annotations: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers + filtersHeader := resp.Header.Get(headerOCIFiltersApplied) + filtersAnnotation := index.Annotations[spec.AnnotationReferrersFiltersApplied] + if !isReferrersFilterApplied(filtersHeader, filterTypeArtifactType) && + !isReferrersFilterApplied(filtersAnnotation, filterTypeArtifactType) { + // perform client side filtering if the filter is not applied on the server side + referrers = filterReferrers(referrers, artifactType) + } } if len(referrers) > 0 { if err := fn(referrers); err != nil { @@ -502,7 +572,7 @@ func (r *Repository) referrersPageByAPI(ctx context.Context, artifactType string // referencing the given manifest descriptor by requesting referrers tag. // fn is called for the referrers result. If artifactType is not empty, // only referrers of the same artifact type are fed to fn. 
-// reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#backwards-compatibility +// reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#backwards-compatibility func (r *Repository) referrersByTagSchema(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error { referrersTag := buildReferrersTag(desc) _, referrers, err := r.referrersFromIndex(ctx, referrersTag) @@ -572,7 +642,7 @@ func (r *Repository) pingReferrers(ctx context.Context) (bool, error) { if err != nil { return false, err } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return false, err } @@ -610,7 +680,7 @@ func (r *Repository) delete(ctx context.Context, target ocispec.Descriptor, isMa return err } - resp, err := r.client().Do(req) + resp, err := r.do(req) if err != nil { return err } @@ -642,7 +712,7 @@ func (s *blobStore) Fetch(ctx context.Context, target ocispec.Descriptor) (rc io return nil, err } - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return nil, err } @@ -689,7 +759,7 @@ func (s *blobStore) Mount(ctx context.Context, desc ocispec.Descriptor, fromRepo if err != nil { return err } - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return err } @@ -715,7 +785,7 @@ func (s *blobStore) Mount(ctx context.Context, desc ocispec.Descriptor, fromRepo // push it. If the caller has provided a getContent function, we // can use that, otherwise pull the content from the source repository. // - // [spec]: https://github.com/opencontainers/distribution-spec/blob/main/spec.md#mounting-a-blob-from-another-repository + // [spec]: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#mounting-a-blob-from-another-repository var r io.ReadCloser if getContent != nil { @@ -746,10 +816,11 @@ func (s *blobStore) sibling(otherRepoName string) *blobStore { // Push is done by conventional 2-step monolithic upload instead of a single // `POST` request for better overall performance. It also allows early fail on // authentication errors. +// // References: -// - https://docs.docker.com/registry/spec/api/#pushing-an-image -// - https://docs.docker.com/registry/spec/api/#initiate-blob-upload -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pushing-a-blob-monolithically +// - https://docs.docker.com/registry/spec/api/#pushing-an-image +// - https://docs.docker.com/registry/spec/api/#initiate-blob-upload +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pushing-a-blob-monolithically func (s *blobStore) Push(ctx context.Context, expected ocispec.Descriptor, content io.Reader) error { // start an upload // pushing usually requires both pull and push actions. @@ -761,7 +832,7 @@ func (s *blobStore) Push(ctx context.Context, expected ocispec.Descriptor, conte return err } - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return err } @@ -816,7 +887,7 @@ func (s *blobStore) completePushAfterInitialPost(ctx context.Context, req *http. 
if auth := resp.Request.Header.Get("Authorization"); auth != "" { req.Header.Set("Authorization", auth) } - resp, err = s.repo.client().Do(req) + resp, err = s.repo.do(req) if err != nil { return err } @@ -862,7 +933,7 @@ func (s *blobStore) Resolve(ctx context.Context, reference string) (ocispec.Desc return ocispec.Descriptor{}, err } - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return ocispec.Descriptor{}, err } @@ -897,7 +968,7 @@ func (s *blobStore) FetchReference(ctx context.Context, reference string) (desc return ocispec.Descriptor{}, nil, err } - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return ocispec.Descriptor{}, nil, err } @@ -973,7 +1044,7 @@ func (s *manifestStore) Fetch(ctx context.Context, target ocispec.Descriptor) (r } req.Header.Set("Accept", target.MediaType) - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return nil, err } @@ -1032,7 +1103,8 @@ func (s *manifestStore) Delete(ctx context.Context, target ocispec.Descriptor) e // deleteWithIndexing removes the manifest content identified by the descriptor, // and indexes referrers for the manifest when needed. func (s *manifestStore) deleteWithIndexing(ctx context.Context, target ocispec.Descriptor) error { - if target.MediaType == spec.MediaTypeArtifactManifest || target.MediaType == ocispec.MediaTypeImageManifest { + switch target.MediaType { + case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: if state := s.repo.loadReferrersState(); state == referrersStateSupported { // referrers API is available, no client-side indexing needed return s.repo.delete(ctx, target, true) @@ -1053,9 +1125,12 @@ func (s *manifestStore) deleteWithIndexing(ctx context.Context, target ocispec.D return s.repo.delete(ctx, target, true) } -// indexReferrersForDelete indexes referrers for image or artifact manifest with -// the subject field on manifest delete. -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#deleting-manifests +// indexReferrersForDelete indexes referrers for manifests with a subject field +// on manifest delete. +// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#deleting-manifests +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#deleting-manifests func (s *manifestStore) indexReferrersForDelete(ctx context.Context, desc ocispec.Descriptor, manifestJSON []byte) error { var manifest struct { Subject *ocispec.Descriptor `json:"subject"` @@ -1095,7 +1170,7 @@ func (s *manifestStore) Resolve(ctx context.Context, reference string) (ocispec. 
} req.Header.Set("Accept", manifestAcceptHeader(s.repo.ManifestMediaTypes)) - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return ocispec.Descriptor{}, err } @@ -1127,7 +1202,7 @@ func (s *manifestStore) FetchReference(ctx context.Context, reference string) (d } req.Header.Set("Accept", manifestAcceptHeader(s.repo.ManifestMediaTypes)) - resp, err := s.repo.client().Do(req) + resp, err := s.repo.do(req) if err != nil { return ocispec.Descriptor{}, nil, err } @@ -1225,7 +1300,7 @@ func (s *manifestStore) push(ctx context.Context, expected ocispec.Descriptor, c return err } } - resp, err := client.Do(req) + resp, err := s.repo.do(req) if err != nil { return err } @@ -1234,14 +1309,26 @@ func (s *manifestStore) push(ctx context.Context, expected ocispec.Descriptor, c if resp.StatusCode != http.StatusCreated { return errutil.ParseErrorResponse(resp) } + s.checkOCISubjectHeader(resp) return verifyContentDigest(resp, expected.Digest) } +// checkOCISubjectHeader checks the "OCI-Subject" header in the response and +// sets referrers capability accordingly. +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pushing-manifests-with-subject +func (s *manifestStore) checkOCISubjectHeader(resp *http.Response) { + // Referrers capability is not set to false when the subject header is not + // present, as the server may still conform to an older version of the spec + if subjectHeader := resp.Header.Get(headerOCISubject); subjectHeader != "" { + s.repo.SetReferrersCapability(true) + } +} + // pushWithIndexing pushes the manifest content matching the expected descriptor, // and indexes referrers for the manifest when needed. func (s *manifestStore) pushWithIndexing(ctx context.Context, expected ocispec.Descriptor, r io.Reader, reference string) error { switch expected.MediaType { - case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest: + case spec.MediaTypeArtifactManifest, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: if state := s.repo.loadReferrersState(); state == referrersStateSupported { // referrers API is available, no client-side indexing needed return s.push(ctx, expected, r, reference) @@ -1257,15 +1344,22 @@ func (s *manifestStore) pushWithIndexing(ctx context.Context, expected ocispec.D if err := s.push(ctx, expected, bytes.NewReader(manifestJSON), reference); err != nil { return err } + // check referrers API availability again after push + if state := s.repo.loadReferrersState(); state == referrersStateSupported { + return nil + } return s.indexReferrersForPush(ctx, expected, manifestJSON) default: return s.push(ctx, expected, r, reference) } } -// indexReferrersForPush indexes referrers for image or artifact manifest with -// the subject field on manifest push. -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pushing-manifests-with-subject +// indexReferrersForPush indexes referrers for manifests with a subject field +// on manifest push. 
+// +// References: +// - Latest spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pushing-manifests-with-subject +// - Compatible spec: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pushing-manifests-with-subject func (s *manifestStore) indexReferrersForPush(ctx context.Context, desc ocispec.Descriptor, manifestJSON []byte) error { var subject ocispec.Descriptor switch desc.MediaType { @@ -1291,7 +1385,22 @@ func (s *manifestStore) indexReferrersForPush(ctx context.Context, desc ocispec. return nil } subject = *manifest.Subject - desc.ArtifactType = manifest.Config.MediaType + desc.ArtifactType = manifest.ArtifactType + if desc.ArtifactType == "" { + desc.ArtifactType = manifest.Config.MediaType + } + desc.Annotations = manifest.Annotations + case ocispec.MediaTypeImageIndex: + var manifest ocispec.Index + if err := json.Unmarshal(manifestJSON, &manifest); err != nil { + return fmt.Errorf("failed to decode manifest: %s: %s: %w", desc.Digest, desc.MediaType, err) + } + if manifest.Subject == nil { + // no subject, no indexing needed + return nil + } + subject = *manifest.Subject + desc.ArtifactType = manifest.ArtifactType desc.Annotations = manifest.Annotations default: return nil @@ -1311,12 +1420,12 @@ func (s *manifestStore) indexReferrersForPush(ctx context.Context, desc ocispec. // updateReferrersIndex updates the referrers index for desc referencing subject // on manifest push and manifest delete. // References: -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#pushing-manifests-with-subject -// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#deleting-manifests +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#pushing-manifests-with-subject +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#deleting-manifests func (s *manifestStore) updateReferrersIndex(ctx context.Context, subject ocispec.Descriptor, change referrerChange) (err error) { referrersTag := buildReferrersTag(subject) - var skipDelete bool + skipDelete := s.repo.SkipReferrersGC var oldIndexDesc ocispec.Descriptor var referrers []ocispec.Descriptor prepare := func() error { @@ -1408,13 +1517,13 @@ func (s *manifestStore) generateDescriptor(resp *http.Response, ref registry.Ref // 4. 
Validate Server Digest (if present) var serverHeaderDigest digest.Digest - if serverHeaderDigestStr := resp.Header.Get(dockerContentDigestHeader); serverHeaderDigestStr != "" { + if serverHeaderDigestStr := resp.Header.Get(headerDockerContentDigest); serverHeaderDigestStr != "" { if serverHeaderDigest, err = digest.Parse(serverHeaderDigestStr); err != nil { return ocispec.Descriptor{}, fmt.Errorf( "%s %q: invalid response header value: `%s: %s`; %w", resp.Request.Method, resp.Request.URL, - dockerContentDigestHeader, + headerDockerContentDigest, serverHeaderDigestStr, err, ) @@ -1431,7 +1540,7 @@ func (s *manifestStore) generateDescriptor(resp *http.Response, ref registry.Ref // immediate fail return ocispec.Descriptor{}, fmt.Errorf( "HTTP %s request missing required header %q", - httpMethod, dockerContentDigestHeader, + httpMethod, headerDockerContentDigest, ) } // Otherwise, just trust the client-supplied digest @@ -1453,7 +1562,7 @@ func (s *manifestStore) generateDescriptor(resp *http.Response, ref registry.Ref return ocispec.Descriptor{}, fmt.Errorf( "%s %q: invalid response; digest mismatch in %s: received %q when expecting %q", resp.Request.Method, resp.Request.URL, - dockerContentDigestHeader, contentDigest, + headerDockerContentDigest, contentDigest, refDigest, ) } @@ -1485,7 +1594,7 @@ func calculateDigestFromResponse(resp *http.Response, maxMetadataBytes int64) (d // OCI distribution-spec states the Docker-Content-Digest header is optional. // Reference: https://github.com/opencontainers/distribution-spec/blob/v1.0.1/spec.md#legacy-docker-support-http-headers func verifyContentDigest(resp *http.Response, expected digest.Digest) error { - digestStr := resp.Header.Get(dockerContentDigestHeader) + digestStr := resp.Header.Get(headerDockerContentDigest) if len(digestStr) == 0 { return nil @@ -1496,7 +1605,7 @@ func verifyContentDigest(resp *http.Response, expected digest.Digest) error { return fmt.Errorf( "%s %q: invalid response header: `%s: %s`", resp.Request.Method, resp.Request.URL, - dockerContentDigestHeader, digestStr, + headerDockerContentDigest, digestStr, ) } @@ -1504,7 +1613,7 @@ func verifyContentDigest(resp *http.Response, expected digest.Digest) error { return fmt.Errorf( "%s %q: invalid response; digest mismatch in %s: received %q when expecting %q", resp.Request.Method, resp.Request.URL, - dockerContentDigestHeader, contentDigest, + headerDockerContentDigest, contentDigest, expected, ) } diff --git a/vendor/oras.land/oras-go/v2/registry/remote/url.go b/vendor/oras.land/oras-go/v2/registry/remote/url.go index d3eee3eeab6..74258de7ae3 100644 --- a/vendor/oras.land/oras-go/v2/registry/remote/url.go +++ b/vendor/oras.land/oras-go/v2/registry/remote/url.go @@ -101,7 +101,7 @@ func buildRepositoryBlobMountURL(plainHTTP bool, ref registry.Reference, d diges // buildReferrersURL builds the URL for querying the Referrers API. 
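The digest validation above follows a parse-then-compare pattern; a reduced sketch of that pattern, with the header value simulated locally:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Stand-in for resp.Header.Get("Docker-Content-Digest").
	expected := digest.FromString("manifest body")
	header := expected.String()

	// Parse the header value, then compare it to the expected digest.
	contentDigest, err := digest.Parse(header)
	if err != nil {
		fmt.Println("invalid response header:", err)
		return
	}
	fmt.Println("digest match:", contentDigest == expected)
}
```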
// Format: :///v2//referrers/?artifactType= -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers func buildReferrersURL(plainHTTP bool, ref registry.Reference, artifactType string) string { var query string if artifactType != "" { diff --git a/vendor/oras.land/oras-go/v2/registry/remote/warning.go b/vendor/oras.land/oras-go/v2/registry/remote/warning.go new file mode 100644 index 00000000000..ff8f9c0292c --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/warning.go @@ -0,0 +1,100 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remote + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + // headerWarning is the "Warning" header. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + headerWarning = "Warning" + + // warnCode299 is the 299 warn-code. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + warnCode299 = 299 + + // warnAgentUnknown represents an unknown warn-agent. + // Reference: https://www.rfc-editor.org/rfc/rfc7234#section-5.5 + warnAgentUnknown = "-" +) + +// errUnexpectedWarningFormat is returned by parseWarningHeader when +// an unexpected warning format is encountered. +var errUnexpectedWarningFormat = errors.New("unexpected warning format") + +// WarningValue represents the value of the Warning header. +// +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#warnings +// - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 +type WarningValue struct { + // Code is the warn-code. + Code int + // Agent is the warn-agent. + Agent string + // Text is the warn-text. + Text string +} + +// Warning contains the value of the warning header and may contain +// other information related to the warning. +// +// References: +// - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#warnings +// - https://www.rfc-editor.org/rfc/rfc7234#section-5.5 +type Warning struct { + // WarningValue is the value of the warning header. + WarningValue +} + +// parseWarningHeader parses the warning header into WarningValue. 
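A minimal sketch of wiring the new HandleWarning hook, which handleWarningHeaders below feeds with each well-formed Warning header; the registry reference is a placeholder:

```go
package main

import (
	"fmt"

	"oras.land/oras-go/v2/registry/remote"
)

func main() {
	repo, err := remote.NewRepository("localhost:5000/hello") // placeholder reference
	if err != nil {
		panic(err)
	}
	// Invoked for each parsed Warning header on responses routed through do().
	repo.HandleWarning = func(w remote.Warning) {
		fmt.Println("registry warning:", w.Text)
	}
	fmt.Println("warning handler installed for", repo.Reference)
}
```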
+func parseWarningHeader(header string) (WarningValue, error) { + if len(header) < 9 || !strings.HasPrefix(header, `299 - "`) || !strings.HasSuffix(header, `"`) { + // minimum header value: `299 - "x"` + return WarningValue{}, fmt.Errorf("%s: %w", header, errUnexpectedWarningFormat) + } + + // validate text only as code and agent are fixed + quotedText := header[6:] // behind `299 - `, quoted by " + text, err := strconv.Unquote(quotedText) + if err != nil { + return WarningValue{}, fmt.Errorf("%s: unexpected text: %w: %v", header, errUnexpectedWarningFormat, err) + } + + return WarningValue{ + Code: warnCode299, + Agent: warnAgentUnknown, + Text: text, + }, nil +} + +// handleWarningHeaders parses the warning headers and handles the parsed +// warnings using handleWarning. +func handleWarningHeaders(headers []string, handleWarning func(Warning)) { + for _, h := range headers { + if value, err := parseWarningHeader(h); err == nil { + // ignore warnings in unexpected formats + handleWarning(Warning{ + WarningValue: value, + }) + } + } +} diff --git a/vendor/oras.land/oras-go/v2/registry/repository.go b/vendor/oras.land/oras-go/v2/registry/repository.go index 2dd7ff994b7..b75b7b8ea44 100644 --- a/vendor/oras.land/oras-go/v2/registry/repository.go +++ b/vendor/oras.land/oras-go/v2/registry/repository.go @@ -82,7 +82,7 @@ type ReferenceFetcher interface { } // ReferrerLister provides the Referrers API. -// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers +// Reference: https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#listing-referrers type ReferrerLister interface { Referrers(ctx context.Context, desc ocispec.Descriptor, artifactType string, fn func(referrers []ocispec.Descriptor) error) error } @@ -93,16 +93,19 @@ type TagLister interface { // Since the returned tag list may be paginated by the underlying // implementation, a function should be passed in to process the paginated // tag list. + // // `last` argument is the `last` parameter when invoking the tags API. // If `last` is NOT empty, the entries in the response start after the // tag specified by `last`. Otherwise, the response starts from the top // of the Tags list. + // // Note: When implemented by a remote registry, the tags API is called. // However, not all registries supports pagination or conforms the // specification. + // // References: - // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#content-discovery - // - https://docs.docker.com/registry/spec/api/#tags + // - https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc3/spec.md#content-discovery + // - https://docs.docker.com/registry/spec/api/#tags // See also `Tags()` in this package. 
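A minimal sketch of the paginated Tags callback described above; the repository reference is a placeholder and the call will fail without a reachable registry:

```go
package main

import (
	"context"
	"fmt"

	"oras.land/oras-go/v2/registry/remote"
)

func main() {
	repo, err := remote.NewRepository("localhost:5000/hello") // placeholder
	if err != nil {
		panic(err)
	}
	// fn is called once per page; an empty `last` starts from the top.
	err = repo.Tags(context.Background(), "", func(tags []string) error {
		for _, t := range tags {
			fmt.Println(t)
		}
		return nil
	})
	if err != nil {
		fmt.Println("listing failed:", err)
	}
}
```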
 	Tags(ctx context.Context, last string, fn func(tags []string) error) error
 }

From bd51187e7967c9423bcd16e9b41a7c2c5d51a062 Mon Sep 17 00:00:00 2001
From: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com>
Date: Mon, 9 Sep 2024 12:07:51 +0530
Subject: [PATCH 42/61] fix: Ea rbac fixes (#5813)

* app found using display or app name
* single query optimization
* display name handling for new apps

---------

Co-authored-by: kartik-579
---
 pkg/appStore/bean/bean.go                  | 14 ++++++
 .../repository/InstalledAppRepository.go   | 33 ++++++++++++++
 .../service/AppStoreDeploymentDBService.go |  5 +++
 util/rbac/EnforcerUtilHelm.go              | 44 ++++++++++++++++---
 4 files changed, 89 insertions(+), 7 deletions(-)

diff --git a/pkg/appStore/bean/bean.go b/pkg/appStore/bean/bean.go
index 84a7a323609..11a5238c8d5 100644
--- a/pkg/appStore/bean/bean.go
+++ b/pkg/appStore/bean/bean.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	apiBean "github.com/devtron-labs/devtron/api/bean/gitOps"
 	openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient"
+	bean3 "github.com/devtron-labs/devtron/api/helm-app/service/bean"
 	"github.com/devtron-labs/devtron/pkg/cluster/repository/bean"
 	bean2 "github.com/devtron-labs/devtron/pkg/deployment/common/bean"
 	"github.com/devtron-labs/devtron/util/gitUtil"
@@ -120,6 +121,19 @@ type InstallAppVersionDTO struct {
 	DisplayName string `json:"-"` // used only for external apps
 }

+func (chart *InstallAppVersionDTO) GetAppIdentifierString() string {
+	displayName := chart.DisplayName
+	if len(displayName) == 0 {
+		displayName = chart.AppName
+	}
+	appIdentifier := &bean3.AppIdentifier{
+		ClusterId:   chart.ClusterId,
+		Namespace:   chart.Namespace,
+		ReleaseName: displayName,
+	}
+	return appIdentifier.GetUniqueAppNameIdentifier()
+}
+
 // UpdateDeploymentAppType updates deploymentAppType to InstallAppVersionDTO
 func (chart *InstallAppVersionDTO) UpdateDeploymentAppType(deploymentAppType string) {
 	if chart == nil {
diff --git a/pkg/appStore/installedApp/repository/InstalledAppRepository.go b/pkg/appStore/installedApp/repository/InstalledAppRepository.go
index da873dd50bc..f04fdb93e5f 100644
--- a/pkg/appStore/installedApp/repository/InstalledAppRepository.go
+++ b/pkg/appStore/installedApp/repository/InstalledAppRepository.go
@@ -139,6 +139,7 @@ type InstalledAppRepository interface {
 	GetInstalledAppVersionByClusterIds(clusterIds []int) ([]*InstalledAppVersions, error) //unused
 	GetInstalledAppVersionByClusterIdsV2(clusterIds []int) ([]*InstalledAppVersions, error)
 	GetInstalledApplicationByClusterIdAndNamespaceAndAppName(clusterId int, namespace string, appName string) (*InstalledApps, error)
+	GetInstalledApplicationByClusterIdAndNamespaceAndAppIdentifier(clusterId int, namespace string, appIdentifier string, appName string) (*InstalledApps, error)
 	GetAppAndEnvDetailsForDeploymentAppTypeInstalledApps(deploymentAppType string, clusterIds []int) ([]*InstalledApps, error)
 	GetDeploymentSuccessfulStatusCountForTelemetry() (int, error)
 	GetGitOpsInstalledAppsWhereArgoAppDeletedIsTrue(installedAppId int, envId int) (InstalledApps, error)
@@ -672,6 +673,38 @@ func (impl InstalledAppRepositoryImpl) GetInstalledAppVersionByClusterIdsV2(clus
 	return installedAppVersions, err
 }

+func (impl InstalledAppRepositoryImpl) GetInstalledApplicationByClusterIdAndNamespaceAndAppIdentifier(clusterId int, namespace string, appIdentifier string, appName string) (*InstalledApps, error) {
+	var installedApps []*InstalledApps
+	err := impl.dbConnection.Model(&installedApps).
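+		// fetch every active installed app in this cluster/namespace whose
+		// app_name or display_name matches; the exact match is picked below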
+ Column("installed_apps.*", "App", "Environment", "App.Team"). + Where("environment.cluster_id = ?", clusterId). + Where("environment.namespace = ?", namespace). + Where("app.app_name = ? OR app.display_name = ?", appName, appName). + Where("installed_apps.active = ?", true). + Where("app.active = ?", true). + Where("environment.active = ?", true). + Select() + // extract app which has matching display name and app name + for _, installedApp := range installedApps { + appObj := installedApp.App + if appObj.DisplayName == appName && appObj.AppName == appIdentifier { + return installedApp, nil + } + } + // if not found any matching app in above case, then return app with only app name + for _, installedApp := range installedApps { + appObj := installedApp.App + if appObj.DisplayName == "" && appObj.AppName == appName { + return installedApp, nil + } + } + if err == nil { + err = pg.ErrNoRows + } + + return &InstalledApps{}, err +} + func (impl InstalledAppRepositoryImpl) GetInstalledApplicationByClusterIdAndNamespaceAndAppName(clusterId int, namespace string, appName string) (*InstalledApps, error) { model := &InstalledApps{} err := impl.dbConnection.Model(model). diff --git a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go index 33f1a7b9a19..80ac2412727 100644 --- a/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go +++ b/pkg/appStore/installedApp/service/AppStoreDeploymentDBService.go @@ -148,6 +148,10 @@ func (impl *AppStoreDeploymentDBServiceImpl) AppStoreDeployOperationDB(installRe appCreateRequest.AppType = helper.ExternalChartStoreApp appCreateRequest.DisplayName = installRequest.DisplayName } + if globalUtil.IsBaseStack() || globalUtil.IsHelmApp(installRequest.AppOfferingMode) { + appCreateRequest.DisplayName = installRequest.AppName + appCreateRequest.AppName = installRequest.GetAppIdentifierString() + } appCreateRequest, err = impl.createAppForAppStore(appCreateRequest, tx, getAppInstallationMode(installRequest.AppOfferingMode)) if err != nil { impl.logger.Errorw("error while creating app", "error", err) @@ -603,6 +607,7 @@ func (impl *AppStoreDeploymentDBServiceImpl) createAppForAppStore(createRequest TeamId: createRequest.TeamId, AppType: helper.ChartStoreApp, AppOfferingMode: appInstallationMode, + DisplayName: createRequest.DisplayName, } if createRequest.AppType == helper.ExternalChartStoreApp { //when linking ext helm app to chart store, there can be a case that two (or more) external apps can have same name, in diff namespaces or diff diff --git a/util/rbac/EnforcerUtilHelm.go b/util/rbac/EnforcerUtilHelm.go index 15861c0d88d..c72ce626614 100644 --- a/util/rbac/EnforcerUtilHelm.go +++ b/util/rbac/EnforcerUtilHelm.go @@ -18,6 +18,7 @@ package rbac import ( "fmt" + "github.com/devtron-labs/devtron/api/helm-app/service/bean" "github.com/devtron-labs/devtron/internal/sql/repository/app" repository2 "github.com/devtron-labs/devtron/pkg/appStore/installedApp/repository" "github.com/devtron-labs/devtron/pkg/cluster/repository" @@ -78,8 +79,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByTeamIdAndClusterId(teamId int, c func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clusterId int, namespace string, appName string) (string, string) { - installedApp, installedAppErr := impl.InstalledAppRepository.GetInstalledApplicationByClusterIdAndNamespaceAndAppName(clusterId, namespace, appName) - + installedApp, installedAppErr := impl.getInstalledApp(clusterId, 
namespace, appName) if installedAppErr != nil && installedAppErr != pg.ErrNoRows { impl.logger.Errorw("error on fetching data for rbac object from installed app repository", "err", installedAppErr) return "", "" @@ -93,8 +93,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu if installedApp == nil || installedAppErr == pg.ErrNoRows { // for cli apps which are not yet linked - - app, err := impl.appRepository.FindAppAndProjectByAppName(appName) + app, err := impl.getAppObject(clusterId, namespace, appName) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in fetching app details", "err", err) return "", "" @@ -102,10 +101,10 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu if app.TeamId == 0 { // case if project is not assigned to cli app - return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", team.UNASSIGNED_PROJECT, cluster.ClusterName, namespace, appName), fmt.Sprintf("%s/%s/%s", team.UNASSIGNED_PROJECT, namespace, appName) } else { // case if project is assigned - return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", app.Team.Name, cluster.ClusterName, namespace, appName), fmt.Sprintf("%s/%s/%s", app.Team.Name, namespace, appName) } } @@ -118,7 +117,7 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu } else { if installedApp.EnvironmentId == 0 { // for apps in EA mode, initally env can be 0. - return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName), "" + return fmt.Sprintf("%s/%s__%s/%s", installedApp.App.Team.Name, cluster.ClusterName, namespace, appName), fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, namespace, appName) } // for apps which are assigned to a project and have env ID rbacOne := fmt.Sprintf("%s/%s/%s", installedApp.App.Team.Name, installedApp.Environment.EnvironmentIdentifier, appName) @@ -131,6 +130,37 @@ func (impl EnforcerUtilHelmImpl) GetHelmObjectByClusterIdNamespaceAndAppName(clu } +func (impl EnforcerUtilHelmImpl) getAppObject(clusterId int, namespace string, appName string) (*app.App, error) { + appIdentifier := &bean.AppIdentifier{ + ClusterId: clusterId, + Namespace: namespace, + ReleaseName: appName, + } + appNameIdentifier := appIdentifier.GetUniqueAppNameIdentifier() + appObj, err := impl.appRepository.FindAppAndProjectByAppName(appNameIdentifier) + if appObj == nil || err == pg.ErrNoRows { + impl.logger.Warnw("appObj not found, going to find app using display name ", "appIdentifier", appNameIdentifier, "appName", appName) + appObj, err = impl.appRepository.FindAppAndProjectByAppName(appName) + } + return appObj, err +} + +func (impl EnforcerUtilHelmImpl) getInstalledApp(clusterId int, namespace string, appName string) (*repository2.InstalledApps, error) { + appIdentifier := &bean.AppIdentifier{ + ClusterId: clusterId, + Namespace: namespace, + ReleaseName: appName, + } + appNameIdentifier := appIdentifier.GetUniqueAppNameIdentifier() + //installedApp, installedAppErr := impl.InstalledAppRepository.GetInstalledApplicationByClusterIdAndNamespaceAndAppName(clusterId, namespace, appNameIdentifier) + //if installedApp == nil || installedAppErr == pg.ErrNoRows { + // impl.logger.Warnw("installed app not found, going to find app using display name ", "appIdentifier", appNameIdentifier, "appName", appName) + // installedApp, 
installedAppErr = impl.InstalledAppRepository.GetInstalledApplicationByClusterIdAndNamespaceAndAppName(clusterId, namespace, appName) + //} + return impl.InstalledAppRepository.GetInstalledApplicationByClusterIdAndNamespaceAndAppIdentifier(clusterId, namespace, appNameIdentifier, appName) + //return installedApp, installedAppErr +} + func (impl EnforcerUtilHelmImpl) GetAppRBACNameByInstalledAppId(installedAppVersionId int) (string, string) { InstalledApp, err := impl.InstalledAppRepository.GetInstalledApp(installedAppVersionId) From 3020744161522d52d35b124b3af2ed854a50514d Mon Sep 17 00:00:00 2001 From: Prakash Date: Mon, 9 Sep 2024 13:45:12 +0530 Subject: [PATCH 43/61] fix: scan list in global security page sql injection fix (#5808) * scan list in global security page sql injection fix * comment --- .../security/ImageScanDeployInfoRepository.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/internal/sql/repository/security/ImageScanDeployInfoRepository.go b/internal/sql/repository/security/ImageScanDeployInfoRepository.go index b3146dd6d8a..f723c9a5471 100644 --- a/internal/sql/repository/security/ImageScanDeployInfoRepository.go +++ b/internal/sql/repository/security/ImageScanDeployInfoRepository.go @@ -147,8 +147,13 @@ func (impl ImageScanDeployInfoRepositoryImpl) FindByTypeMetaAndTypeId(scanObject func (impl ImageScanDeployInfoRepositoryImpl) ScanListingWithFilter(request *securityBean.ImageScanFilter, size int, offset int, deployInfoIds []int) ([]*ImageScanListingResponse, error) { var models []*ImageScanListingResponse + var err error query := impl.scanListingQueryBuilder(request, size, offset, deployInfoIds) - _, err := impl.dbConnection.Query(&models, query, size, offset) + if len(request.Severity) > 0 { + _, err = impl.dbConnection.Query(&models, query, pg.In(request.Severity), pg.In(request.Severity)) + } else { + _, err = impl.dbConnection.Query(&models, query) + } if err != nil { impl.logger.Error("err", err) return []*ImageScanListingResponse{}, err @@ -177,8 +182,8 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithoutObject(request query = query + " AND res.cve_store_name ILIKE '%" + request.CVEName + "%'" } if len(request.Severity) > 0 { - severities := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.Severity)), ","), "[]") - query = query + fmt.Sprintf(" AND (cs.standard_severity IN (%s) OR (cs.severity IN (%s) AND cs.standard_severity IS NULL))", severities, severities) + // use pg.In to inject values here wherever calling this func in case severity exists, to avoid sql injections + query = query + " AND (cs.standard_severity IN (?) OR (cs.severity IN (?) AND cs.standard_severity IS NULL))" } if len(request.EnvironmentIds) > 0 { envIds := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.EnvironmentIds)), ","), "[]") @@ -239,8 +244,7 @@ func (impl ImageScanDeployInfoRepositoryImpl) scanListQueryWithObject(request *s } if len(request.Severity) > 0 { - severities := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.Severity)), ","), "[]") - query = query + fmt.Sprintf(" AND (cs.standard_severity IN (%s) OR (cs.severity IN (%s) AND cs.standard_severity IS NULL))", severities, severities) + query = query + " AND (cs.standard_severity IN (?) OR (cs.severity IN (?) 
AND cs.standard_severity IS NULL))" } if len(request.EnvironmentIds) > 0 { envIds := strings.Trim(strings.Join(strings.Fields(fmt.Sprint(request.EnvironmentIds)), ","), "[]") From e332df2c4430bce93c9be52d28176fd047c43206 Mon Sep 17 00:00:00 2001 From: iamayushm <32041961+iamayushm@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:33:31 +0530 Subject: [PATCH 44/61] fix: app details page(#5823) --- internal/sql/repository/AppListingRepository.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go index 59340ada08c..c5536724f67 100644 --- a/internal/sql/repository/AppListingRepository.go +++ b/internal/sql/repository/AppListingRepository.go @@ -355,6 +355,7 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex " p.deployment_app_type," + " p.ci_pipeline_id," + " p.deployment_app_delete_request," + + " pco.id as pco_id," + " cia.data_source," + " cia.id as ci_artifact_id," + " cia.parent_ci_artifact as parent_artifact_id," + @@ -362,6 +363,7 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex " env.cluster_id," + " env.is_virtual_environment," + " cl.cluster_name," + + " cia.image," + " p.id as cd_pipeline_id," + " p.ci_pipeline_id," + " p.trigger_type" + From 8e78d6e7b80f77137d87cb12f92a9ce53e67d509 Mon Sep 17 00:00:00 2001 From: systemsdt <129372406+systemsdt@users.noreply.github.com> Date: Wed, 11 Sep 2024 16:05:46 +0530 Subject: [PATCH 45/61] misc: sync with common-lib changes with release candidate 18 (#5830) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global 
and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
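Aside on the SQL-injection fix in PATCH 43 above: the change replaces string-concatenated severity filters with `?` placeholders that are bound through pg.In at the call site. A minimal, self-contained sketch of that pattern, assuming a go-pg *pg.DB handle named db and a hypothetical scans table (names are illustrative, not Devtron's):

package example

import "github.com/go-pg/pg"

// ScanRow mirrors one listing row (hypothetical, for illustration only).
type ScanRow struct {
	Name             string
	StandardSeverity int
}

// listBySeverity keeps `?` placeholders in the SQL text and binds the
// user-supplied severities with pg.In, so the values are escaped by the
// driver instead of being concatenated into the query string.
func listBySeverity(db *pg.DB, severities []int) ([]ScanRow, error) {
	var rows []ScanRow
	query := "SELECT name, standard_severity FROM scans WHERE standard_severity IN (?)"
	_, err := db.Query(&rows, query, pg.In(severities))
	return rows, err
}

With this shape a malicious severity value cannot terminate the string literal, which is exactly what the old fmt.Sprintf concatenation allowed.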
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: sql scripts sync (#5763) * fix: Scan tool migration fix develop (#5773) * scan tool active check removed * query fix * chore: Oss sync 2 sept 2024 oss (#5779) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry * docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * feat: Env description handling (#5744) * env description handling added * license handling * misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * 
code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: ci patch rbac for branch update (#5759) * feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations * fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> * doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. 
* Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix * doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: DockerSlim plugin doc (#5660) * doc for DockerSlim plugin * Updated Docker-Slim to DockerSlim * Minor fixes * url update * Fixes in url --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron Job Trigger Plugin doc (#5742) * devtron-job-trigger plugin doc * summary updated * Updated input variable description * token value updated --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: scan tool active check removed (#5771) * scan tool active check removed * query fix * feat: Docker pull env driven (#5767) * useDockerApiToGetDigest menv driven flag to control pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert * fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row * feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * Revert "feat: plugin creation support (#5630)" (#5778) This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65. * script no. 
fix --------- Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: adi6859 Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> * chore: plugin creation oss (#5780) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry * docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * feat: Env description handling (#5744) * env description handling added * license handling * misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * 
code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: ci patch rbac for branch update (#5759) * feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations * fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> * doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. 
* Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix * doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: DockerSlim plugin doc (#5660) * doc for DockerSlim plugin * Updated Docker-Slim to DockerSlim * Minor fixes * url update * Fixes in url --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron Job Trigger Plugin doc (#5742) * devtron-job-trigger plugin doc * summary updated * Updated input variable description * token value updated --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: scan tool active check removed (#5771) * scan tool active check removed * query fix * feat: Docker pull env driven (#5767) * useDockerApiToGetDigest menv driven flag to control pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert * fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row * feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * Revert "feat: plugin creation support (#5630)" (#5778) This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65. 
--------- Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: adi6859 Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> * fix: SQL injection fix develop (#5785) * sql injection fixes * query param init fix * feat: add support for tag name for external CI (#5689) * remove validation that url exists in external artifact * handle docker tag name regex validation * sync with enterprise * sync with enterprise * added validation for external ci tag name * fix log * mark deployment aborted if validation fails * digest validation added * fix typo * common lib update --------- Co-authored-by: prakhar katiyar * argo-assets moved out (#5788) * query param split (#5801) (#5803) * feat: acr polling plugin support (#5724) * migration number changes (#5692) * refrain from checking autoscalingCheckBeforeTrigger for virtual clusters (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar(255) (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar(255) * error log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to apt install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85.
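The external-CI tag validation described above ("handle docker tag name regex validation") comes down to enforcing the standard Docker tag grammar: at most 128 characters, starting with an alphanumeric character or underscore, followed by alphanumerics, dots, underscores, or dashes. A minimal sketch; the function name is illustrative:

    package main

    import (
        "fmt"
        "regexp"
    )

    // tagPattern encodes the Docker image tag grammar: first character is
    // alphanumeric or underscore, then up to 127 alphanumerics, dots,
    // underscores, or dashes (128 characters total).
    var tagPattern = regexp.MustCompile(`^[A-Za-z0-9_][A-Za-z0-9._-]{0,127}$`)

    // validateTagName rejects a tag before the deployment is triggered,
    // matching the "mark deployment aborted if validation fails" behavior.
    func validateTagName(tag string) error {
        if !tagPattern.MatchString(tag) {
            return fmt.Errorf("invalid docker tag %q: must match %s", tag, tagPattern)
        }
        return nil
    }

    func main() {
        for _, t := range []string{"v1.2.3", "-bad", "ok_tag"} {
            fmt.Println(t, validateTagName(t))
        }
    }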
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * chore: polling plugin refactored * chore: refactored method name * update polling plugin response struct * updated migration number * updated polling plugin image * updated polling plugin migration script * fix: same digest for different image issue * fix: plugin migration handling * updated polling plugin migration script * updated migration number * fix: empty artifact issue * chore: polling plugin migration prod-image updated * updated common-lib version --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> * fix: build failed due to argo assets (#5805) * handled the release not found case for app detail of external app (#5791) Co-authored-by: Rajeev Ranjan * chore: Main develop sync bd5118 (#5817) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
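The decode-secret fix (#5695) referenced above validates that secret payloads are genuine base64 at add/update time instead of failing later at deploy time. A minimal sketch of ValidateEncodedDataByDecoding-style checking, assuming the payload is a map of key to base64-encoded value (the actual request shape may differ):

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    // validateEncodedDataByDecoding checks that every secret value decodes
    // as base64, so a malformed value is rejected when the secret is added
    // or updated rather than surfacing as a deployment-time error.
    func validateEncodedDataByDecoding(data map[string]string) error {
        for k, v := range data {
            if _, err := base64.StdEncoding.DecodeString(v); err != nil {
                return fmt.Errorf("secret key %q is not valid base64: %w", k, err)
            }
        }
        return nil
    }

    func main() {
        ok := map[string]string{"password": base64.StdEncoding.EncodeToString([]byte("s3cret"))}
        bad := map[string]string{"password": "not-base64!!"}
        fmt.Println(validateEncodedDataByDecoding(ok))  // <nil>
        fmt.Println(validateEncodedDataByDecoding(bad)) // error
    }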
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry * docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * feat: Env description handling (#5744) * env description handling added * license handling * misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * 
code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
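The scan changes above ("storing target, class and type values in imageScanExecutionResults", "group image vulnerabilities by base/os image") amount to tagging each finding with its layer and splitting base-image CVEs from application CVEs. A minimal sketch assuming trivy-style class values ("os-pkgs" vs "lang-pkgs"); the struct fields are illustrative, not the repository's actual types:

    package main

    import "fmt"

    // ScanResult mirrors the target/class/type columns added to image scan
    // execution results in the commits above.
    type ScanResult struct {
        CVEID  string
        Target string // e.g. image reference or file path scanned
        Class  string // "os-pkgs" or "lang-pkgs" in trivy terms
        Type   string // e.g. "debian", "gobinary"
    }

    // groupByLayer splits findings into base/OS-image CVEs and application
    // CVEs, the grouping described in "group image vulnerabilities by
    // base/os image".
    func groupByLayer(results []ScanResult) (osImage, app []ScanResult) {
        for _, r := range results {
            if r.Class == "os-pkgs" {
                osImage = append(osImage, r)
            } else {
                app = append(app, r)
            }
        }
        return osImage, app
    }

    func main() {
        res := []ScanResult{
            {CVEID: "CVE-2024-0001", Class: "os-pkgs", Type: "debian"},
            {CVEID: "CVE-2024-0002", Class: "lang-pkgs", Type: "gobinary"},
        }
        osImg, app := groupByLayer(res)
        fmt.Println(len(osImg), "base-image CVEs,", len(app), "app CVEs")
    }

Grouping this way lets a user see at a glance whether fixing a vulnerability means rebasing onto a patched base image or bumping an application dependency.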
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
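The pipelineStageStepVariable migration mentioned above is plain DDL that widens two columns from varchar(255) to text so long variable values no longer fail on insert. A sketch of that step executed from Go, assuming the github.com/lib/pq driver; the exact table and column names are inferred from the commit subject and may differ from the actual script:

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/lib/pq"
    )

    // widenVariableColumns applies the DDL behind "making input value and
    // default_value text from varchar(255)". Table and column names here
    // are inferred, not taken from the actual migration file.
    func widenVariableColumns(db *sql.DB) error {
        _, err := db.Exec(`
            ALTER TABLE pipeline_stage_step_variable
                ALTER COLUMN value TYPE text,
                ALTER COLUMN default_value TYPE text;`)
        return err
    }

    func main() {
        db, err := sql.Open("postgres", "postgres://localhost/orchestrator?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        if err := widenVariableColumns(db); err != nil {
            log.Fatal(err)
        }
    }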
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: ci patch rbac for branch update (#5759) * feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations * fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> * doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. 
* Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix * doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: DockerSlim plugin doc (#5660) * doc for DockerSlim plugin * Updated Docker-Slim to DockerSlim * Minor fixes * url update * Fixes in url --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron Job Trigger Plugin doc (#5742) * devtron-job-trigger plugin doc * summary updated * Updated input variable description * token value updated --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: scan tool active check removed (#5771) * scan tool active check removed * query fix * feat: Docker pull env driven (#5767) * useDockerApiToGetDigest menv driven flag to control pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert * fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row * feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * Revert "feat: plugin creation support (#5630)" (#5778) This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65. 
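The "Validate config cm cs" fix above runs request validation before CMGlobalAddUpdate/CSGlobalAddUpdate and rejects a create that reuses an existing name. A minimal sketch of both guards; the ConfigData shape is illustrative:

    package main

    import (
        "errors"
        "fmt"
    )

    // ConfigData is a minimal stand-in for one configmap/secret entry in a
    // CM/CS add-or-update request.
    type ConfigData struct{ Name string }

    // validateConfigRequest performs the cheap structural checks before any
    // global add/update is attempted.
    func validateConfigRequest(req ConfigData) error {
        if req.Name == "" {
            return errors.New("config name must not be empty")
        }
        return nil
    }

    // checkIfConfigDataAlreadyExist mirrors the duplicate-name guard named
    // in the commit: a create must not reuse an already-saved name.
    func checkIfConfigDataAlreadyExist(existing []ConfigData, req ConfigData) error {
        for _, c := range existing {
            if c.Name == req.Name {
                return fmt.Errorf("config %q already exists", req.Name)
            }
        }
        return nil
    }

    func main() {
        existing := []ConfigData{{Name: "app-cm"}}
        req := ConfigData{Name: "app-cm"}
        if err := validateConfigRequest(req); err == nil {
            fmt.Println(checkIfConfigDataAlreadyExist(existing, req)) // rejected as duplicate
        }
    }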
* fix: unimplemented cluster cron service (#5781) * fix: sql injection fixes (#5783) * sql injection fixes * query param init fix * doc: Vulnerability Scanning Plugin doc (#5722) * vulnerability scanning plugin doc * summary.md added * docs: Jira plugins doc (Validator + Updater) (#5709) * Docs of Jira Plugins - Validator and Updater * Added Index Links * Additional Proofreading * add basic auth and tls for sm (#5789) * docs: added commands enable ingress during helm installation (#5794) * added commands emable ingress during helm installation * modified commands * improved statement * removed unrequired lines --------- Co-authored-by: Badal Kumar Prusty * Revamped + Restructured Ingress Setup Doc (#5798) * modifying route (#5799) Co-authored-by: Badal Kumar Prusty * fix: cron status update refactoring (#5790) Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> * docs: modified the anchorlink in ingress.md (#5800) * modifying route * modified the anchorlink --------- Co-authored-by: Badal Kumar Prusty * query param split (#5801) * fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) * upgraded to /argo-cd/v2 v2.9.21 * argocd vendor files added * sync with common-lib * fix: Ea rbac fixes (#5813) * app found using display or app name * single query optimization * display name handling for new apps --------- Co-authored-by: kartik-579 --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: adi6859 Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> * feat: Argo listing change (#5768) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * 
dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry * docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * feat: Env description handling (#5744) * env description handling added * license handling * misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in 
resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" (#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. 
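The deployment-app-name refactor above ("removing hard coded deployment app name", "removing %s-%s usage") centralizes name construction in one helper and lets a persisted name take precedence, which is how the backward-compatibility handling for an empty release mode can work. A minimal sketch with illustrative field names:

    package main

    import "fmt"

    // DeploymentConfig carries a persisted deployment app name, as the
    // refactor above introduces; the field is illustrative.
    type DeploymentConfig struct {
        DeploymentAppName string
    }

    // BuildDeploymentAppName is the single helper replacing ad-hoc
    // fmt.Sprintf("%s-%s", ...) call sites: a name persisted with the
    // deployment config wins; otherwise fall back to the historical
    // <app>-<env> convention.
    func BuildDeploymentAppName(cfg *DeploymentConfig, appName, envName string) string {
        if cfg != nil && cfg.DeploymentAppName != "" {
            return cfg.DeploymentAppName
        }
        return fmt.Sprintf("%s-%s", appName, envName)
    }

    func main() {
        fmt.Println(BuildDeploymentAppName(nil, "orders", "prod")) // orders-prod
        legacy := &DeploymentConfig{DeploymentAppName: "legacy-name"}
        fmt.Println(BuildDeploymentAppName(legacy, "orders", "prod")) // legacy-name
    }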
* chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: ci patch rbac for branch update (#5759) * feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations * fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> * doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. 
* Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix * doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * doc: DockerSlim plugin doc (#5660) * doc for DockerSlim plugin * Updated Docker-Slim to DockerSlim * Minor fixes * url update * Fixes in url --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * doc: Devtron Job Trigger Plugin doc (#5742) * devtron-job-trigger plugin doc * summary updated * Updated input variable description * token value updated --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: scan tool active check removed (#5771) * scan tool active check removed * query fix * feat: Docker pull env driven (#5767) * useDockerApiToGetDigest menv driven flag to control pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert * fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row * feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * wip * Revert "feat: plugin creation support (#5630)" (#5778) This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65. 
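The "panic handlings" fix above is the classic deferred-recover pattern: a panicking task is logged with its stack instead of crashing the process, which also avoids the false-positive failure metrics mentioned alongside it. A generic sketch:

    package main

    import (
        "fmt"
        "runtime/debug"
    )

    // runSafely wraps a unit of work with a deferred recover so one
    // panicking task cannot take the whole service down; the panic value
    // and stack trace are logged for diagnosis instead.
    func runSafely(name string, fn func()) {
        defer func() {
            if r := recover(); r != nil {
                fmt.Printf("recovered panic in %s: %v\n%s", name, r, debug.Stack())
            }
        }()
        fn()
    }

    func main() {
        runSafely("status-updater", func() { panic("nil pointer in cron handler") })
        fmt.Println("process still alive")
    }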
* updated common lib to 761a56e2b * wip * fix: unimplemented cluster cron service (#5781) * fix: sql injection fixes (#5783) * sql injection fixes * query param init fix * doc: Vulnerability Scanning Plugin doc (#5722) * vulnerability scanning plugin doc * summary.md added * docs: Jira plugins doc (Validator + Updater) (#5709) * Docs of Jira Plugins - Validator and Updater * Added Index Links * Additional Proofreading * add basic auth and tls for sm (#5789) * reverted common lib change * updated handling for getting restConfig through custom transport commonm method * docs: added commands enable ingress during helm installation (#5794) * added commands emable ingress during helm installation * modified commands * improved statement * removed unrequired lines --------- Co-authored-by: Badal Kumar Prusty * Revamped + Restructured Ingress Setup Doc (#5798) * modifying route (#5799) Co-authored-by: Badal Kumar Prusty * fix: cron status update refactoring (#5790) Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> * docs: modified the anchorlink in ingress.md (#5800) * modifying route * modified the anchorlink --------- Co-authored-by: Badal Kumar Prusty * query param split (#5801) * fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) * upgraded to /argo-cd/v2 v2.9.21 * argocd vendor files added * sync with common-lib * fix: Ea rbac fixes (#5813) * app found using display or app name * single query optimization * display name handling for new apps --------- Co-authored-by: kartik-579 --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: adi6859 Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> * feat: Feature release v7 sql scripts (#5757) * sql scripts * release channel schema * script number chnage * is exposed * sql script chnage * script number chnage * fix: sync with common-lib changes (#5820) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for 
helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * doc: Edit Deployment Chart Schema (#5735) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * doc: Redirection of old entry in gitbook.yaml (#5738) * Edit Deployment Chart Schema * Fixes * PM + CO Feedback Incorporated * Redirected Old Entry * docs: added Documentation for Air-Gapped Installation (#5360) * added docs for air-gapped-installation * added all the images in 7.0.0 * modified yq command in the docs * added an entry in summary.md * added installation commands * modified statements * modified variable name * added steps to navigation * added the latest oss chart images * added a note for docker * Added Intro + Proofreading + Structuring * Other fixes * Lang fix * added docs for ea-mode only * modified lang * Update install-devtron-in-airgapped-environment.md Changed h3 header to fit the ToC on the RHS * added changes * modified changes --------- Co-authored-by: Badal Kumar Prusty Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * feat: Env description handling (#5744) * env description handling added * license handling * misc: Main sync rc - branch update (#5753) * added config sql script (#5681) * feat: CVE severity categorisation and scan result listing API enhancements (#5617) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * fix: update script numbers * fix: minor fix * feat: casbin deny policy sql scripts (#5677) * system controller scripts * script additions * sql cript update * sql script number chnage * feat: Config diff phase 2 oss (#5488) * story(configDiffView) : open api spec * story(configDiffView) : open api spec updated * story(configDiffView) : open api spec updated for error state * story(configDiffView) : WIP * story(configDiffView) : WIP "some code changed" * story(configDiffView) : support for names added * story(configDiffView) : iota removed * story(configDiffView) : pg no rows handled * story(configDiffView) : spelling check * story(configDiffView) : code review comment resolved * story(configDiffView) : env id added * story(configDiffView) : intersection added * story(configDiffView) : comments removed * story(configDiffView) : code review comment resolved * story(configDiffView) : comment removed * story(configDiffView) : CMCSNames DTO moved * story(configDiffView) : null case handled * story(configDiffView) : logger added * 
story(configDiffView) : code refactored * story(configDiffView) : code refactored v2 * story(configDiffView) : spec updated * story(configDiffView) : code refactored * story(configDiffView) : config names * main sync * overridden and global flag introduced in config diff autocomplete api * ent sync * get config data in resthandler * new api for showing all config data in config/data :- Service func -> GetAllConfigData * using a single key instead of global and overridden key in config/autocomplete api * ConfigState made string instead of int * not sending inheriting in case base config * code review comment incorporation * ent sync * code review comment incorp -1 * code review comment incorp -2 * code review comment incorp -3 * small fix in plugin * migration number changes (#5692) * main sync * minor fix * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * scipt number change --------- Co-authored-by: adi6859 Co-authored-by: Vikram Singh * fix: Helm apps entries in Ea mode (#5652) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * executed make after merging with develop branch * feat: refactoring deployment app name usage (#5702) * removing hard coded deployment app name * removing %s-%s usage * wip: query change for enterprise * wip * wip * wip * adding release mode in deployment config * wip: release changes * left join on pco and artifact * handling empty release mode - backward compatibility * fixing panic * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * migration updated * main merge and migration script updated * wip * review changes * fix sql no --------- Co-authored-by: Prakash * migration syn with ent (#5719) * fix: group image vulnerabilities by base/os image (#5680) * feat: add support for app and env sorting in scan list api and add medium, high and unknown severity support * fix: query fix for appName sort or envName sort * fix: sql script number change * fix: minor changes * fix: review fix * fix: remove dml on cve_store and handle it in code handling this versioning * fix: review comments * feat: storing target,class and type values in imageScanExecutionResults * feat: add sql script * feat: add sql script * fix: add new columns * fix: update script numbers * fix: correct down script * fix: minor fix * chore: script number update * fix: remove sql script (#5727) * Revert "fix: Helm apps entries in Ea mode (#5652)" 
(#5733) This reverts commit f1aa1fca0624af32de5e620ceba4548488a07127. * chore: custom argo-workflow dependency (#5731) * bumped github.com/argoproj/argo-workflows/v3 v3.5.10 => github.com/devtron-labs/argo-workflows/v3 v3.5.10 * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. * fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) * reverted main branch changes * reverted main branch changes --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * chore: fix go.sum file (#5734) * misc: Main sync develop (#5737) * migration number changes (#5692) * refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) * fix: Decode secret fix on add update oss (#5695) * ValidateEncodedDataByDecoding in case add or update secret * wire fix from main * minor refactor * comment * saving pco concurrency case handled (#5688) * fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) * script for pipelineStageStepVariable, making input value and default_value text from varchar255 * erro log fix * fix: ea fixes for helm app (#5708) * added the ea apps entry app table * resolved the ea mode multiple rows error during configuration of app * modified the ea dockerfile in ca-certificates cmd * uncommented the code and left the ea helm app making way untouched * remodified the dockerfile as previous state * modified the docker file ea mode * dockerfile exit code 100 due to ap install alternative in ea mode dockerfile * execute make after main merge * modified changes in dockerfile ea mode * resolved comments after first level review * Revert "fix: ea fixes for helm app (#5708)" (#5713) This reverts commit 3e31f49f95d373f92b13afbe1806606ac4a39d85. 
* fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) * SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation * minor refactor * minor refactor * migration syn with ent (#5718) --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> * fix: Validate config cm cs (#5750) * validateConfigRequest before CMGlobalAddUpdate and CSGlobalAddUpdate * checkIfConfigDataAlreadyExist --------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> * doc: Update prerequisites of code-scan (#5625) * Update prerequisites of code-scan * Hyperlinked the Vulnerability scanning doc --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: ci patch rbac for branch update (#5759) * feat: Added basic auth support for servicemonitor (#5761) * Added support for basic auth in servicemonitor * Added support for namespace selector and custom matchLabels * Fixed indentations * fix: Bitnami chart repo tls issue (#5740) * bitnami_chart_fix * Rename 278_bitnami_chart_fix.down.sql to 282_bitnami_chart_fix.down.sql * Rename 278_bitnami_chart_fix.up.sql to 282_bitnami_chart_fix.up.sql --------- Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> * doc: Cosign plugin doc (#5665) * doc for cosign plugin * edits in task name * updates in intro and other fixes. 
* Attached link to Cosign GitHub repo * Hyperlink fixes --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: check rbac on env if envName is present (#5765) * admin check fix in config draft * minor fix * doc: CraneCopy plugin doc (#5658) * doc for * edits in task name * spelling correction * Updated password --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron CD Trigger Plugin doc (#5747) * devtron-cd-trigger plugin doc * minor update * Proofreading done * Update devtron-cd-trigger.md * Removed unwanted phrase * Changed wording * Changed plurality * Updated devtron token --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: DockerSlim plugin doc (#5660) * doc for DockerSlim plugin * Updated Docker-Slim to DockerSlim * Minor fixes * url update * Fixes in url --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * doc: Devtron Job Trigger Plugin doc (#5742) * devtron-job-trigger plugin doc * summary updated * Updated input variable description * token value updated --------- Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> * fix: scan tool active check removed (#5771) * scan tool active check removed * query fix * feat: Docker pull env driven (#5767) * useDockerApiToGetDigest menv driven flag to control pulling image either using docker pull or docker API * UseAppDockerConfigForPrivateRegistries in workflow request * revert * revert * fix: panic handlings and argocd app delete stuck in partial stage (#5770) * fix: panic handlings * fix: false positive matrics on gitOps failures * fix: for GetConfigForHelmApps err: pg no row * feat: plugin creation support (#5630) * wip: new plugin creation api and min plugin api with only shared plugin list * wip: create new plugin version code * wip:plugin type SHARED by default * wip:find plugin either by identifier or by id while creating a new version of existing plugin * wip: create new plugin tag logic improved * wip: optimize GetAllFilteredPluginParentMetadata query * wip: create plugin tag new flow * wip: minor fix * wip: minor fix * wip: minor fix * wip: newTagsPresent -> areNewTagsPresent * wip: icon is not mandatory code incorporated * wip:minor refactoring * wip: prevent duplicate version from being created and save tags relation only when * wip: minor fix * wip: details api, get all plugin data or non * wip: code review incorp part -1 * wip: code review incorp part -2 * wip: code review incorp part -3 * wip: remove code duplication * wip: hardcode isExposed to true * wip: hardcode StepType= inline * wip: set default VariableStepIndex= 1 * Revert "feat: plugin creation support (#5630)" (#5778) This reverts commit 4296366ae288f3a67f87e547d2b946acbcd2dd65. 
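Among the plugin-creation items listed above, "prevent duplicate version from being created" is a uniqueness guard on (plugin identifier, version) applied before a save. A minimal sketch with illustrative types, not the actual repository layer:

    package main

    import "fmt"

    // PluginParent holds the existing versions for one plugin identifier;
    // the shape is illustrative.
    type PluginParent struct {
        Identifier string
        Versions   map[string]bool
    }

    // createPluginVersion refuses to save a version string that already
    // exists for the same parent plugin, the duplicate-version guard noted
    // in the commit list above.
    func createPluginVersion(p *PluginParent, version string) error {
        if p.Versions[version] {
            return fmt.Errorf("plugin %s already has version %s", p.Identifier, version)
        }
        p.Versions[version] = true
        return nil
    }

    func main() {
        p := &PluginParent{Identifier: "copy-artifact", Versions: map[string]bool{"1.0.0": true}}
        fmt.Println(createPluginVersion(p, "1.0.0")) // rejected as duplicate
        fmt.Println(createPluginVersion(p, "1.1.0")) // created
    }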
* fix: unimplemented cluster cron service (#5781) * fix: sql injection fixes (#5783) * sql injection fixes * query param init fix * doc: Vulnerability Scanning Plugin doc (#5722) * vulnerability scanning plugin doc * summary.md added * docs: Jira plugins doc (Validator + Updater) (#5709) * Docs of Jira Plugins - Validator and Updater * Added Index Links * Additional Proofreading * add basic auth and tls for sm (#5789) * docs: added commands enable ingress during helm installation (#5794) * added commands enable ingress during helm installation * modified commands * improved statement * removed unrequired lines --------- Co-authored-by: Badal Kumar Prusty * Revamped + Restructured Ingress Setup Doc (#5798) * modifying route (#5799) Co-authored-by: Badal Kumar Prusty * fix: cron status update refactoring (#5790) Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> * docs: modified the anchor link in ingress.md (#5800) * modifying route * modified the anchor link --------- Co-authored-by: Badal Kumar Prusty * query param split (#5801) * fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) * upgraded to /argo-cd/v2 v2.9.21 * argocd vendor files added * sync with common-lib * fix: Ea rbac fixes (#5813) * app found using display or app name * single query optimization * display name handling for new apps --------- Co-authored-by: kartik-579 * fix: scan list in global security page sql injection fix (#5808) * scan list in global security page sql injection fix * comment * fix: sync with common-lib changes This PR was created by multi-gitter to sync common-lib changes across multiple repositories. * sync with common-lib --------- Co-authored-by: Prakash Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: adi6859 Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: kartik-579 Co-authored-by: prakhar katiyar * fix: sync with common-lib changes This PR was created by multi-gitter to sync common-lib changes across multiple repositories.
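Several commits above (#5783, #5801, #5808) are SQL-injection fixes around query parameters. The usual remedy, consistent with the go-pg queries later in this patch (e.g. Where("image = ?", imageName)), is to keep user input out of the SQL string and pass it as a bind parameter. A small self-contained sketch of the pattern follows; the table and column names are illustrative only.

// Hedged sketch of the parameterized-query pattern behind the sql injection fixes.
package main

import "fmt"

func main() {
	userInput := "nginx'; DROP TABLE ci_artifact; --"

	// Vulnerable: user input concatenated straight into the statement, so the
	// quote in userInput changes the query's structure.
	unsafe := "SELECT id FROM ci_artifact WHERE image = '" + userInput + "'"
	fmt.Println("unsafe:", unsafe)

	// Safe: the statement is fixed and the value travels separately as a bind
	// parameter, e.g. dbConnection.Model(&CiArtifact{}).Where("image = ?", userInput)
	// in go-pg; the driver escapes it, so it can never alter the query shape.
	safeQuery := "SELECT id FROM ci_artifact WHERE image = ?"
	safeArgs := []interface{}{userInput}
	fmt.Println("safe:", safeQuery, safeArgs)
}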
--------- Co-authored-by: ayu-devtron <167413063+ayu-devtron@users.noreply.github.com> Co-authored-by: Vikram Singh Co-authored-by: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Co-authored-by: Shivam Nagar <124123645+Shivam-nagar23@users.noreply.github.com> Co-authored-by: Prakash Co-authored-by: adi6859 Co-authored-by: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com> Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com> Co-authored-by: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com> Co-authored-by: Asutosh Das Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: kripanshdevtron <107392309+kripanshdevtron@users.noreply.github.com> Co-authored-by: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Co-authored-by: Badal Kumar <130441461+badal773@users.noreply.github.com> Co-authored-by: Badal Kumar Prusty Co-authored-by: kartik-579 <84493919+kartik-579@users.noreply.github.com> Co-authored-by: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Co-authored-by: Bhushan Nemade Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Co-authored-by: prakhar katiyar Co-authored-by: Nishant <58689354+nishant-d@users.noreply.github.com> Co-authored-by: kartik-579 --- Dockerfile | 3 +- DockerfileEA | 3 +- .../ArgoApplicationRestHandler.go | 9 +- api/argoApplication/wire_argoApplication.go | 4 + api/helm-app/HelmAppRestHandler.go | 11 +- .../application/k8sApplicationRestHandler.go | 73 +- api/restHandler/GlobalPluginRestHandler.go | 67 ++ api/router/GlobalPluginRouter.go | 5 +- .../assets => argocd-assets}/badge.svg | 0 .../builtin-policy.csv | 0 .../assets => argocd-assets}/model.conf | 0 .../assets => argocd-assets}/swagger.json | 0 cmd/external-app/wire_gen.go | 22 +- go.mod | 5 +- go.sum | 4 +- .../sql/repository/CiArtifactRepository.go | 25 + internal/util/ErrorUtil.go | 9 + pkg/argoApplication/ArgoApplicationService.go | 406 ++------ pkg/argoApplication/{ => helper}/helper.go | 2 +- .../read/ArgoApplicationReadService.go | 402 ++++++++ pkg/eventProcessor/bean/workflowEventBean.go | 31 +- .../in/WorkflowEventProcessorService.go | 44 +- pkg/k8s/K8sCommonService.go | 12 +- pkg/k8s/application/k8sApplicationService.go | 73 +- .../AppDeploymentTypeChangeManager.go | 2 +- pkg/pipeline/CiService.go | 77 +- pkg/pipeline/PipelineBuilder.go | 2 +- pkg/pipeline/WebhookService.go | 16 +- pkg/plugin/GlobalPluginService.go | 412 ++++++-- pkg/plugin/adaptor/adaptor.go | 80 ++ pkg/plugin/bean/bean.go | 84 +- .../repository/GlobalPluginRepository.go | 178 +++- pkg/plugin/utils/utils.go | 25 + pkg/terminal/terminalSesion.go | 32 +- pkg/workflow/dag/WorkflowDagExecutor.go | 6 +- scripts/sql/283_user_group.down.sql | 8 + scripts/sql/283_user_group.up.sql | 42 + scripts/sql/284_polling_plugin_v2.down.sql | 5 + scripts/sql/284_polling_plugin_v2.up.sql | 30 + scripts/sql/285_release_channel.down.sql | 3 + scripts/sql/285_release_channel.up.sql | 206 ++++ util/helper.go | 16 - .../aws/aws-sdk-go-v2/service/ecr/LICENSE.txt | 202 ---- .../aws-sdk-go-v2/service/ecr/types/enums.go | 326 ------- .../aws-sdk-go-v2/service/ecr/types/errors.go | 905 ------------------ .../aws-sdk-go-v2/service/ecr/types/types.go | 882 ----------------- vendor/github.com/aws/smithy-go/.gitignore | 26 - vendor/github.com/aws/smithy-go/.travis.yml | 28 - vendor/github.com/aws/smithy-go/CHANGELOG.md | 182 ---- 
.../aws/smithy-go/CODE_OF_CONDUCT.md | 4 - .../github.com/aws/smithy-go/CONTRIBUTING.md | 59 -- vendor/github.com/aws/smithy-go/LICENSE | 175 ---- vendor/github.com/aws/smithy-go/Makefile | 97 -- vendor/github.com/aws/smithy-go/NOTICE | 1 - vendor/github.com/aws/smithy-go/README.md | 12 - vendor/github.com/aws/smithy-go/doc.go | 2 - vendor/github.com/aws/smithy-go/document.go | 10 - .../github.com/aws/smithy-go/document/doc.go | 12 - .../aws/smithy-go/document/document.go | 153 --- .../aws/smithy-go/document/errors.go | 75 -- vendor/github.com/aws/smithy-go/errors.go | 137 --- .../aws/smithy-go/go_module_metadata.go | 6 - .../aws/smithy-go/local-mod-replace.sh | 39 - vendor/github.com/aws/smithy-go/modman.toml | 11 - vendor/github.com/aws/smithy-go/properties.go | 52 - vendor/github.com/aws/smithy-go/validation.go | 140 --- .../common-lib/blob-storage/GCPBlob.go | 5 +- .../common-lib/git-manager/GitCliManager.go | 7 +- .../common-lib/git-manager/GitManager.go | 23 +- .../common-lib/utils/CommonUtils.go | 35 + .../common-lib/utils/bean/bean.go | 8 + .../common-lib/utils/registry/bean.go | 18 + .../utils/registry/pluginArtifact.go | 101 ++ vendor/modules.txt | 10 +- wire_gen.go | 22 +- 75 files changed, 1986 insertions(+), 4213 deletions(-) rename {vendor/github.com/argoproj/argo-cd/assets => argocd-assets}/badge.svg (100%) rename {vendor/github.com/argoproj/argo-cd/assets => argocd-assets}/builtin-policy.csv (100%) rename {vendor/github.com/argoproj/argo-cd/assets => argocd-assets}/model.conf (100%) rename {vendor/github.com/argoproj/argo-cd/assets => argocd-assets}/swagger.json (100%) rename pkg/argoApplication/{ => helper}/helper.go (98%) create mode 100644 pkg/argoApplication/read/ArgoApplicationReadService.go create mode 100644 pkg/plugin/adaptor/adaptor.go create mode 100644 scripts/sql/283_user_group.down.sql create mode 100644 scripts/sql/283_user_group.up.sql create mode 100644 scripts/sql/284_polling_plugin_v2.down.sql create mode 100644 scripts/sql/284_polling_plugin_v2.up.sql create mode 100644 scripts/sql/285_release_channel.down.sql create mode 100644 scripts/sql/285_release_channel.up.sql delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go delete mode 100644 vendor/github.com/aws/smithy-go/.gitignore delete mode 100644 vendor/github.com/aws/smithy-go/.travis.yml delete mode 100644 vendor/github.com/aws/smithy-go/CHANGELOG.md delete mode 100644 vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/aws/smithy-go/CONTRIBUTING.md delete mode 100644 vendor/github.com/aws/smithy-go/LICENSE delete mode 100644 vendor/github.com/aws/smithy-go/Makefile delete mode 100644 vendor/github.com/aws/smithy-go/NOTICE delete mode 100644 vendor/github.com/aws/smithy-go/README.md delete mode 100644 vendor/github.com/aws/smithy-go/doc.go delete mode 100644 vendor/github.com/aws/smithy-go/document.go delete mode 100644 vendor/github.com/aws/smithy-go/document/doc.go delete mode 100644 vendor/github.com/aws/smithy-go/document/document.go delete mode 100644 vendor/github.com/aws/smithy-go/document/errors.go delete mode 100644 vendor/github.com/aws/smithy-go/errors.go delete mode 100644 vendor/github.com/aws/smithy-go/go_module_metadata.go delete mode 100644 
vendor/github.com/aws/smithy-go/local-mod-replace.sh delete mode 100644 vendor/github.com/aws/smithy-go/modman.toml delete mode 100644 vendor/github.com/aws/smithy-go/properties.go delete mode 100644 vendor/github.com/aws/smithy-go/validation.go create mode 100644 vendor/github.com/devtron-labs/common-lib/utils/registry/bean.go create mode 100644 vendor/github.com/devtron-labs/common-lib/utils/registry/pluginArtifact.go diff --git a/Dockerfile b/Dockerfile index 37df5c28c61..fc5dbe1dcef 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,8 @@ RUN apt clean autoclean RUN apt autoremove -y && rm -rf /var/lib/apt/lists/* COPY --from=build-env /go/src/github.com/devtron-labs/devtron/devtron . COPY --from=build-env /go/src/github.com/devtron-labs/devtron/auth_model.conf . -COPY --from=build-env /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets +#COPY --from=build-env /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets +COPY --from=build-env /go/src/github.com/devtron-labs/devtron/argocd-assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/devtron-reference-helm-charts scripts/devtron-reference-helm-charts COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/sql scripts/sql COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/casbin scripts/casbin diff --git a/DockerfileEA b/DockerfileEA index b142d25dad1..72b8e31849c 100644 --- a/DockerfileEA +++ b/DockerfileEA @@ -17,7 +17,8 @@ RUN apt autoremove -y && rm -rf /var/lib/apt/lists/* COPY --from=build-env /go/src/github.com/devtron-labs/devtron/auth_model.conf . COPY --from=build-env /go/src/github.com/devtron-labs/devtron/cmd/external-app/devtron-ea . 
-COPY --from=build-env /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets +#COPY --from=build-env /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets +COPY --from=build-env /go/src/github.com/devtron-labs/devtron/argocd-assets/ /go/src/github.com/devtron-labs/devtron/vendor/github.com/argoproj/argo-cd/assets COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/devtron-reference-helm-charts scripts/devtron-reference-helm-charts COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/sql scripts/sql COPY --from=build-env /go/src/github.com/devtron-labs/devtron/scripts/casbin scripts/casbin diff --git a/api/argoApplication/ArgoApplicationRestHandler.go b/api/argoApplication/ArgoApplicationRestHandler.go index 5478f62e47f..a48812d3290 100644 --- a/api/argoApplication/ArgoApplicationRestHandler.go +++ b/api/argoApplication/ArgoApplicationRestHandler.go @@ -20,6 +20,7 @@ import ( "errors" "github.com/devtron-labs/devtron/api/restHandler/common" "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" "go.uber.org/zap" "net/http" @@ -34,14 +35,16 @@ type ArgoApplicationRestHandler interface { type ArgoApplicationRestHandlerImpl struct { argoApplicationService argoApplication.ArgoApplicationService + readService read.ArgoApplicationReadService logger *zap.SugaredLogger enforcer casbin.Enforcer } func NewArgoApplicationRestHandlerImpl(argoApplicationService argoApplication.ArgoApplicationService, - logger *zap.SugaredLogger, enforcer casbin.Enforcer) *ArgoApplicationRestHandlerImpl { + readService read.ArgoApplicationReadService, logger *zap.SugaredLogger, enforcer casbin.Enforcer) *ArgoApplicationRestHandlerImpl { return &ArgoApplicationRestHandlerImpl{ argoApplicationService: argoApplicationService, + readService: readService, logger: logger, enforcer: enforcer, } @@ -101,9 +104,9 @@ func (handler *ArgoApplicationRestHandlerImpl) GetApplicationDetail(w http.Respo return } } - resp, err := handler.argoApplicationService.GetAppDetail(resourceName, namespace, clusterId) + resp, err := handler.readService.GetAppDetail(resourceName, namespace, clusterId) if err != nil { - handler.logger.Errorw("error in listing all argo applications", "err", err, "resourceName", resourceName, "clusterId", clusterId) + handler.logger.Errorw("error in getting argo application app detail", "err", err, "resourceName", resourceName, "clusterId", clusterId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } diff --git a/api/argoApplication/wire_argoApplication.go b/api/argoApplication/wire_argoApplication.go index a65ac47028f..d70cfd30822 100644 --- a/api/argoApplication/wire_argoApplication.go +++ b/api/argoApplication/wire_argoApplication.go @@ -18,10 +18,14 @@ package argoApplication import ( "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/google/wire" ) var ArgoApplicationWireSet = wire.NewSet( + read.NewArgoApplicationReadServiceImpl, + wire.Bind(new(read.ArgoApplicationReadService), new(*read.ArgoApplicationReadServiceImpl)), + argoApplication.NewArgoApplicationServiceImpl, wire.Bind(new(argoApplication.ArgoApplicationService), 
new(*argoApplication.ArgoApplicationServiceImpl)), diff --git a/api/helm-app/HelmAppRestHandler.go b/api/helm-app/HelmAppRestHandler.go index 059ce2ad9d2..3762a109901 100644 --- a/api/helm-app/HelmAppRestHandler.go +++ b/api/helm-app/HelmAppRestHandler.go @@ -25,9 +25,11 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service" "github.com/devtron-labs/devtron/pkg/appStore/installedApp/service/EAMode" "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/helper" clientErrors "github.com/devtron-labs/devtron/pkg/errors" "github.com/devtron-labs/devtron/pkg/fluxApplication" bean2 "github.com/devtron-labs/devtron/pkg/k8s/application/bean" + "github.com/devtron-labs/devtron/pkg/pipeline" "net/http" "strconv" "strings" @@ -144,6 +146,11 @@ func (handler *HelmAppRestHandlerImpl) GetApplicationDetail(w http.ResponseWrite //RBAC enforcer Ends appdetail, err := handler.helmAppService.GetApplicationDetail(context.Background(), appIdentifier) if err != nil { + + if pipeline.CheckAppReleaseNotExist(err) { + common.WriteJsonResp(w, err, nil, http.StatusNotFound) + return + } apiError := clientErrors.ConvertToApiError(err) if apiError != nil { err = apiError @@ -226,7 +233,7 @@ func (handler *HelmAppRestHandlerImpl) handleFluxApplicationHibernate(r *http.Re return handler.fluxApplication.HibernateFluxApplication(r.Context(), appIdentifier, hibernateRequest) } func (handler *HelmAppRestHandlerImpl) handleArgoApplicationHibernate(r *http.Request, token string, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(*hibernateRequest.AppId) + appIdentifier, err := helper.DecodeExternalArgoAppId(*hibernateRequest.AppId) if err != nil { return nil, err } @@ -310,7 +317,7 @@ func (handler *HelmAppRestHandlerImpl) handleFluxApplicationUnHibernate(r *http. 
return handler.fluxApplication.UnHibernateFluxApplication(r.Context(), appIdentifier, hibernateRequest) } func (handler *HelmAppRestHandlerImpl) handleArgoApplicationUnHibernate(r *http.Request, token string, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(*hibernateRequest.AppId) + appIdentifier, err := helper.DecodeExternalArgoAppId(*hibernateRequest.AppId) if err != nil { return nil, err } diff --git a/api/k8s/application/k8sApplicationRestHandler.go b/api/k8s/application/k8sApplicationRestHandler.go index d153d84c980..8f5e7ee968b 100644 --- a/api/k8s/application/k8sApplicationRestHandler.go +++ b/api/k8s/application/k8sApplicationRestHandler.go @@ -33,7 +33,8 @@ import ( client "github.com/devtron-labs/devtron/api/helm-app/service" "github.com/devtron-labs/devtron/api/restHandler/common" util2 "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/helper" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/cluster" @@ -81,39 +82,39 @@ type K8sApplicationRestHandler interface { } type K8sApplicationRestHandlerImpl struct { - logger *zap.SugaredLogger - k8sApplicationService application2.K8sApplicationService - pump connector.Pump - terminalSessionHandler terminal.TerminalSessionHandler - enforcer casbin.Enforcer - validator *validator.Validate - enforcerUtil rbac.EnforcerUtil - enforcerUtilHelm rbac.EnforcerUtilHelm - helmAppService client.HelmAppService - userService user.UserService - k8sCommonService k8s.K8sCommonService - terminalEnvVariables *util.TerminalEnvVariables - fluxAppService fluxApplication.FluxApplicationService - argoApplication argoApplication.ArgoApplicationService + logger *zap.SugaredLogger + k8sApplicationService application2.K8sApplicationService + pump connector.Pump + terminalSessionHandler terminal.TerminalSessionHandler + enforcer casbin.Enforcer + validator *validator.Validate + enforcerUtil rbac.EnforcerUtil + enforcerUtilHelm rbac.EnforcerUtilHelm + helmAppService client.HelmAppService + userService user.UserService + k8sCommonService k8s.K8sCommonService + terminalEnvVariables *util.TerminalEnvVariables + fluxAppService fluxApplication.FluxApplicationService + argoApplicationReadService read.ArgoApplicationReadService } -func NewK8sApplicationRestHandlerImpl(logger *zap.SugaredLogger, k8sApplicationService application2.K8sApplicationService, pump connector.Pump, terminalSessionHandler terminal.TerminalSessionHandler, enforcer casbin.Enforcer, enforcerUtilHelm rbac.EnforcerUtilHelm, enforcerUtil rbac.EnforcerUtil, helmAppService client.HelmAppService, userService user.UserService, k8sCommonService k8s.K8sCommonService, validator *validator.Validate, envVariables *util.EnvironmentVariables, fluxAppService fluxApplication.FluxApplicationService, argoApplication argoApplication.ArgoApplicationService, +func NewK8sApplicationRestHandlerImpl(logger *zap.SugaredLogger, k8sApplicationService application2.K8sApplicationService, pump connector.Pump, terminalSessionHandler terminal.TerminalSessionHandler, enforcer casbin.Enforcer, enforcerUtilHelm rbac.EnforcerUtilHelm, enforcerUtil rbac.EnforcerUtil, helmAppService client.HelmAppService, userService user.UserService, k8sCommonService k8s.K8sCommonService, 
validator *validator.Validate, envVariables *util.EnvironmentVariables, fluxAppService fluxApplication.FluxApplicationService, argoApplicationReadService read.ArgoApplicationReadService, ) *K8sApplicationRestHandlerImpl { return &K8sApplicationRestHandlerImpl{ - logger: logger, - k8sApplicationService: k8sApplicationService, - pump: pump, - terminalSessionHandler: terminalSessionHandler, - enforcer: enforcer, - validator: validator, - enforcerUtilHelm: enforcerUtilHelm, - enforcerUtil: enforcerUtil, - helmAppService: helmAppService, - userService: userService, - k8sCommonService: k8sCommonService, - terminalEnvVariables: envVariables.TerminalEnvVariables, - fluxAppService: fluxAppService, - argoApplication: argoApplication, + logger: logger, + k8sApplicationService: k8sApplicationService, + pump: pump, + terminalSessionHandler: terminalSessionHandler, + enforcer: enforcer, + validator: validator, + enforcerUtilHelm: enforcerUtilHelm, + enforcerUtil: enforcerUtil, + helmAppService: helmAppService, + userService: userService, + k8sCommonService: k8sCommonService, + terminalEnvVariables: envVariables.TerminalEnvVariables, + fluxAppService: fluxAppService, + argoApplicationReadService: argoApplicationReadService, } } @@ -289,7 +290,7 @@ func (handler *K8sApplicationRestHandlerImpl) GetHostUrlsByBatch(w http.Response resourceTreeResponse = appDetail.ResourceTreeResponse } else if appType == bean2.ArgoAppType { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(appIdString) + appIdentifier, err := helper.DecodeExternalArgoAppId(appIdString) if err != nil { common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return @@ -301,7 +302,7 @@ func (handler *K8sApplicationRestHandlerImpl) GetHostUrlsByBatch(w http.Response } //RBAC enforcer Ends - appDetail, err := handler.argoApplication.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) + appDetail, err := handler.argoApplicationReadService.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) if err != nil { apiError := clientErrors.ConvertToApiError(err) if apiError != nil { @@ -721,12 +722,12 @@ func (handler *K8sApplicationRestHandlerImpl) requestValidationAndRBAC(w http.Re } //RBAC enforcer ends here } else if request.AppType == bean2.ArgoAppType && request.ExternalArgoApplicationName != "" { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(request.AppId) + appIdentifier, err := helper.DecodeExternalArgoAppId(request.AppId) if err != nil { handler.logger.Errorw(bean2.AppIdDecodingError, "err", err, "appIdentifier", request.AppIdentifier) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) } - valid, err := handler.k8sApplicationService.ValidateArgoResourceRequest(r.Context(), appIdentifier, request.K8sRequest) + valid, err := handler.argoApplicationReadService.ValidateArgoResourceRequest(r.Context(), appIdentifier, request.K8sRequest) if err != nil || !valid { handler.logger.Errorw("error in validating resource request", "err", err) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) @@ -1145,14 +1146,14 @@ func (handler *K8sApplicationRestHandlerImpl) verifyRbacForAppRequests(token str envObject := "" switch request.AppType { case bean2.ArgoAppType: - argoAppIdentifier, err := argoApplication.DecodeExternalArgoAppId(request.AppId) + argoAppIdentifier, err := helper.DecodeExternalArgoAppId(request.AppId) if err != nil { handler.logger.Errorw("error in decoding appId", "err", err, "appId", request.AppId) return false, err } request.ClusterId 
= argoAppIdentifier.ClusterId request.ExternalArgoApplicationName = argoAppIdentifier.AppName - valid, err := handler.k8sApplicationService.ValidateArgoResourceRequest(r.Context(), argoAppIdentifier, request.K8sRequest) + valid, err := handler.argoApplicationReadService.ValidateArgoResourceRequest(r.Context(), argoAppIdentifier, request.K8sRequest) if err != nil || !valid { handler.logger.Errorw("error in validating resource request", "err", err) return false, err diff --git a/api/restHandler/GlobalPluginRestHandler.go b/api/restHandler/GlobalPluginRestHandler.go index 7fc25ec6b16..44a2305c0ab 100644 --- a/api/restHandler/GlobalPluginRestHandler.go +++ b/api/restHandler/GlobalPluginRestHandler.go @@ -35,6 +35,7 @@ import ( type GlobalPluginRestHandler interface { PatchPlugin(w http.ResponseWriter, r *http.Request) + CreatePlugin(w http.ResponseWriter, r *http.Request) GetAllGlobalVariables(w http.ResponseWriter, r *http.Request) ListAllPlugins(w http.ResponseWriter, r *http.Request) @@ -46,6 +47,7 @@ type GlobalPluginRestHandler interface { GetPluginDetailByIds(w http.ResponseWriter, r *http.Request) GetAllUniqueTags(w http.ResponseWriter, r *http.Request) MigratePluginData(w http.ResponseWriter, r *http.Request) + GetAllPluginMinData(w http.ResponseWriter, r *http.Request) } func NewGlobalPluginRestHandler(logger *zap.SugaredLogger, globalPluginService plugin.GlobalPluginService, @@ -420,3 +422,68 @@ func (handler *GlobalPluginRestHandlerImpl) MigratePluginData(w http.ResponseWri } common.WriteJsonResp(w, nil, nil, http.StatusOK) } + +func (handler *GlobalPluginRestHandlerImpl) CreatePlugin(w http.ResponseWriter, r *http.Request) { + userId, err := handler.userService.GetLoggedInUser(r) + if userId == 0 || err != nil { + common.WriteJsonResp(w, err, "Unauthorized User", http.StatusUnauthorized) + return + } + token := r.Header.Get("token") + appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) + if err != nil { + return + } + ok, err := handler.IsUserAuthorized(token, appId) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User", http.StatusForbidden) + return + } + decoder := json.NewDecoder(r.Body) + var pluginDataDto bean.PluginParentMetadataDto + err = decoder.Decode(&pluginDataDto) + if err != nil { + handler.logger.Errorw("request err, CreatePlugin", "error", err, "payload", pluginDataDto) + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + handler.logger.Infow("request payload received for creating plugins", "pluginDataDto", pluginDataDto, "userId", userId) + + pluginVersionId, err := handler.globalPluginService.CreatePluginOrVersions(&pluginDataDto, userId) + if err != nil { + handler.logger.Errorw("service error, error in creating plugin", "pluginCreateRequestDto", pluginDataDto, "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + + common.WriteJsonResp(w, nil, bean.NewPluginMinDto().WithPluginVersionId(pluginVersionId), http.StatusOK) +} + +func (handler *GlobalPluginRestHandlerImpl) GetAllPluginMinData(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("token") + appId, err := common.ExtractIntQueryParam(w, r, "appId", 0) + if err != nil { + return + } + ok, err := handler.IsUserAuthorized(token, appId) + if err != nil { + common.WriteJsonResp(w, err, nil, http.StatusBadRequest) + return + } + if !ok { + common.WriteJsonResp(w, fmt.Errorf("unauthorized user"), "Unauthorized User",
http.StatusForbidden) + return + } + + pluginDetail, err := handler.globalPluginService.GetAllPluginMinData() + if err != nil { + handler.logger.Errorw("error in getting all plugin min data", "err", err) + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } + common.WriteJsonResp(w, nil, pluginDetail, http.StatusOK) +} diff --git a/api/router/GlobalPluginRouter.go b/api/router/GlobalPluginRouter.go index 06950c33421..0d8154bf822 100644 --- a/api/router/GlobalPluginRouter.go +++ b/api/router/GlobalPluginRouter.go @@ -41,7 +41,8 @@ type GlobalPluginRouterImpl struct { func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *mux.Router) { globalPluginRouter.Path("/migrate"). HandlerFunc(impl.globalPluginRestHandler.MigratePluginData).Methods("PUT") - + globalPluginRouter.Path("/create"). + HandlerFunc(impl.globalPluginRestHandler.CreatePlugin).Methods("POST") // versioning impact handling to be done for below apis, globalPluginRouter.Path(""). HandlerFunc(impl.globalPluginRestHandler.PatchPlugin).Methods("POST") @@ -68,5 +69,7 @@ func (impl *GlobalPluginRouterImpl) initGlobalPluginRouter(globalPluginRouter *m globalPluginRouter.Path("/list/tags"). HandlerFunc(impl.globalPluginRestHandler.GetAllUniqueTags).Methods("GET") + globalPluginRouter.Path("/list/v2/min"). + HandlerFunc(impl.globalPluginRestHandler.GetAllPluginMinData).Methods("GET") } diff --git a/vendor/github.com/argoproj/argo-cd/assets/badge.svg b/argocd-assets/badge.svg similarity index 100% rename from vendor/github.com/argoproj/argo-cd/assets/badge.svg rename to argocd-assets/badge.svg diff --git a/vendor/github.com/argoproj/argo-cd/assets/builtin-policy.csv b/argocd-assets/builtin-policy.csv similarity index 100% rename from vendor/github.com/argoproj/argo-cd/assets/builtin-policy.csv rename to argocd-assets/builtin-policy.csv diff --git a/vendor/github.com/argoproj/argo-cd/assets/model.conf b/argocd-assets/model.conf similarity index 100% rename from vendor/github.com/argoproj/argo-cd/assets/model.conf rename to argocd-assets/model.conf diff --git a/vendor/github.com/argoproj/argo-cd/assets/swagger.json b/argocd-assets/swagger.json similarity index 100% rename from vendor/github.com/argoproj/argo-cd/assets/swagger.json rename to argocd-assets/swagger.json diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index 5271cca4cdd..ab3bca55cdc 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -67,6 +67,7 @@ import ( "github.com/devtron-labs/devtron/pkg/appStore/values/repository" service4 "github.com/devtron-labs/devtron/pkg/appStore/values/service" "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/devtron-labs/devtron/pkg/argoRepositoryCreds" "github.com/devtron-labs/devtron/pkg/attributes" "github.com/devtron-labs/devtron/pkg/auth/authentication" @@ -297,24 +298,25 @@ func InitializeApp() (*App, error) { deletePostProcessorImpl := service2.NewDeletePostProcessorImpl(sugaredLogger) appStoreDeploymentServiceImpl := service2.NewAppStoreDeploymentServiceImpl(sugaredLogger, installedAppRepositoryImpl, installedAppDBServiceImpl, appStoreDeploymentDBServiceImpl, chartGroupDeploymentRepositoryImpl, appStoreApplicationVersionRepositoryImpl, appRepositoryImpl, eaModeDeploymentServiceImpl, eaModeDeploymentServiceImpl, environmentServiceImpl, helmAppServiceImpl, installedAppVersionHistoryRepositoryImpl, environmentVariables, acdConfig, gitOpsConfigReadServiceImpl,
deletePostProcessorImpl, appStoreValidatorImpl, deploymentConfigServiceImpl) fluxApplicationServiceImpl := fluxApplication.NewFluxApplicationServiceImpl(sugaredLogger, helmAppServiceImpl, clusterServiceImpl, helmAppClientImpl, pumpImpl) - argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmUserServiceImpl, helmAppClientImpl, helmAppServiceImpl) - helmAppRestHandlerImpl := client2.NewHelmAppRestHandlerImpl(sugaredLogger, helmAppServiceImpl, enforcerImpl, clusterServiceImpl, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, installedAppDBServiceImpl, userServiceImpl, attributesServiceImpl, serverEnvConfigServerEnvConfig, fluxApplicationServiceImpl, argoApplicationServiceImpl) - helmAppRouterImpl := client2.NewHelmAppRouterImpl(helmAppRestHandlerImpl) - k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sServiceImpl, clusterServiceImpl, argoApplicationServiceImpl) - environmentRestHandlerImpl := cluster2.NewEnvironmentRestHandlerImpl(environmentServiceImpl, sugaredLogger, userServiceImpl, validate, enforcerImpl, deleteServiceImpl, k8sServiceImpl, k8sCommonServiceImpl) - environmentRouterImpl := cluster2.NewEnvironmentRouterImpl(environmentRestHandlerImpl) k8sResourceHistoryRepositoryImpl := repository8.NewK8sResourceHistoryRepositoryImpl(db, sugaredLogger) k8sResourceHistoryServiceImpl := kubernetesResourceAuditLogs.Newk8sResourceHistoryServiceImpl(k8sResourceHistoryRepositoryImpl, sugaredLogger, appRepositoryImpl, environmentRepositoryImpl) + argoApplicationReadServiceImpl := read.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmUserServiceImpl, helmAppClientImpl, helmAppServiceImpl) + k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sServiceImpl, clusterServiceImpl, argoApplicationReadServiceImpl) ephemeralContainersRepositoryImpl := repository2.NewEphemeralContainersRepositoryImpl(db, transactionUtilImpl) ephemeralContainerServiceImpl := cluster.NewEphemeralContainerServiceImpl(ephemeralContainersRepositoryImpl, sugaredLogger) - terminalSessionHandlerImpl := terminal.NewTerminalSessionHandlerImpl(environmentServiceImpl, clusterServiceImpl, sugaredLogger, k8sServiceImpl, ephemeralContainerServiceImpl, argoApplicationServiceImpl) - k8sApplicationServiceImpl, err := application.NewK8sApplicationServiceImpl(sugaredLogger, clusterServiceImpl, pumpImpl, helmAppServiceImpl, k8sServiceImpl, acdAuthConfig, k8sResourceHistoryServiceImpl, k8sCommonServiceImpl, terminalSessionHandlerImpl, ephemeralContainerServiceImpl, ephemeralContainersRepositoryImpl, argoApplicationServiceImpl, fluxApplicationServiceImpl) + terminalSessionHandlerImpl := terminal.NewTerminalSessionHandlerImpl(environmentServiceImpl, clusterServiceImpl, sugaredLogger, k8sServiceImpl, ephemeralContainerServiceImpl, argoApplicationReadServiceImpl) + k8sApplicationServiceImpl, err := application.NewK8sApplicationServiceImpl(sugaredLogger, clusterServiceImpl, pumpImpl, helmAppServiceImpl, k8sServiceImpl, acdAuthConfig, k8sResourceHistoryServiceImpl, k8sCommonServiceImpl, terminalSessionHandlerImpl, ephemeralContainerServiceImpl, ephemeralContainersRepositoryImpl, fluxApplicationServiceImpl) if err != nil { return nil, err } + argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, helmUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationReadServiceImpl) + 
helmAppRestHandlerImpl := client2.NewHelmAppRestHandlerImpl(sugaredLogger, helmAppServiceImpl, enforcerImpl, clusterServiceImpl, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, installedAppDBServiceImpl, userServiceImpl, attributesServiceImpl, serverEnvConfigServerEnvConfig, fluxApplicationServiceImpl, argoApplicationServiceImpl) + helmAppRouterImpl := client2.NewHelmAppRouterImpl(helmAppRestHandlerImpl) + environmentRestHandlerImpl := cluster2.NewEnvironmentRestHandlerImpl(environmentServiceImpl, sugaredLogger, userServiceImpl, validate, enforcerImpl, deleteServiceImpl, k8sServiceImpl, k8sCommonServiceImpl) + environmentRouterImpl := cluster2.NewEnvironmentRouterImpl(environmentRestHandlerImpl) ciPipelineRepositoryImpl := pipelineConfig.NewCiPipelineRepositoryImpl(db, sugaredLogger, transactionUtilImpl) enforcerUtilImpl := rbac.NewEnforcerUtilImpl(sugaredLogger, teamRepositoryImpl, appRepositoryImpl, environmentRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, clusterRepositoryImpl, enforcerImpl) - k8sApplicationRestHandlerImpl := application2.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationServiceImpl) + k8sApplicationRestHandlerImpl := application2.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationReadServiceImpl) k8sApplicationRouterImpl := application2.NewK8sApplicationRouterImpl(k8sApplicationRestHandlerImpl) chartRepositoryRestHandlerImpl := chartRepo2.NewChartRepositoryRestHandlerImpl(sugaredLogger, userServiceImpl, chartRepositoryServiceImpl, enforcerImpl, validate, deleteServiceImpl, attributesServiceImpl) chartRepositoryRouterImpl := chartRepo2.NewChartRepositoryRouterImpl(chartRepositoryRestHandlerImpl) @@ -428,7 +430,7 @@ func InitializeApp() (*App, error) { rbacRoleServiceImpl := user.NewRbacRoleServiceImpl(sugaredLogger, rbacRoleDataRepositoryImpl) rbacRoleRestHandlerImpl := user2.NewRbacRoleHandlerImpl(sugaredLogger, validate, rbacRoleServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl) rbacRoleRouterImpl := user2.NewRbacRoleRouterImpl(sugaredLogger, validate, rbacRoleRestHandlerImpl) - argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, sugaredLogger, enforcerImpl) + argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, argoApplicationReadServiceImpl, sugaredLogger, enforcerImpl) argoApplicationRouterImpl := argoApplication2.NewArgoApplicationRouterImpl(argoApplicationRestHandlerImpl) fluxApplicationRestHandlerImpl := fluxApplication2.NewFluxApplicationRestHandlerImpl(fluxApplicationServiceImpl, sugaredLogger, enforcerImpl) fluxApplicationRouterImpl := fluxApplication2.NewFluxApplicationRouterImpl(fluxApplicationRestHandlerImpl) diff --git a/go.mod b/go.mod index ddbebb37bae..d5f09c6b3dc 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deckarep/golang-set v1.8.0 github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 - 
github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 + github.com/devtron-labs/common-lib v0.16.1-0.20240911071031-2625327bc7b4 github.com/devtron-labs/go-bitbucket v0.9.60-beta github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 github.com/evanphx/json-patch v5.7.0+incompatible @@ -76,6 +76,7 @@ require ( go.uber.org/zap v1.21.0 golang.org/x/crypto v0.25.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/mod v0.17.0 golang.org/x/oauth2 v0.21.0 google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d google.golang.org/grpc v1.59.0 @@ -121,7 +122,6 @@ require ( github.com/apparentlymart/go-textseg v1.0.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/argoproj/pkg v0.13.7-0.20230627120311-a4dd357b057e // indirect - github.com/aws/smithy-go v1.14.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/bmatcuk/doublestar/v4 v4.6.0 // indirect @@ -245,7 +245,6 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect diff --git a/go.sum b/go.sum index fcbc80679f8..d4251e80e8c 100644 --- a/go.sum +++ b/go.sum @@ -188,8 +188,8 @@ github.com/devtron-labs/argo-workflows/v3 v3.5.10 h1:6rxQOesOzDz6SgQCMDQNHaehsKF github.com/devtron-labs/argo-workflows/v3 v3.5.10/go.mod h1:/vqxcovDPT4zqr4DjR5v7CF8ggpY1l3TSa2CIG3jmjA= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8 h1:2+Q7Jdhpo/uMiaQiZZzAh+ZX7wEJIFuMFG6DEiMuo64= github.com/devtron-labs/authenticator v0.4.35-0.20240809073103-6e11da8083f8/go.mod h1:702R6WIf5y9UzKGoCGxQ+x3l5Ws+l0fXg2xlCpSGFZI= -github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 h1:C5SMozwP2rVIKItqEZs3PtWkBhNnEeHIm9xtnDkK5VA= -github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63/go.mod h1:rAY9Xd6iz+OqNQ3nO3reVHapAVr1N6Osf4Irdc0A08Q= +github.com/devtron-labs/common-lib v0.16.1-0.20240911071031-2625327bc7b4 h1:OWhV5B2SQRWZges8cltVsyUrdA/8EByBjjRxX95qN7o= +github.com/devtron-labs/common-lib v0.16.1-0.20240911071031-2625327bc7b4/go.mod h1:rAY9Xd6iz+OqNQ3nO3reVHapAVr1N6Osf4Irdc0A08Q= github.com/devtron-labs/go-bitbucket v0.9.60-beta h1:VEx1jvDgdtDPS6A1uUFoaEi0l1/oLhbr+90xOwr6sDU= github.com/devtron-labs/go-bitbucket v0.9.60-beta/go.mod h1:GnuiCesvh8xyHeMCb+twm8lBR/kQzJYSKL28ZfObp1Y= github.com/devtron-labs/protos v0.0.3-0.20240802105333-92ee9bb85d80 h1:xwbTeijNTf4/j1v+tSfwVqwLVnReas/NqEKeQHvSTys= diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go index 08710aac5ba..9cf6a18d824 100644 --- a/internal/sql/repository/CiArtifactRepository.go +++ b/internal/sql/repository/CiArtifactRepository.go @@ -105,6 +105,8 @@ type CiArtifactRepository interface { Get(id int) (artifact *CiArtifact, err error) GetArtifactParentCiAndWorkflowDetailsByIds(ids []int) ([]*CiArtifact, error) GetByWfId(wfId int) (artifact *CiArtifact, err error) + IfArtifactExistByImage(imageName string, pipelineId int) (exist bool, err error) + IfArtifactExistByImageDigest(imageDigest string, imageName string, pipelineId int) (exist bool, err error) GetArtifactsByCDPipeline(cdPipelineId, limit int, parentId int, parentType bean.WorkflowType) ([]*CiArtifact, error) 
GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) GetLatestArtifactTimeByCiPipelineIds(ciPipelineIds []int) ([]*CiArtifact, error) @@ -316,6 +318,29 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipeline(cdPipelineId, limi return artifactsAll, err } +func (impl CiArtifactRepositoryImpl) IfArtifactExistByImage(imageName string, pipelineId int) (exist bool, err error) { + count, err := impl.dbConnection.Model(&CiArtifact{}). + Where("image = ?", imageName). + Where("pipeline_id = ?", pipelineId). + Count() + if err != nil { + return false, err + } + return count > 0, nil +} + +func (impl CiArtifactRepositoryImpl) IfArtifactExistByImageDigest(imageDigest string, imageName string, pipelineId int) (exist bool, err error) { + count, err := impl.dbConnection.Model(&CiArtifact{}). + Where("image_digest = ?", imageDigest). + Where("image = ?", imageName). + Where("pipeline_id = ?", pipelineId). + Count() + if err != nil { + return false, err + } + return count > 0, nil +} + func (impl CiArtifactRepositoryImpl) GetArtifactsByCDPipelineV3(listingFilterOpts *bean.ArtifactsListFilterOptions) ([]*CiArtifact, int, error) { if listingFilterOpts.ParentStageType != bean.CI_WORKFLOW_TYPE && listingFilterOpts.ParentStageType != bean.WEBHOOK_WORKFLOW_TYPE { diff --git a/internal/util/ErrorUtil.go b/internal/util/ErrorUtil.go index 59a8d234415..43f3c9b942e 100644 --- a/internal/util/ErrorUtil.go +++ b/internal/util/ErrorUtil.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "net/http" + "strconv" ) type ApiError struct { @@ -36,6 +37,14 @@ type ApiError struct { UserDetailMessage string `json:"userDetailMessage,omitempty"` } +func GetApiError(code int, userMessage, internalMessage string) *ApiError { + return &ApiError{ + HttpStatusCode: code, + Code: strconv.Itoa(code), + InternalMessage: internalMessage, + UserMessage: userMessage, + } +} func NewApiError() *ApiError { return &ApiError{} } diff --git a/pkg/argoApplication/ArgoApplicationService.go b/pkg/argoApplication/ArgoApplicationService.go index e75272b84f0..3f2908c7e88 100644 --- a/pkg/argoApplication/ArgoApplicationService.go +++ b/pkg/argoApplication/ArgoApplicationService.go @@ -18,57 +18,56 @@ package argoApplication import ( "context" - "encoding/json" - "fmt" + "github.com/devtron-labs/common-lib/utils/k8s" + k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/devtron/api/helm-app/gRPC" openapi "github.com/devtron-labs/devtron/api/helm-app/openapiClient" "github.com/devtron-labs/devtron/api/helm-app/service" - - "github.com/devtron-labs/common-lib/utils/k8s" - k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" "github.com/devtron-labs/devtron/pkg/argoApplication/bean" + "github.com/devtron-labs/devtron/pkg/argoApplication/helper" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" cluster2 "github.com/devtron-labs/devtron/pkg/cluster" clusterRepository "github.com/devtron-labs/devtron/pkg/cluster/repository" + k8s2 "github.com/devtron-labs/devtron/pkg/k8s" + "github.com/devtron-labs/devtron/pkg/k8s/application" "github.com/devtron-labs/devtron/util/argo" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/errors" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/rest" ) type ArgoApplicationService interface { ListApplications(clusterIds []int) ([]*bean.ArgoApplicationListDto, error) - GetAppDetail(resourceName, 
resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) - GetServerConfigIfClusterIsNotAddedOnDevtron(resourceResp *k8s.ManifestResponse, restConfig *rest.Config, - clusterWithApplicationObject clusterRepository.Cluster, clusterServerUrlIdMap map[string]int) (*rest.Config, error) - GetClusterConfigFromAllClusters(clusterId int) (*k8s.ClusterConfig, clusterRepository.Cluster, map[string]int, error) - GetRestConfigForExternalArgo(ctx context.Context, clusterId int, externalArgoApplicationName string) (*rest.Config, error) HibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) UnHibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) } type ArgoApplicationServiceImpl struct { - logger *zap.SugaredLogger - clusterRepository clusterRepository.ClusterRepository - k8sUtil *k8s.K8sServiceImpl - argoUserService argo.ArgoUserService - helmAppClient gRPC.HelmAppClient - helmAppService service.HelmAppService + logger *zap.SugaredLogger + clusterRepository clusterRepository.ClusterRepository + k8sUtil *k8s.K8sServiceImpl + argoUserService argo.ArgoUserService + helmAppClient gRPC.HelmAppClient + helmAppService service.HelmAppService + k8sApplicationService application.K8sApplicationService + readService read.ArgoApplicationReadService } func NewArgoApplicationServiceImpl(logger *zap.SugaredLogger, clusterRepository clusterRepository.ClusterRepository, k8sUtil *k8s.K8sServiceImpl, argoUserService argo.ArgoUserService, helmAppClient gRPC.HelmAppClient, - helmAppService service.HelmAppService) *ArgoApplicationServiceImpl { + helmAppService service.HelmAppService, + k8sApplicationService application.K8sApplicationService, + readService read.ArgoApplicationReadService) *ArgoApplicationServiceImpl { return &ArgoApplicationServiceImpl{ - logger: logger, - clusterRepository: clusterRepository, - k8sUtil: k8sUtil, - argoUserService: argoUserService, - helmAppService: helmAppService, - helmAppClient: helmAppClient, + logger: logger, + clusterRepository: clusterRepository, + k8sUtil: k8sUtil, + argoUserService: argoUserService, + helmAppService: helmAppService, + helmAppClient: helmAppClient, + k8sApplicationService: k8sApplicationService, + readService: readService, } } @@ -91,6 +90,14 @@ func (impl *ArgoApplicationServiceImpl) ListApplications(clusterIds []int) ([]*b } } + listReq := &k8s2.ResourceRequestBean{ + K8sRequest: &k8s.K8sRequestBean{ + ResourceIdentifier: k8s.ResourceIdentifier{ + Namespace: bean.AllNamespaces, + GroupVersionKind: bean.GvkForArgoApplication, + }, + }, + } // TODO: make goroutine and channel for optimization appListFinal := make([]*bean.ArgoApplicationListDto, 0) for _, cluster := range clusters { @@ -105,7 +112,7 @@ func (impl *ArgoApplicationServiceImpl) ListApplications(clusterIds []int) ([]*b impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", clusterObj.Id) return nil, err } - resp, _, err := impl.k8sUtil.GetResourceList(context.Background(), restConfig, bean.GvkForArgoApplication, bean.AllNamespaces, true, nil) + resp, err := impl.k8sApplicationService.GetResourceListWithRestConfig(context.Background(), "", listReq, nil, restConfig, clusterObj.ClusterName) if err != nil { if errStatus, ok := err.(*errors.StatusError); ok { if errStatus.Status().Code == 404 { @@ -117,352 +124,57 @@ func (impl *ArgoApplicationServiceImpl) 
ListApplications(clusterIds []int) ([]*b impl.logger.Errorw("error in getting resource list", "err", err) return nil, err } - appLists := getApplicationListDtos(resp.Resources.Object, clusterObj.ClusterName, clusterObj.Id) + appLists := getApplicationListDtos(resp, clusterObj.ClusterName, clusterObj.Id) appListFinal = append(appListFinal, appLists...) } return appListFinal, nil } -func (impl *ArgoApplicationServiceImpl) GetAppDetail(resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) { - appDetail := &bean.ArgoApplicationDetailDto{ - ArgoApplicationListDto: &bean.ArgoApplicationListDto{ - Name: resourceName, - Namespace: resourceNamespace, - ClusterId: clusterId, - }, - } - clusters, err := impl.clusterRepository.FindAllActive() - if err != nil { - impl.logger.Errorw("error in getting all active clusters", "err", err) - return nil, err - } - var clusterWithApplicationObject clusterRepository.Cluster - clusterServerUrlIdMap := make(map[string]int, len(clusters)) - for _, cluster := range clusters { - if cluster.Id == clusterId { - clusterWithApplicationObject = cluster - } - clusterServerUrlIdMap[cluster.ServerUrl] = cluster.Id - } - if clusterWithApplicationObject.Id > 0 { - appDetail.ClusterName = clusterWithApplicationObject.ClusterName - } - if clusterWithApplicationObject.IsVirtualCluster { - return appDetail, nil - } else if len(clusterWithApplicationObject.ErrorInConnecting) != 0 { - return nil, fmt.Errorf("error in connecting to cluster") - } - clusterBean := cluster2.GetClusterBean(clusterWithApplicationObject) - clusterConfig := clusterBean.GetClusterConfig() - restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) - if err != nil { - impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", clusterWithApplicationObject.Id) - return nil, err - } - resp, err := impl.k8sUtil.GetResource(context.Background(), resourceNamespace, resourceName, bean.GvkForArgoApplication, restConfig) - if err != nil { - impl.logger.Errorw("error in getting resource list", "err", err) - return nil, err - } - var destinationServer string - var argoManagedResources []*bean.ArgoManagedResource - if resp != nil && resp.Manifest.Object != nil { - appDetail.Manifest = resp.Manifest.Object - appDetail.HealthStatus, appDetail.SyncStatus, destinationServer, argoManagedResources = - getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resp.Manifest.Object) - } - appDeployedOnClusterId := 0 - if destinationServer == k8s.DefaultClusterUrl { - appDeployedOnClusterId = clusterWithApplicationObject.Id - } else if clusterIdFromMap, ok := clusterServerUrlIdMap[destinationServer]; ok { - appDeployedOnClusterId = clusterIdFromMap - } - var configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj - if appDeployedOnClusterId < 1 { - // cluster is not added on devtron, need to get server config from secret which argo-cd saved - coreV1Client, err := impl.k8sUtil.GetCoreV1ClientByRestConfig(restConfig) - secrets, err := coreV1Client.Secrets(bean.AllNamespaces).List(context.Background(), v1.ListOptions{ - LabelSelector: labels.SelectorFromSet(labels.Set{"argocd.argoproj.io/secret-type": "cluster"}).String(), - }) - if err != nil { - impl.logger.Errorw("error in getting resource list, secrets", "err", err) - return nil, err - } - for _, secret := range secrets.Items { - if secret.Data != nil { - if val, ok := secret.Data["server"]; ok { - if string(val) == destinationServer { - if config, ok := secret.Data["config"]; 
ok { - err = json.Unmarshal(config, &configOfClusterWhereAppIsDeployed) - if err != nil { - impl.logger.Errorw("error in unmarshaling", "err", err) - return nil, err - } - break - } - } - } - } - } - } - resourceTreeResp, err := impl.getResourceTreeForExternalCluster(appDeployedOnClusterId, destinationServer, configOfClusterWhereAppIsDeployed, argoManagedResources) - if err != nil { - impl.logger.Errorw("error in getting resource tree response", "err", err) - return nil, err - } - appDetail.ResourceTree = resourceTreeResp - return appDetail, nil -} - -func (impl *ArgoApplicationServiceImpl) getResourceTreeForExternalCluster(clusterId int, destinationServer string, - configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj, argoManagedResources []*bean.ArgoManagedResource) (*gRPC.ResourceTreeResponse, error) { - var resources []*gRPC.ExternalResourceDetail - for _, argoManagedResource := range argoManagedResources { - resources = append(resources, &gRPC.ExternalResourceDetail{ - Group: argoManagedResource.Group, - Kind: argoManagedResource.Kind, - Version: argoManagedResource.Version, - Name: argoManagedResource.Name, - Namespace: argoManagedResource.Namespace, - }) - } - var clusterConfigOfClusterWhereAppIsDeployed *gRPC.ClusterConfig - if len(configOfClusterWhereAppIsDeployed.BearerToken) > 0 { - clusterConfigOfClusterWhereAppIsDeployed = &gRPC.ClusterConfig{ - ApiServerUrl: destinationServer, - Token: configOfClusterWhereAppIsDeployed.BearerToken, - InsecureSkipTLSVerify: configOfClusterWhereAppIsDeployed.TlsClientConfig.Insecure, - KeyData: configOfClusterWhereAppIsDeployed.TlsClientConfig.KeyData, - CaData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CaData, - CertData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CertData, - } - } - resourceTreeResp, err := impl.helmAppService.GetResourceTreeForExternalResources(context.Background(), clusterId, clusterConfigOfClusterWhereAppIsDeployed, resources) - if err != nil { - impl.logger.Errorw("error in getting resource tree for external resources", "err", err) - return nil, err - } - return resourceTreeResp, nil -} - -func getApplicationListDtos(manifestObj map[string]interface{}, clusterName string, clusterId int) []*bean.ArgoApplicationListDto { +func getApplicationListDtos(resp *k8s.ClusterResourceListMap, clusterName string, clusterId int) []*bean.ArgoApplicationListDto { appLists := make([]*bean.ArgoApplicationListDto, 0) - // map of keys and index in row cells, initially set as 0 will be updated by object - keysToBeFetchedFromColumnDefinitions := map[string]int{k8sCommonBean.K8sResourceColumnDefinitionName: 0, - k8sCommonBean.K8sResourceColumnDefinitionHealthStatus: 0, k8sCommonBean.K8sResourceColumnDefinitionSyncStatus: 0} - keysToBeFetchedFromRawObject := []string{k8sCommonBean.K8sClusterResourceNamespaceKey} - - columnsDataRaw := manifestObj[k8sCommonBean.K8sClusterResourceColumnDefinitionKey] - if columnsDataRaw != nil { - columnsData := columnsDataRaw.([]interface{}) - for i, columnData := range columnsData { - columnDataMap := columnData.(map[string]interface{}) - for key := range keysToBeFetchedFromColumnDefinitions { - if columnDataMap[k8sCommonBean.K8sClusterResourceNameKey] == key { - keysToBeFetchedFromColumnDefinitions[key] = i - } + if resp != nil { + appLists = make([]*bean.ArgoApplicationListDto, len(resp.Data)) + for i, rowData := range resp.Data { + if rowData == nil { + continue } - } - } - - rowsDataRaw := manifestObj[k8sCommonBean.K8sClusterResourceRowsKey] - if rowsDataRaw != nil { - rowsData := 
rowsDataRaw.([]interface{}) - for _, rowData := range rowsData { appListDto := &bean.ArgoApplicationListDto{ ClusterId: clusterId, ClusterName: clusterName, } - rowDataMap := rowData.(map[string]interface{}) - rowCells := rowDataMap[k8sCommonBean.K8sClusterResourceCellKey].([]interface{}) - for key, value := range keysToBeFetchedFromColumnDefinitions { - resolvedValueFromRowCell := rowCells[value].(string) - switch key { - case k8sCommonBean.K8sResourceColumnDefinitionName: - appListDto.Name = resolvedValueFromRowCell - case k8sCommonBean.K8sResourceColumnDefinitionSyncStatus: - appListDto.SyncStatus = resolvedValueFromRowCell - case k8sCommonBean.K8sResourceColumnDefinitionHealthStatus: - appListDto.HealthStatus = resolvedValueFromRowCell + if rowData[k8sCommonBean.K8sClusterResourceNameKey] != nil { + if nameStr, ok := rowData[k8sCommonBean.K8sClusterResourceNameKey].(string); ok { + appListDto.Name = nameStr } } - rowObject := rowDataMap[k8sCommonBean.K8sClusterResourceObjectKey].(map[string]interface{}) - for _, key := range keysToBeFetchedFromRawObject { - switch key { - case k8sCommonBean.K8sClusterResourceNamespaceKey: - metadata := rowObject[k8sCommonBean.K8sClusterResourceMetadataKey].(map[string]interface{}) - appListDto.Namespace = metadata[k8sCommonBean.K8sClusterResourceNamespaceKey].(string) + if rowData[k8sCommonBean.K8sResourceColumnDefinitionSyncStatus] != nil { + if syncStatusStr, ok := rowData[k8sCommonBean.K8sResourceColumnDefinitionSyncStatus].(string); ok { + appListDto.SyncStatus = syncStatusStr } } - - appLists = append(appLists, appListDto) - } - } - return appLists -} - -func getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(obj map[string]interface{}) (string, - string, string, []*bean.ArgoManagedResource) { - var healthStatus, syncStatus, destinationServer string - argoManagedResources := make([]*bean.ArgoManagedResource, 0) - if specObjRaw, ok := obj[k8sCommonBean.Spec]; ok { - specObj := specObjRaw.(map[string]interface{}) - if destinationObjRaw, ok2 := specObj[bean.Destination]; ok2 { - destinationObj := destinationObjRaw.(map[string]interface{}) - if destinationServerIf, ok3 := destinationObj[bean.Server]; ok3 { - destinationServer = destinationServerIf.(string) - } - } - } - if statusObjRaw, ok := obj[k8sCommonBean.K8sClusterResourceStatusKey]; ok { - statusObj := statusObjRaw.(map[string]interface{}) - if healthObjRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceHealthKey]; ok2 { - healthObj := healthObjRaw.(map[string]interface{}) - if healthStatusIf, ok3 := healthObj[k8sCommonBean.K8sClusterResourceStatusKey]; ok3 { - healthStatus = healthStatusIf.(string) - } - } - if syncObjRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceSyncKey]; ok2 { - syncObj := syncObjRaw.(map[string]interface{}) - if syncStatusIf, ok3 := syncObj[k8sCommonBean.K8sClusterResourceStatusKey]; ok3 { - syncStatus = syncStatusIf.(string) - } - } - if resourceObjsRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceResourcesKey]; ok2 { - resourceObjs := resourceObjsRaw.([]interface{}) - argoManagedResources = make([]*bean.ArgoManagedResource, 0, len(resourceObjs)) - for _, resourceObjRaw := range resourceObjs { - argoManagedResource := &bean.ArgoManagedResource{} - resourceObj := resourceObjRaw.(map[string]interface{}) - if groupRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceGroupKey]; ok { - argoManagedResource.Group = groupRaw.(string) - } - if kindRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceKindKey]; ok { - 
argoManagedResource.Kind = kindRaw.(string) + if rowData[k8sCommonBean.K8sResourceColumnDefinitionHealthStatus] != nil { + if healthStatusStr, ok := rowData[k8sCommonBean.K8sResourceColumnDefinitionHealthStatus].(string); ok { + appListDto.HealthStatus = healthStatusStr } - if versionRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceVersionKey]; ok { - argoManagedResource.Version = versionRaw.(string) - } - if nameRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceMetadataNameKey]; ok { - argoManagedResource.Name = nameRaw.(string) - } - if namespaceRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceNamespaceKey]; ok { - argoManagedResource.Namespace = namespaceRaw.(string) - } - argoManagedResources = append(argoManagedResources, argoManagedResource) } - } - } - return healthStatus, syncStatus, destinationServer, argoManagedResources -} - -func (impl *ArgoApplicationServiceImpl) GetServerConfigIfClusterIsNotAddedOnDevtron(resourceResp *k8s.ManifestResponse, restConfig *rest.Config, - clusterWithApplicationObject clusterRepository.Cluster, clusterServerUrlIdMap map[string]int) (*rest.Config, error) { - var destinationServer string - if resourceResp != nil && resourceResp.Manifest.Object != nil { - _, _, destinationServer, _ = - getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resourceResp.Manifest.Object) - } - appDeployedOnClusterId := 0 - if destinationServer == k8s.DefaultClusterUrl { - appDeployedOnClusterId = clusterWithApplicationObject.Id - } else if clusterIdFromMap, ok := clusterServerUrlIdMap[destinationServer]; ok { - appDeployedOnClusterId = clusterIdFromMap - } - var configOfClusterWhereAppIsDeployed *bean.ArgoClusterConfigObj - if appDeployedOnClusterId < 1 { - // cluster is not added on devtron, need to get server config from secret which argo-cd saved - coreV1Client, err := impl.k8sUtil.GetCoreV1ClientByRestConfig(restConfig) - secrets, err := coreV1Client.Secrets(bean.AllNamespaces).List(context.Background(), v1.ListOptions{ - LabelSelector: labels.SelectorFromSet(labels.Set{"argocd.argoproj.io/secret-type": "cluster"}).String(), - }) - if err != nil { - impl.logger.Errorw("error in getting resource list, secrets", "err", err) - return nil, err - } - for _, secret := range secrets.Items { - if secret.Data != nil { - if val, ok := secret.Data[bean.Server]; ok { - if string(val) == destinationServer { - if config, ok := secret.Data[bean.Config]; ok { - err = json.Unmarshal(config, &configOfClusterWhereAppIsDeployed) - if err != nil { - impl.logger.Errorw("error in unmarshaling", "err", err) - return nil, err - } - break - } - } + if rowData[k8sCommonBean.K8sClusterResourceNamespaceKey] != nil { + if namespaceStr, ok := rowData[k8sCommonBean.K8sClusterResourceNamespaceKey].(string); ok { + appListDto.Namespace = namespaceStr } } - } - if configOfClusterWhereAppIsDeployed != nil { - restConfig.Host = destinationServer - restConfig.TLSClientConfig = rest.TLSClientConfig{ - Insecure: configOfClusterWhereAppIsDeployed.TlsClientConfig.Insecure, - KeyFile: configOfClusterWhereAppIsDeployed.TlsClientConfig.KeyData, - CAFile: configOfClusterWhereAppIsDeployed.TlsClientConfig.CaData, - CertFile: configOfClusterWhereAppIsDeployed.TlsClientConfig.CertData, - } - restConfig.BearerToken = configOfClusterWhereAppIsDeployed.BearerToken + appLists[i] = appListDto } } - return restConfig, nil -} - -func (impl *ArgoApplicationServiceImpl) GetClusterConfigFromAllClusters(clusterId int) (*k8s.ClusterConfig, clusterRepository.Cluster, map[string]int, error) 
{ - clusters, err := impl.clusterRepository.FindAllActive() - var clusterWithApplicationObject clusterRepository.Cluster - if err != nil { - impl.logger.Errorw("error in getting all active clusters", "err", err) - return nil, clusterWithApplicationObject, nil, err - } - clusterServerUrlIdMap := make(map[string]int, len(clusters)) - for _, cluster := range clusters { - if cluster.Id == clusterId { - clusterWithApplicationObject = cluster - } - clusterServerUrlIdMap[cluster.ServerUrl] = cluster.Id - } - if len(clusterWithApplicationObject.ErrorInConnecting) != 0 { - return nil, clusterWithApplicationObject, nil, fmt.Errorf("error in connecting to cluster") - } - clusterBean := cluster2.GetClusterBean(clusterWithApplicationObject) - clusterConfig := clusterBean.GetClusterConfig() - return clusterConfig, clusterWithApplicationObject, clusterServerUrlIdMap, err -} - -func (impl *ArgoApplicationServiceImpl) GetRestConfigForExternalArgo(ctx context.Context, clusterId int, externalArgoApplicationName string) (*rest.Config, error) { - clusterConfig, clusterWithApplicationObject, clusterServerUrlIdMap, err := impl.GetClusterConfigFromAllClusters(clusterId) - if err != nil { - impl.logger.Errorw("error in getting cluster config", "err", err, "clusterId", clusterId) - return nil, err - } - restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) - if err != nil { - impl.logger.Errorw("error in getting rest config", "err", err, "clusterId", clusterId) - return nil, err - } - resourceResp, err := impl.k8sUtil.GetResource(ctx, bean.DevtronCDNamespae, externalArgoApplicationName, bean.GvkForArgoApplication, restConfig) - if err != nil { - impl.logger.Errorw("not on external cluster", "err", err, "externalArgoApplicationName", externalArgoApplicationName) - return nil, err - } - restConfig, err = impl.GetServerConfigIfClusterIsNotAddedOnDevtron(resourceResp, restConfig, clusterWithApplicationObject, clusterServerUrlIdMap) - if err != nil { - impl.logger.Errorw("error in getting server config", "err", err, "cluster with application object", clusterWithApplicationObject) - return nil, err - } - return restConfig, nil + return appLists } func (impl *ArgoApplicationServiceImpl) HibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { - _, clusterBean, _, err := impl.GetClusterConfigFromAllClusters(app.ClusterId) + _, clusterBean, _, err := impl.readService.GetClusterConfigFromAllClusters(app.ClusterId) if err != nil { impl.logger.Errorw("HibernateArgoApplication", "error in getting the cluster config", err, "clusterId", app.ClusterId, "appName", app.AppName) return nil, err } - conf := ConvertClusterBeanToGrpcConfig(clusterBean) + conf := helper.ConvertClusterBeanToGrpcConfig(clusterBean) req := service.HibernateReqAdaptor(hibernateRequest) req.ClusterConfig = conf @@ -476,12 +188,12 @@ func (impl *ArgoApplicationServiceImpl) HibernateArgoApplication(ctx context.Con } func (impl *ArgoApplicationServiceImpl) UnHibernateArgoApplication(ctx context.Context, app *bean.ArgoAppIdentifier, hibernateRequest *openapi.HibernateRequest) ([]*openapi.HibernateStatus, error) { - _, clusterBean, _, err := impl.GetClusterConfigFromAllClusters(app.ClusterId) + _, clusterBean, _, err := impl.readService.GetClusterConfigFromAllClusters(app.ClusterId) if err != nil { impl.logger.Errorw("HibernateArgoApplication", "error in getting the cluster config", err, "clusterId", app.ClusterId, "appName", app.AppName) return nil, 
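
HibernateArgoApplication and UnHibernateArgoApplication now fetch the cluster through the extracted read service and convert it with helper.ConvertClusterBeanToGrpcConfig before building the hibernate request. A sketch of what such a conversion plausibly looks like; the field names follow the gRPC.ClusterConfig literal used elsewhere in this patch, and the local types are simplified stand-ins, so the real helper may map more fields:

package main

import "fmt"

// Simplified stand-ins for the cluster entity and gRPC config types.
type clusterBean struct {
	ServerUrl             string
	BearerToken           string
	InsecureSkipTLSVerify bool
	KeyData               string
	CaData                string
	CertData              string
}

type grpcClusterConfig struct {
	ApiServerUrl          string
	Token                 string
	InsecureSkipTLSVerify bool
	KeyData               string
	CaData                string
	CertData              string
}

// convertClusterBeanToGrpcConfig is a pure field-by-field mapping, which is
// why callers like HibernateArgoApplication stay free of transport details.
func convertClusterBeanToGrpcConfig(b clusterBean) *grpcClusterConfig {
	return &grpcClusterConfig{
		ApiServerUrl:          b.ServerUrl,
		Token:                 b.BearerToken,
		InsecureSkipTLSVerify: b.InsecureSkipTLSVerify,
		KeyData:               b.KeyData,
		CaData:                b.CaData,
		CertData:              b.CertData,
	}
}

func main() {
	conf := convertClusterBeanToGrpcConfig(clusterBean{ServerUrl: "https://10.0.0.1:6443", BearerToken: "token"})
	fmt.Println(conf.ApiServerUrl, conf.InsecureSkipTLSVerify)
}
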
err } - conf := ConvertClusterBeanToGrpcConfig(clusterBean) + conf := helper.ConvertClusterBeanToGrpcConfig(clusterBean) req := service.HibernateReqAdaptor(hibernateRequest) req.ClusterConfig = conf diff --git a/pkg/argoApplication/helper.go b/pkg/argoApplication/helper/helper.go similarity index 98% rename from pkg/argoApplication/helper.go rename to pkg/argoApplication/helper/helper.go index 97a80e2c4de..d931b078083 100644 --- a/pkg/argoApplication/helper.go +++ b/pkg/argoApplication/helper/helper.go @@ -1,4 +1,4 @@ -package argoApplication +package helper import ( "fmt" diff --git a/pkg/argoApplication/read/ArgoApplicationReadService.go b/pkg/argoApplication/read/ArgoApplicationReadService.go new file mode 100644 index 00000000000..4a815e99b44 --- /dev/null +++ b/pkg/argoApplication/read/ArgoApplicationReadService.go @@ -0,0 +1,402 @@ +package read + +import ( + "context" + "encoding/json" + "fmt" + "github.com/devtron-labs/common-lib/utils/k8s" + k8sCommonBean "github.com/devtron-labs/common-lib/utils/k8s/commonBean" + "github.com/devtron-labs/devtron/api/helm-app/gRPC" + "github.com/devtron-labs/devtron/api/helm-app/service" + "github.com/devtron-labs/devtron/pkg/argoApplication/bean" + cluster2 "github.com/devtron-labs/devtron/pkg/cluster" + clusterRepository "github.com/devtron-labs/devtron/pkg/cluster/repository" + clientErrors "github.com/devtron-labs/devtron/pkg/errors" + "github.com/devtron-labs/devtron/util/argo" + "go.uber.org/zap" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +type ArgoApplicationReadService interface { + GetRestConfigForExternalArgo(ctx context.Context, clusterId int, externalArgoApplicationName string) (*rest.Config, error) + GetClusterConfigFromAllClusters(clusterId int) (*k8s.ClusterConfig, clusterRepository.Cluster, map[string]int, error) + ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean.ArgoAppIdentifier, request *k8s.K8sRequestBean) (bool, error) + GetAppDetail(resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) +} + +type ArgoApplicationReadServiceImpl struct { + logger *zap.SugaredLogger + clusterRepository clusterRepository.ClusterRepository + k8sUtil *k8s.K8sServiceImpl + argoUserService argo.ArgoUserService + helmAppClient gRPC.HelmAppClient + helmAppService service.HelmAppService +} + +func NewArgoApplicationReadServiceImpl(logger *zap.SugaredLogger, + clusterRepository clusterRepository.ClusterRepository, + k8sUtil *k8s.K8sServiceImpl, + argoUserService argo.ArgoUserService, helmAppClient gRPC.HelmAppClient, + helmAppService service.HelmAppService) *ArgoApplicationReadServiceImpl { + return &ArgoApplicationReadServiceImpl{ + logger: logger, + clusterRepository: clusterRepository, + k8sUtil: k8sUtil, + argoUserService: argoUserService, + helmAppService: helmAppService, + helmAppClient: helmAppClient, + } + +} + +func (impl *ArgoApplicationReadServiceImpl) GetRestConfigForExternalArgo(ctx context.Context, clusterId int, externalArgoApplicationName string) (*rest.Config, error) { + clusterConfig, clusterWithApplicationObject, clusterServerUrlIdMap, err := impl.GetClusterConfigFromAllClusters(clusterId) + if err != nil { + impl.logger.Errorw("error in getting cluster config", "err", err, "clusterId", clusterId) + return nil, err + } + restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.logger.Errorw("error in getting rest config", "err", 
err, "clusterId", clusterId) + return nil, err + } + resourceResp, err := impl.k8sUtil.GetResource(ctx, bean.DevtronCDNamespae, externalArgoApplicationName, bean.GvkForArgoApplication, restConfig) + if err != nil { + impl.logger.Errorw("not on external cluster", "err", err, "externalArgoApplicationName", externalArgoApplicationName) + return nil, err + } + restConfig, err = impl.GetServerConfigIfClusterIsNotAddedOnDevtron(resourceResp, restConfig, clusterWithApplicationObject, clusterServerUrlIdMap) + if err != nil { + impl.logger.Errorw("error in getting server config", "err", err, "cluster with application object", clusterWithApplicationObject) + return nil, err + } + return restConfig, nil +} + +func (impl *ArgoApplicationReadServiceImpl) GetServerConfigIfClusterIsNotAddedOnDevtron(resourceResp *k8s.ManifestResponse, restConfig *rest.Config, + clusterWithApplicationObject clusterRepository.Cluster, clusterServerUrlIdMap map[string]int) (*rest.Config, error) { + var destinationServer string + if resourceResp != nil && resourceResp.Manifest.Object != nil { + _, _, destinationServer, _ = + getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resourceResp.Manifest.Object) + } + appDeployedOnClusterId := 0 + if destinationServer == k8s.DefaultClusterUrl { + appDeployedOnClusterId = clusterWithApplicationObject.Id + } else if clusterIdFromMap, ok := clusterServerUrlIdMap[destinationServer]; ok { + appDeployedOnClusterId = clusterIdFromMap + } + var configOfClusterWhereAppIsDeployed *bean.ArgoClusterConfigObj + if appDeployedOnClusterId < 1 { + // cluster is not added on devtron, need to get server config from secret which argo-cd saved + coreV1Client, err := impl.k8sUtil.GetCoreV1ClientByRestConfig(restConfig) + secrets, err := coreV1Client.Secrets(bean.AllNamespaces).List(context.Background(), v1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{"argocd.argoproj.io/secret-type": "cluster"}).String(), + }) + if err != nil { + impl.logger.Errorw("error in getting resource list, secrets", "err", err) + return nil, err + } + for _, secret := range secrets.Items { + if secret.Data != nil { + if val, ok := secret.Data[bean.Server]; ok { + if string(val) == destinationServer { + if config, ok := secret.Data[bean.Config]; ok { + err = json.Unmarshal(config, &configOfClusterWhereAppIsDeployed) + if err != nil { + impl.logger.Errorw("error in unmarshaling", "err", err) + return nil, err + } + break + } + } + } + } + } + if configOfClusterWhereAppIsDeployed != nil { + restConfig, err = impl.k8sUtil.GetRestConfigByCluster(&k8s.ClusterConfig{ + Host: destinationServer, + BearerToken: configOfClusterWhereAppIsDeployed.BearerToken, + InsecureSkipTLSVerify: configOfClusterWhereAppIsDeployed.TlsClientConfig.Insecure, + KeyData: configOfClusterWhereAppIsDeployed.TlsClientConfig.KeyData, + CAData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CaData, + CertData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CertData, + }) + if err != nil { + impl.logger.Errorw("error in GetRestConfigByCluster, GetServerConfigIfClusterIsNotAddedOnDevtron", "err", err, "serverUrl", destinationServer) + return nil, err + } + } + } + return restConfig, nil +} + +func (impl *ArgoApplicationReadServiceImpl) GetClusterConfigFromAllClusters(clusterId int) (*k8s.ClusterConfig, clusterRepository.Cluster, map[string]int, error) { + clusters, err := impl.clusterRepository.FindAllActive() + var clusterWithApplicationObject clusterRepository.Cluster + if err != nil { + impl.logger.Errorw("error in 
getting all active clusters", "err", err) + return nil, clusterWithApplicationObject, nil, err + } + clusterServerUrlIdMap := make(map[string]int, len(clusters)) + for _, cluster := range clusters { + if cluster.Id == clusterId { + clusterWithApplicationObject = cluster + } + clusterServerUrlIdMap[cluster.ServerUrl] = cluster.Id + } + if len(clusterWithApplicationObject.ErrorInConnecting) != 0 { + return nil, clusterWithApplicationObject, nil, fmt.Errorf("error in connecting to cluster") + } + clusterBean := cluster2.GetClusterBean(clusterWithApplicationObject) + clusterConfig := clusterBean.GetClusterConfig() + return clusterConfig, clusterWithApplicationObject, clusterServerUrlIdMap, err +} + +func (impl *ArgoApplicationReadServiceImpl) GetAppDetail(resourceName, resourceNamespace string, clusterId int) (*bean.ArgoApplicationDetailDto, error) { + appDetail := &bean.ArgoApplicationDetailDto{ + ArgoApplicationListDto: &bean.ArgoApplicationListDto{ + Name: resourceName, + Namespace: resourceNamespace, + ClusterId: clusterId, + }, + } + clusters, err := impl.clusterRepository.FindAllActive() + if err != nil { + impl.logger.Errorw("error in getting all active clusters", "err", err) + return nil, err + } + var clusterWithApplicationObject clusterRepository.Cluster + clusterServerUrlIdMap := make(map[string]int, len(clusters)) + for _, cluster := range clusters { + if cluster.Id == clusterId { + clusterWithApplicationObject = cluster + } + clusterServerUrlIdMap[cluster.ServerUrl] = cluster.Id + } + if clusterWithApplicationObject.Id > 0 { + appDetail.ClusterName = clusterWithApplicationObject.ClusterName + } + if clusterWithApplicationObject.IsVirtualCluster { + return appDetail, nil + } else if len(clusterWithApplicationObject.ErrorInConnecting) != 0 { + return nil, fmt.Errorf("error in connecting to cluster") + } + clusterBean := cluster2.GetClusterBean(clusterWithApplicationObject) + clusterConfig := clusterBean.GetClusterConfig() + restConfig, err := impl.k8sUtil.GetRestConfigByCluster(clusterConfig) + if err != nil { + impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", clusterWithApplicationObject.Id) + return nil, err + } + resp, err := impl.k8sUtil.GetResource(context.Background(), resourceNamespace, resourceName, bean.GvkForArgoApplication, restConfig) + if err != nil { + impl.logger.Errorw("error in getting resource list", "err", err) + return nil, err + } + var destinationServer string + var argoManagedResources []*bean.ArgoManagedResource + if resp != nil && resp.Manifest.Object != nil { + appDetail.Manifest = resp.Manifest.Object + appDetail.HealthStatus, appDetail.SyncStatus, destinationServer, argoManagedResources = + getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(resp.Manifest.Object) + } + appDeployedOnClusterId := 0 + if destinationServer == k8s.DefaultClusterUrl { + appDeployedOnClusterId = clusterWithApplicationObject.Id + } else if clusterIdFromMap, ok := clusterServerUrlIdMap[destinationServer]; ok { + appDeployedOnClusterId = clusterIdFromMap + } + var configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj + if appDeployedOnClusterId < 1 { + // cluster is not added on devtron, need to get server config from secret which argo-cd saved + coreV1Client, err := impl.k8sUtil.GetCoreV1ClientByRestConfig(restConfig) + secrets, err := coreV1Client.Secrets(bean.AllNamespaces).List(context.Background(), v1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{"argocd.argoproj.io/secret-type": 
"cluster"}).String(), + }) + if err != nil { + impl.logger.Errorw("error in getting resource list, secrets", "err", err) + return nil, err + } + for _, secret := range secrets.Items { + if secret.Data != nil { + if val, ok := secret.Data["server"]; ok { + if string(val) == destinationServer { + if config, ok := secret.Data["config"]; ok { + err = json.Unmarshal(config, &configOfClusterWhereAppIsDeployed) + if err != nil { + impl.logger.Errorw("error in unmarshaling", "err", err) + return nil, err + } + break + } + } + } + } + } + } + resourceTreeResp, err := impl.getResourceTreeForExternalCluster(appDeployedOnClusterId, destinationServer, configOfClusterWhereAppIsDeployed, argoManagedResources) + if err != nil { + impl.logger.Errorw("error in getting resource tree response", "err", err) + return nil, err + } + appDetail.ResourceTree = resourceTreeResp + return appDetail, nil +} + +func (impl *ArgoApplicationReadServiceImpl) getResourceTreeForExternalCluster(clusterId int, destinationServer string, + configOfClusterWhereAppIsDeployed bean.ArgoClusterConfigObj, argoManagedResources []*bean.ArgoManagedResource) (*gRPC.ResourceTreeResponse, error) { + var resources []*gRPC.ExternalResourceDetail + for _, argoManagedResource := range argoManagedResources { + resources = append(resources, &gRPC.ExternalResourceDetail{ + Group: argoManagedResource.Group, + Kind: argoManagedResource.Kind, + Version: argoManagedResource.Version, + Name: argoManagedResource.Name, + Namespace: argoManagedResource.Namespace, + }) + } + var clusterConfigOfClusterWhereAppIsDeployed *gRPC.ClusterConfig + if len(configOfClusterWhereAppIsDeployed.BearerToken) > 0 { + clusterConfigOfClusterWhereAppIsDeployed = &gRPC.ClusterConfig{ + ApiServerUrl: destinationServer, + Token: configOfClusterWhereAppIsDeployed.BearerToken, + InsecureSkipTLSVerify: configOfClusterWhereAppIsDeployed.TlsClientConfig.Insecure, + KeyData: configOfClusterWhereAppIsDeployed.TlsClientConfig.KeyData, + CaData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CaData, + CertData: configOfClusterWhereAppIsDeployed.TlsClientConfig.CertData, + } + } + resourceTreeResp, err := impl.helmAppService.GetResourceTreeForExternalResources(context.Background(), clusterId, clusterConfigOfClusterWhereAppIsDeployed, resources) + if err != nil { + impl.logger.Errorw("error in getting resource tree for external resources", "err", err) + return nil, err + } + return resourceTreeResp, nil +} + +func (impl *ArgoApplicationReadServiceImpl) ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean.ArgoAppIdentifier, request *k8s.K8sRequestBean) (bool, error) { + app, err := impl.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) + if err != nil { + impl.logger.Errorw("error in getting app detail", "err", err, "appDetails", appIdentifier) + apiError := clientErrors.ConvertToApiError(err) + if apiError != nil { + err = apiError + } + return false, err + } + + valid := false + + for _, node := range app.ResourceTree.Nodes { + nodeDetails := k8s.ResourceIdentifier{ + Name: node.Name, + Namespace: node.Namespace, + GroupVersionKind: schema.GroupVersionKind{ + Group: node.Group, + Version: node.Version, + Kind: node.Kind, + }, + } + if nodeDetails == request.ResourceIdentifier { + valid = true + break + } + } + appDetail := &gRPC.AppDetail{ + ResourceTreeResponse: app.ResourceTree, + } + return validateContainerNameIfReqd(valid, request, appDetail), nil +} + +func validateContainerNameIfReqd(valid bool, request *k8s.K8sRequestBean, app 
*gRPC.AppDetail) bool { + if !valid { + requestContainerName := request.PodLogsRequest.ContainerName + podName := request.ResourceIdentifier.Name + for _, pod := range app.ResourceTreeResponse.PodMetadata { + if pod.Name == podName { + + // finding the container name in main Containers + for _, container := range pod.Containers { + if container == requestContainerName { + return true + } + } + + // finding the container name in init containers + for _, initContainer := range pod.InitContainers { + if initContainer == requestContainerName { + return true + } + } + + // finding the container name in ephemeral containers + for _, ephemeralContainer := range pod.EphemeralContainers { + if ephemeralContainer.Name == requestContainerName { + return true + } + } + + } + } + } + return valid +} + +func getHealthSyncStatusDestinationServerAndManagedResourcesForArgoK8sRawObject(obj map[string]interface{}) (string, + string, string, []*bean.ArgoManagedResource) { + var healthStatus, syncStatus, destinationServer string + argoManagedResources := make([]*bean.ArgoManagedResource, 0) + if specObjRaw, ok := obj[k8sCommonBean.Spec]; ok { + specObj := specObjRaw.(map[string]interface{}) + if destinationObjRaw, ok2 := specObj[bean.Destination]; ok2 { + destinationObj := destinationObjRaw.(map[string]interface{}) + if destinationServerIf, ok3 := destinationObj[bean.Server]; ok3 { + destinationServer = destinationServerIf.(string) + } + } + } + if statusObjRaw, ok := obj[k8sCommonBean.K8sClusterResourceStatusKey]; ok { + statusObj := statusObjRaw.(map[string]interface{}) + if healthObjRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceHealthKey]; ok2 { + healthObj := healthObjRaw.(map[string]interface{}) + if healthStatusIf, ok3 := healthObj[k8sCommonBean.K8sClusterResourceStatusKey]; ok3 { + healthStatus = healthStatusIf.(string) + } + } + if syncObjRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceSyncKey]; ok2 { + syncObj := syncObjRaw.(map[string]interface{}) + if syncStatusIf, ok3 := syncObj[k8sCommonBean.K8sClusterResourceStatusKey]; ok3 { + syncStatus = syncStatusIf.(string) + } + } + if resourceObjsRaw, ok2 := statusObj[k8sCommonBean.K8sClusterResourceResourcesKey]; ok2 { + resourceObjs := resourceObjsRaw.([]interface{}) + argoManagedResources = make([]*bean.ArgoManagedResource, 0, len(resourceObjs)) + for _, resourceObjRaw := range resourceObjs { + argoManagedResource := &bean.ArgoManagedResource{} + resourceObj := resourceObjRaw.(map[string]interface{}) + if groupRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceGroupKey]; ok { + argoManagedResource.Group = groupRaw.(string) + } + if kindRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceKindKey]; ok { + argoManagedResource.Kind = kindRaw.(string) + } + if versionRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceVersionKey]; ok { + argoManagedResource.Version = versionRaw.(string) + } + if nameRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceMetadataNameKey]; ok { + argoManagedResource.Name = nameRaw.(string) + } + if namespaceRaw, ok := resourceObj[k8sCommonBean.K8sClusterResourceNamespaceKey]; ok { + argoManagedResource.Namespace = namespaceRaw.(string) + } + argoManagedResources = append(argoManagedResources, argoManagedResource) + } + } + } + return healthStatus, syncStatus, destinationServer, argoManagedResources +} diff --git a/pkg/eventProcessor/bean/workflowEventBean.go b/pkg/eventProcessor/bean/workflowEventBean.go index bdcac00f6c9..755defdd3ee 100644 --- a/pkg/eventProcessor/bean/workflowEventBean.go +++ 
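
ValidateArgoResourceRequest first matches the requested resource identifier against the app's resource-tree nodes; if that fails, validateContainerNameIfReqd gives pod-log requests a second chance by scanning the pod's regular, init, and ephemeral container lists. A simplified sketch of that fallback, with podMetadata flattening the gRPC pod metadata into plain string lists:

package main

import "fmt"

// podMetadata is a simplified stand-in for the gRPC pod metadata; in the real
// response ephemeral containers carry a struct with a Name field, modelled
// here as a plain string list.
type podMetadata struct {
	Name                string
	Containers          []string
	InitContainers      []string
	EphemeralContainers []string
}

// containerExistsInPod reproduces the fallback: a log request is still valid
// if the requested container appears in any of the three container lists of
// the requested pod.
func containerExistsInPod(pods []podMetadata, podName, containerName string) bool {
	for _, pod := range pods {
		if pod.Name != podName {
			continue
		}
		for _, list := range [][]string{pod.Containers, pod.InitContainers, pod.EphemeralContainers} {
			for _, c := range list {
				if c == containerName {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	pods := []podMetadata{{Name: "web-0", Containers: []string{"web"}, EphemeralContainers: []string{"debugger"}}}
	fmt.Println(containerExistsInPod(pods, "web-0", "debugger")) // true
	fmt.Println(containerExistsInPod(pods, "web-0", "sidecar"))  // false
}
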
b/pkg/eventProcessor/bean/workflowEventBean.go @@ -18,7 +18,8 @@ package bean import ( "context" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" + "encoding/json" + "github.com/devtron-labs/common-lib/utils/registry" "github.com/devtron-labs/devtron/api/bean" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" bean3 "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -62,11 +63,6 @@ func (r *UserDeploymentRequest) WithPipelineOverrideId(id int) *UserDeploymentRe return r } -type ImageDetailsFromCR struct { - ImageDetails []types.ImageDetail `json:"imageDetails"` - Region string `json:"region"` -} - type CiCompleteEvent struct { CiProjectDetails []bean3.CiProjectDetails `json:"ciProjectDetails"` DockerImage string `json:"dockerImage" validate:"required,image-validator"` @@ -81,9 +77,30 @@ type CiCompleteEvent struct { AppName string `json:"appName"` IsArtifactUploaded bool `json:"isArtifactUploaded"` FailureReason string `json:"failureReason"` - ImageDetailsFromCR *ImageDetailsFromCR `json:"imageDetailsFromCR"` + ImageDetailsFromCR json.RawMessage `json:"imageDetailsFromCR"` PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` PluginArtifactStage string `json:"pluginArtifactStage"` + pluginImageDetails *registry.ImageDetailsFromCR +} + +func (c *CiCompleteEvent) GetPluginImageDetails() *registry.ImageDetailsFromCR { + if c == nil { + return nil + } + return c.pluginImageDetails +} + +func (c *CiCompleteEvent) SetImageDetailsFromCR() error { + if c.ImageDetailsFromCR == nil { + return nil + } + var imageDetailsFromCR *registry.ImageDetailsFromCR + err := json.Unmarshal(c.ImageDetailsFromCR, &imageDetailsFromCR) + if err != nil { + return err + } + c.pluginImageDetails = imageDetailsFromCR + return nil } type DevtronAppReleaseContextType struct { diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go index 2ac2f3a9c2f..d01fb1bfe60 100644 --- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go +++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go @@ -22,9 +22,9 @@ import ( "errors" "fmt" "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" pubsub "github.com/devtron-labs/common-lib/pubsub-lib" "github.com/devtron-labs/common-lib/pubsub-lib/model" + "github.com/devtron-labs/common-lib/utils/registry" apiBean "github.com/devtron-labs/devtron/api/bean" client "github.com/devtron-labs/devtron/client/events" "github.com/devtron-labs/devtron/internal/sql/models" @@ -474,12 +474,25 @@ func (impl *WorkflowEventProcessorImpl) SubscribeCDWorkflowStatusUpdate() error return nil } +func (impl *WorkflowEventProcessorImpl) extractCiCompleteEventFrom(msg *model.PubSubMsg) (bean.CiCompleteEvent, error) { + ciCompleteEvent := bean.CiCompleteEvent{} + err := json.Unmarshal([]byte(msg.Data), &ciCompleteEvent) + if err != nil { + impl.logger.Error("error while unmarshalling json data", "error", err) + return ciCompleteEvent, err + } + err = ciCompleteEvent.SetImageDetailsFromCR() + if err != nil { + impl.logger.Error("error in unmarshalling imageDetailsFromCr results", "error", err) + return ciCompleteEvent, err + } + return ciCompleteEvent, nil +} + func (impl *WorkflowEventProcessorImpl) SubscribeCICompleteEvent() error { callback := func(msg *model.PubSubMsg) { - ciCompleteEvent := bean.CiCompleteEvent{} - err := json.Unmarshal([]byte(msg.Data), &ciCompleteEvent) + ciCompleteEvent, err := 
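
CiCompleteEvent now keeps ImageDetailsFromCR as a json.RawMessage and parses it in a separate SetImageDetailsFromCR step, which removes the hard dependency on AWS ECR types from the event payload and tolerates an absent field. A self-contained sketch of that two-phase decode with stand-in types:

package main

import (
	"encoding/json"
	"fmt"
)

// imageDetails is a stand-in for registry.ImageDetailsFromCR.
type imageDetails struct {
	Region string `json:"region"`
}

// event mirrors the two-phase decode: the outer Unmarshal keeps the field as
// raw bytes, and setImageDetails parses it only when (and if) it is present.
type event struct {
	PipelineId         int             `json:"pipelineId"`
	ImageDetailsFromCR json.RawMessage `json:"imageDetailsFromCR"`
	parsed             *imageDetails
}

func (e *event) setImageDetails() error {
	if e.ImageDetailsFromCR == nil {
		return nil // field absent: nothing to parse, same as SetImageDetailsFromCR
	}
	return json.Unmarshal(e.ImageDetailsFromCR, &e.parsed)
}

func main() {
	payload := []byte(`{"pipelineId":7,"imageDetailsFromCR":{"region":"us-east-1"}}`)
	e := &event{}
	if err := json.Unmarshal(payload, e); err != nil {
		panic(err)
	}
	if err := e.setImageDetails(); err != nil {
		panic(err)
	}
	fmt.Println(e.PipelineId, e.parsed.Region) // 7 us-east-1
}
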
impl.extractCiCompleteEventFrom(msg) if err != nil { - impl.logger.Error("error while unmarshalling json data", "error", err) return } impl.logger.Debugw("ci complete event for ci", "ciPipelineId", ciCompleteEvent.PipelineId) @@ -501,34 +514,33 @@ func (impl *WorkflowEventProcessorImpl) SubscribeCICompleteEvent() error { ciCompleteEvent.PipelineId, "request: ", req, "error: ", err) return } - } else if ciCompleteEvent.ImageDetailsFromCR != nil { - if len(ciCompleteEvent.ImageDetailsFromCR.ImageDetails) > 0 { - imageDetails := globalUtil.GetReverseSortedImageDetails(ciCompleteEvent.ImageDetailsFromCR.ImageDetails) + } else if ciCompleteEvent.GetPluginImageDetails() != nil { + if len(ciCompleteEvent.GetPluginImageDetails().ImageDetails) > 0 { + imageDetails := registry.SortGenericImageDetailByCreatedOn(ciCompleteEvent.GetPluginImageDetails().ImageDetails, registry.Ascending) digestWorkflowMap, err := impl.webhookService.HandleMultipleImagesFromEvent(imageDetails, *ciCompleteEvent.WorkflowId) if err != nil { impl.logger.Errorw("error in getting digest workflow map", "err", err, "workflowId", ciCompleteEvent.WorkflowId) return } for _, detail := range imageDetails { - if detail.ImageTags == nil { + if detail == nil || len(detail.Image) == 0 { continue } - request, err := impl.BuildCIArtifactRequestForImageFromCR(detail, ciCompleteEvent.ImageDetailsFromCR.Region, ciCompleteEvent, digestWorkflowMap[*detail.ImageDigest].Id) + request, err := impl.buildCIArtifactRequestForImageFromCR(detail, ciCompleteEvent, digestWorkflowMap[detail.GetGenericImageDetailIdentifier()].Id) if err != nil { impl.logger.Error("Error while creating request for pipelineID", "pipelineId", ciCompleteEvent.PipelineId, "err", err) return } - resp, err := impl.ValidateAndHandleCiSuccessEvent(triggerContext, ciCompleteEvent.PipelineId, request, detail.ImagePushedAt) + resp, err := impl.ValidateAndHandleCiSuccessEvent(triggerContext, ciCompleteEvent.PipelineId, request, detail.LastUpdatedOn) if err != nil { return } impl.logger.Debug("response of handle ci success event for multiple images from plugin", "resp", resp) } } - } else { globalUtil.TriggerCIMetrics(ciCompleteEvent.Metrics, impl.globalEnvVariables.ExposeCiMetrics, ciCompleteEvent.PipelineName, ciCompleteEvent.AppName) - resp, err := impl.ValidateAndHandleCiSuccessEvent(triggerContext, ciCompleteEvent.PipelineId, req, &time.Time{}) + resp, err := impl.ValidateAndHandleCiSuccessEvent(triggerContext, ciCompleteEvent.PipelineId, req, time.Time{}) if err != nil { return } @@ -555,7 +567,7 @@ func (impl *WorkflowEventProcessorImpl) SubscribeCICompleteEvent() error { return nil } -func (impl *WorkflowEventProcessorImpl) ValidateAndHandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *wrokflowDagBean.CiArtifactWebhookRequest, imagePushedAt *time.Time) (int, error) { +func (impl *WorkflowEventProcessorImpl) ValidateAndHandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *wrokflowDagBean.CiArtifactWebhookRequest, imagePushedAt time.Time) (int, error) { validationErr := impl.validator.Struct(request) if validationErr != nil { impl.logger.Errorw("validation err, HandleCiSuccessEvent", "err", validationErr, "payload", request) @@ -643,13 +655,13 @@ func (impl *WorkflowEventProcessorImpl) BuildCiArtifactRequest(event bean.CiComp return request, nil } -func (impl *WorkflowEventProcessorImpl) BuildCIArtifactRequestForImageFromCR(imageDetails types.ImageDetail, region string, event bean.CiCompleteEvent, 
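
On the consumer side, the plugin-image branch now sorts the generic image details oldest-first (SortGenericImageDetailByCreatedOn with Ascending), asks HandleMultipleImagesFromEvent for a per-identifier workflow map, and skips entries without an image before handling each success event with the detail's LastUpdatedOn. A sketch of that ordering and lookup; genericImageDetail and identifier() are simplified stand-ins for the common-lib registry types:

package main

import (
	"fmt"
	"sort"
	"time"
)

// genericImageDetail stands in for registry.GenericImageDetail; identifier()
// plays the role of GetGenericImageDetailIdentifier().
type genericImageDetail struct {
	Image         string
	ImageDigest   string
	LastUpdatedOn time.Time
}

func (g genericImageDetail) identifier() string { return g.ImageDigest }

func main() {
	details := []genericImageDetail{
		{Image: "repo:v2", ImageDigest: "sha256:bbb", LastUpdatedOn: time.Now()},
		{Image: "repo:v1", ImageDigest: "sha256:aaa", LastUpdatedOn: time.Now().Add(-time.Hour)},
	}
	// ascending by creation time: the oldest image comes first so it can keep
	// the original workflow id
	sort.Slice(details, func(i, j int) bool {
		return details[i].LastUpdatedOn.Before(details[j].LastUpdatedOn)
	})

	// identifier -> workflow id, the shape HandleMultipleImagesFromEvent returns
	digestWorkflowMap := map[string]int{
		details[0].identifier(): 100,
		details[1].identifier(): 101,
	}
	for _, d := range details {
		if len(d.Image) == 0 { // skip rows without an image, like the rewritten loop
			continue
		}
		fmt.Printf("image %s handled under workflow %d\n", d.Image, digestWorkflowMap[d.identifier()])
	}
}
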
workflowId int) (*wrokflowDagBean.CiArtifactWebhookRequest, error) { +func (impl *WorkflowEventProcessorImpl) buildCIArtifactRequestForImageFromCR(imageDetails *registry.GenericImageDetail, event bean.CiCompleteEvent, workflowId int) (*wrokflowDagBean.CiArtifactWebhookRequest, error) { if event.TriggeredBy == 0 { event.TriggeredBy = 1 // system triggered event } request := &wrokflowDagBean.CiArtifactWebhookRequest{ - Image: globalUtil.ExtractEcrImage(*imageDetails.RegistryId, region, *imageDetails.RepositoryName, imageDetails.ImageTags[0]), - ImageDigest: *imageDetails.ImageDigest, + Image: imageDetails.Image, + ImageDigest: imageDetails.ImageDigest, DataSource: event.DataSource, PipelineName: event.PipelineName, UserId: event.TriggeredBy, diff --git a/pkg/k8s/K8sCommonService.go b/pkg/k8s/K8sCommonService.go index 09717b059b6..6c8ecc2fb3c 100644 --- a/pkg/k8s/K8sCommonService.go +++ b/pkg/k8s/K8sCommonService.go @@ -25,7 +25,7 @@ import ( "github.com/devtron-labs/devtron/api/bean" helmBean "github.com/devtron-labs/devtron/api/helm-app/service/bean" internalUtil "github.com/devtron-labs/devtron/internal/util" - "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/devtron-labs/devtron/pkg/cluster" bean3 "github.com/devtron-labs/devtron/pkg/k8s/application/bean" "github.com/devtron-labs/devtron/util" @@ -71,7 +71,7 @@ type K8sCommonServiceImpl struct { K8sUtil *k8s.K8sServiceImpl clusterService cluster.ClusterService K8sApplicationServiceConfig *K8sApplicationServiceConfig - argoApplicationService argoApplication.ArgoApplicationService + argoApplicationReadService read.ArgoApplicationReadService } type K8sApplicationServiceConfig struct { BatchSize int `env:"BATCH_SIZE" envDefault:"5"` @@ -80,7 +80,7 @@ type K8sApplicationServiceConfig struct { func NewK8sCommonServiceImpl(Logger *zap.SugaredLogger, k8sUtils *k8s.K8sServiceImpl, clusterService cluster.ClusterService, - argoApplicationService argoApplication.ArgoApplicationService) *K8sCommonServiceImpl { + argoApplicationReadService read.ArgoApplicationReadService) *K8sCommonServiceImpl { cfg := &K8sApplicationServiceConfig{} err := env.Parse(cfg) if err != nil { @@ -91,7 +91,7 @@ func NewK8sCommonServiceImpl(Logger *zap.SugaredLogger, k8sUtils *k8s.K8sService K8sUtil: k8sUtils, clusterService: clusterService, K8sApplicationServiceConfig: cfg, - argoApplicationService: argoApplicationService, + argoApplicationReadService: argoApplicationReadService, } } @@ -190,7 +190,7 @@ func (impl *K8sCommonServiceImpl) GetRestConfigOfCluster(ctx context.Context, re //getting rest config by clusterId clusterId := request.ClusterId if len(request.ExternalArgoApplicationName) > 0 { - restConfig, err := impl.argoApplicationService.GetRestConfigForExternalArgo(ctx, clusterId, request.ExternalArgoApplicationName) + restConfig, err := impl.argoApplicationReadService.GetRestConfigForExternalArgo(ctx, clusterId, request.ExternalArgoApplicationName) if err != nil { impl.logger.Errorw("error in getting rest config", "err", err, "clusterId", clusterId, "externalArgoApplicationName", request.ExternalArgoApplicationName) return nil, err @@ -457,7 +457,7 @@ func (impl *K8sCommonServiceImpl) GetCoreClientByClusterId(clusterId int) (*kube } func (impl *K8sCommonServiceImpl) GetCoreClientByClusterIdForExternalArgoApps(req *cluster.EphemeralContainerRequest) (*kubernetes.Clientset, *clientV1.CoreV1Client, error) { - restConfig, err := 
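
buildCIArtifactRequestForImageFromCR no longer reassembles an ECR URL from registry id, region, repository, and tag; the registry-agnostic detail already carries the complete image reference and digest. A sketch contrasting the two approaches; extractEcrImage is illustrative of the removed ECR-only path, not the common-lib API:

package main

import "fmt"

// extractEcrImage shows why the old path was ECR-specific: it hard-codes the
// <account>.dkr.ecr.<region>.amazonaws.com/<repo>:<tag> convention.
func extractEcrImage(registryId, region, repository, tag string) string {
	return fmt.Sprintf("%s.dkr.ecr.%s.amazonaws.com/%s:%s", registryId, region, repository, tag)
}

// genericImageDetail stands in for the new registry-agnostic detail: the full
// reference arrives pre-built, so any registry works.
type genericImageDetail struct {
	Image       string
	ImageDigest string
}

func main() {
	// before: only ECR images could be expressed
	fmt.Println(extractEcrImage("123456789012", "us-east-1", "my-app", "v1"))
	// after: the event already carries the complete reference and digest
	d := genericImageDetail{Image: "quay.io/org/my-app:v1", ImageDigest: "sha256:abc"}
	fmt.Println(d.Image, d.ImageDigest)
}
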
impl.argoApplicationService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) + restConfig, err := impl.argoApplicationReadService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) if err != nil { impl.logger.Errorw("error in getting rest config", "err", err, "clusterId", req.ClusterId, "externalArgoApplicationName", req.ExternalArgoApplicationName) } diff --git a/pkg/k8s/application/k8sApplicationService.go b/pkg/k8s/application/k8sApplicationService.go index 39b1902b17b..af5b7e33f27 100644 --- a/pkg/k8s/application/k8sApplicationService.go +++ b/pkg/k8s/application/k8sApplicationService.go @@ -25,7 +25,7 @@ import ( "github.com/devtron-labs/devtron/api/helm-app/gRPC" client "github.com/devtron-labs/devtron/api/helm-app/service" "github.com/devtron-labs/devtron/api/helm-app/service/bean" - bean4 "github.com/devtron-labs/devtron/pkg/argoApplication/bean" + "github.com/devtron-labs/devtron/pkg/argoApplication/helper" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" clientErrors "github.com/devtron-labs/devtron/pkg/errors" "github.com/devtron-labs/devtron/pkg/fluxApplication" @@ -45,7 +45,6 @@ import ( yamlUtil "github.com/devtron-labs/common-lib/utils/yaml" "github.com/devtron-labs/devtron/api/connector" "github.com/devtron-labs/devtron/api/helm-app/openapiClient" - "github.com/devtron-labs/devtron/pkg/argoApplication" "github.com/devtron-labs/devtron/pkg/cluster" "github.com/devtron-labs/devtron/pkg/cluster/repository" "github.com/devtron-labs/devtron/pkg/k8s" @@ -81,6 +80,8 @@ type K8sApplicationService interface { GetAllApiResourceGVKWithoutAuthorization(ctx context.Context, clusterId int) (*k8s2.GetAllApiResourcesResponse, error) GetAllApiResources(ctx context.Context, clusterId int, isSuperAdmin bool, userId int32) (*k8s2.GetAllApiResourcesResponse, error) GetResourceList(ctx context.Context, token string, request *k8s.ResourceRequestBean, validateResourceAccess func(token string, clusterName string, request k8s.ResourceRequestBean, casbinAction string) bool) (*k8s2.ClusterResourceListMap, error) + GetResourceListWithRestConfig(ctx context.Context, token string, request *k8s.ResourceRequestBean, validateResourceAccess func(token string, clusterName string, request k8s.ResourceRequestBean, casbinAction string) bool, + restConfig *rest.Config, clusterName string) (*k8s2.ClusterResourceListMap, error) ApplyResources(ctx context.Context, token string, request *k8s2.ApplyResourcesRequest, resourceRbacHandler func(token string, clusterName string, request k8s.ResourceRequestBean, casbinAction string) bool) ([]*k8s2.ApplyResourcesResponse, error) CreatePodEphemeralContainers(req *cluster.EphemeralContainerRequest) error TerminatePodEphemeralContainer(req cluster.EphemeralContainerRequest) (bool, error) @@ -90,7 +91,6 @@ type K8sApplicationService interface { DeleteResourceWithAudit(ctx context.Context, request *k8s.ResourceRequestBean, userId int32) (*k8s2.ManifestResponse, error) GetUrlsByBatchForIngress(ctx context.Context, resp []k8s.BatchResourceResponse) []interface{} ValidateFluxResourceRequest(ctx context.Context, appIdentifier *bean2.FluxAppIdentifier, request *k8s2.K8sRequestBean) (bool, error) - ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean4.ArgoAppIdentifier, request *k8s2.K8sRequestBean) (bool, error) } type K8sApplicationServiceImpl struct { @@ -106,15 +106,15 @@ type K8sApplicationServiceImpl struct { ephemeralContainerService 
cluster.EphemeralContainerService ephemeralContainerRepository repository.EphemeralContainersRepository ephemeralContainerConfig *EphemeralContainerConfig - argoApplicationService argoApplication.ArgoApplicationService - fluxApplicationService fluxApplication.FluxApplicationService + //argoApplicationService argoApplication.ArgoApplicationService + fluxApplicationService fluxApplication.FluxApplicationService } func NewK8sApplicationServiceImpl(Logger *zap.SugaredLogger, clusterService cluster.ClusterService, pump connector.Pump, helmAppService client.HelmAppService, K8sUtil *k8s2.K8sServiceImpl, aCDAuthConfig *util3.ACDAuthConfig, K8sResourceHistoryService kubernetesResourceAuditLogs.K8sResourceHistoryService, k8sCommonService k8s.K8sCommonService, terminalSession terminal.TerminalSessionHandler, ephemeralContainerService cluster.EphemeralContainerService, ephemeralContainerRepository repository.EphemeralContainersRepository, - argoApplicationService argoApplication.ArgoApplicationService, fluxApplicationService fluxApplication.FluxApplicationService) (*K8sApplicationServiceImpl, error) { + fluxApplicationService fluxApplication.FluxApplicationService) (*K8sApplicationServiceImpl, error) { ephemeralContainerConfig := &EphemeralContainerConfig{} err := env.Parse(ephemeralContainerConfig) if err != nil { @@ -134,8 +134,8 @@ func NewK8sApplicationServiceImpl(Logger *zap.SugaredLogger, clusterService clus ephemeralContainerService: ephemeralContainerService, ephemeralContainerRepository: ephemeralContainerRepository, ephemeralContainerConfig: ephemeralContainerConfig, - argoApplicationService: argoApplicationService, - fluxApplicationService: fluxApplicationService, + //argoApplicationService: argoApplicationService, + fluxApplicationService: fluxApplicationService, }, nil } @@ -249,7 +249,7 @@ func (impl *K8sApplicationServiceImpl) ValidatePodLogsRequestQuery(r *http.Reque request.DeploymentType = deploymentType // Validate App Id if request.AppType == bean3.ArgoAppType { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(appId) + appIdentifier, err := helper.DecodeExternalArgoAppId(appId) if err != nil { impl.logger.Errorw(bean3.AppIdDecodingError, "err", err, "appId", appId) return nil, err @@ -356,7 +356,7 @@ func (impl *K8sApplicationServiceImpl) ValidateTerminalRequestQuery(r *http.Requ request.ClusterId = fluxAppIdentifier.ClusterId } else if appType == bean3.ArgoAppType { - appIdentifier, err := argoApplication.DecodeExternalArgoAppId(request.ApplicationId) + appIdentifier, err := helper.DecodeExternalArgoAppId(request.ApplicationId) if err != nil { impl.logger.Errorw(bean3.InvalidAppId, "err", err, "appId", request.ApplicationId) return nil, nil, err @@ -364,7 +364,7 @@ func (impl *K8sApplicationServiceImpl) ValidateTerminalRequestQuery(r *http.Requ resourceRequestBean.ExternalArgoApplicationName = appIdentifier.AppName resourceRequestBean.ClusterId = appIdentifier.ClusterId request.ClusterId = appIdentifier.ClusterId - //request.ExternalArgoApplicationName = appIdentifier.AppName + request.ExternalArgoApplicationName = appIdentifier.AppName } } else { // Validate Cluster Id @@ -522,39 +522,6 @@ func (impl *K8sApplicationServiceImpl) validateResourceRequest(ctx context.Conte } return impl.validateContainerNameIfReqd(valid, request, app), nil } -func (impl *K8sApplicationServiceImpl) ValidateArgoResourceRequest(ctx context.Context, appIdentifier *bean4.ArgoAppIdentifier, request *k8s2.K8sRequestBean) (bool, error) { - app, err := 
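
Call sites that previously used argoApplication.DecodeExternalArgoAppId now import it from the new helper package. The exact wire format lives in that helper; the sketch below assumes a pipe-separated clusterId|namespace|appName id, which is a common scheme for such composite ids but is an assumption here, not a confirmed format:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type argoAppIdentifier struct {
	ClusterId int
	Namespace string
	AppName   string
}

// decodeExternalArgoAppId sketches what a decoder like
// helper.DecodeExternalArgoAppId has to do: split the composite id, parse the
// numeric cluster id, and reject malformed input.
func decodeExternalArgoAppId(appId string) (*argoAppIdentifier, error) {
	parts := strings.Split(appId, "|")
	if len(parts) != 3 {
		return nil, fmt.Errorf("malformed app id: %q", appId)
	}
	clusterId, err := strconv.Atoi(parts[0])
	if err != nil {
		return nil, fmt.Errorf("malformed cluster id in %q: %w", appId, err)
	}
	return &argoAppIdentifier{ClusterId: clusterId, Namespace: parts[1], AppName: parts[2]}, nil
}

func main() {
	id, err := decodeExternalArgoAppId("2|argocd|payments-service")
	fmt.Println(id, err)
}
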
impl.argoApplicationService.GetAppDetail(appIdentifier.AppName, appIdentifier.Namespace, appIdentifier.ClusterId) - if err != nil { - impl.logger.Errorw("error in getting app detail", "err", err, "appDetails", appIdentifier) - apiError := clientErrors.ConvertToApiError(err) - if apiError != nil { - err = apiError - } - return false, err - } - - valid := false - - for _, node := range app.ResourceTree.Nodes { - nodeDetails := k8s2.ResourceIdentifier{ - Name: node.Name, - Namespace: node.Namespace, - GroupVersionKind: schema.GroupVersionKind{ - Group: node.Group, - Version: node.Version, - Kind: node.Kind, - }, - } - if nodeDetails == request.ResourceIdentifier { - valid = true - break - } - } - appDetail := &gRPC.AppDetail{ - ResourceTreeResponse: app.ResourceTree, - } - return impl.validateContainerNameIfReqd(valid, request, appDetail), nil -} func (impl *K8sApplicationServiceImpl) ValidateFluxResourceRequest(ctx context.Context, appIdentifier *bean2.FluxAppIdentifier, request *k8s2.K8sRequestBean) (bool, error) { app, err := impl.fluxApplicationService.GetFluxAppDetail(ctx, appIdentifier) @@ -766,6 +733,13 @@ func (impl *K8sApplicationServiceImpl) GetResourceList(ctx context.Context, toke impl.logger.Errorw("error in getting rest config by cluster Id", "err", err, "clusterId", request.ClusterId) return resourceList, err } + return impl.GetResourceListWithRestConfig(ctx, token, request, validateResourceAccess, restConfig, clusterBean.ClusterName) +} + +func (impl *K8sApplicationServiceImpl) GetResourceListWithRestConfig(ctx context.Context, token string, request *k8s.ResourceRequestBean, + validateResourceAccess func(token string, clusterName string, request k8s.ResourceRequestBean, casbinAction string) bool, + restConfig *rest.Config, clusterName string) (*k8s2.ClusterResourceListMap, error) { + resourceList := &k8s2.ClusterResourceListMap{} k8sRequest := request.K8sRequest // store the copy of requested resource identifier resourceIdentifierCloned := k8sRequest.ResourceIdentifier @@ -785,22 +759,13 @@ func (impl *K8sApplicationServiceImpl) GetResourceList(ctx context.Context, toke resourceIdentifier.GroupVersionKind = schema.GroupVersionKind{Group: group, Kind: kind} } k8sRequest.ResourceIdentifier = resourceIdentifier - return validateResourceAccess(token, clusterBean.ClusterName, *request, casbin.ActionGet) + return validateResourceAccess(token, clusterName, *request, casbin.ActionGet) } resourceList, err = impl.K8sUtil.BuildK8sObjectListTableData(&resp.Resources, namespaced, request.K8sRequest.ResourceIdentifier.GroupVersionKind, false, checkForResourceCallback) if err != nil { impl.logger.Errorw("error on parsing for k8s resource", "err", err) return resourceList, err } - // Not used in FE side - - //k8sServerVersion, err := impl.k8sCommonService.GetK8sServerVersion(clusterId) - //if err != nil { - // impl.logger.Errorw("error in getting k8s server version", "clusterId", clusterId, "err", err) - // // return nil, err - //} else { - // resourceList.ServerVersion = k8sServerVersion.String() - //} return resourceList, nil } diff --git a/pkg/pipeline/AppDeploymentTypeChangeManager.go b/pkg/pipeline/AppDeploymentTypeChangeManager.go index a6130a23576..714bc8720fa 100644 --- a/pkg/pipeline/AppDeploymentTypeChangeManager.go +++ b/pkg/pipeline/AppDeploymentTypeChangeManager.go @@ -744,7 +744,7 @@ func (impl *AppDeploymentTypeChangeManagerImpl) fetchDeletedApp(ctx context.Cont impl.logger.Errorw("error in getting application detail", "err", err, "deploymentAppName", 
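
GetResourceList is split so that the cluster lookup happens once up front and the listing logic moves into GetResourceListWithRestConfig, which callers that already hold a *rest.Config (such as the external Argo flows) can invoke directly without a second cluster lookup. A minimal sketch of the resolve-then-delegate shape with stand-in types:

package main

import "fmt"

type restConfig struct{ Host string }

type resourceService struct{}

// getRestConfig stands in for the cluster lookup that GetResourceList performs.
func (s *resourceService) getRestConfig(clusterId int) (*restConfig, error) {
	return &restConfig{Host: fmt.Sprintf("https://cluster-%d", clusterId)}, nil
}

// listWithRestConfig is the reusable core, analogous to
// GetResourceListWithRestConfig: it never looks the cluster up itself.
func (s *resourceService) listWithRestConfig(cfg *restConfig, clusterName string) ([]string, error) {
	return []string{"deployment/web @ " + cfg.Host + " (" + clusterName + ")"}, nil
}

// list keeps the old entry point, now a thin wrapper: resolve, then delegate.
func (s *resourceService) list(clusterId int, clusterName string) ([]string, error) {
	cfg, err := s.getRestConfig(clusterId)
	if err != nil {
		return nil, err
	}
	return s.listWithRestConfig(cfg, clusterName)
}

func main() {
	s := &resourceService{}
	out, err := s.list(1, "default_cluster")
	fmt.Println(out, err)
}
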
pipeline.DeploymentAppName) } - if err != nil && checkAppReleaseNotExist(err) { + if err != nil && CheckAppReleaseNotExist(err) { successfulPipelines = impl.appendToDeploymentChangeStatusList( successfulPipelines, pipeline, diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index c9607825858..eff00206933 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" "github.com/caarlos0/env" + "github.com/devtron-labs/common-lib/utils" + bean3 "github.com/devtron-labs/common-lib/utils/bean" "github.com/devtron-labs/devtron/pkg/infraConfig" "github.com/devtron-labs/devtron/pkg/pipeline/adapter" "github.com/devtron-labs/devtron/pkg/pipeline/bean/CiPipeline" @@ -78,6 +80,7 @@ type CiServiceImpl struct { eventClient client.EventClient eventFactory client.EventFactory ciPipelineRepository pipelineConfig.CiPipelineRepository + ciArtifactRepository repository5.CiArtifactRepository pipelineStageService PipelineStageService userService user.UserService ciTemplateService CiTemplateService @@ -99,6 +102,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService ciWorkflowRepository pipelineConfig.CiWorkflowRepository, eventClient client.EventClient, eventFactory client.EventFactory, ciPipelineRepository pipelineConfig.CiPipelineRepository, + ciArtifactRepository repository5.CiArtifactRepository, pipelineStageService PipelineStageService, userService user.UserService, ciTemplateService CiTemplateService, appCrudOperationService app.AppCrudOperationService, envRepository repository1.EnvironmentRepository, appRepository appRepository.AppRepository, @@ -122,6 +126,7 @@ func NewCiServiceImpl(Logger *zap.SugaredLogger, workflowService WorkflowService eventClient: eventClient, eventFactory: eventFactory, ciPipelineRepository: ciPipelineRepository, + ciArtifactRepository: ciArtifactRepository, pipelineStageService: pipelineStageService, userService: userService, ciTemplateService: ciTemplateService, @@ -158,15 +163,65 @@ func (impl *CiServiceImpl) GetCiMaterials(pipelineId int, ciMaterials []*pipelin } } -func (impl *CiServiceImpl) handleRuntimeParamsValidations(trigger types.Trigger, ciMaterials []*pipelineConfig.CiPipelineMaterial) error { +func (impl *CiServiceImpl) handleRuntimeParamsValidations(trigger types.Trigger, ciMaterials []*pipelineConfig.CiPipelineMaterial, workflowRequest *types.WorkflowRequest) error { + // externalCi artifact is meant only for CI_JOB + if trigger.PipelineType != string(CiPipeline.CI_JOB) { + return nil + } + // checking if user has given run time parameters for externalCiArtifact, if given then sending git material to Ci-Runner externalCiArtifact, exists := trigger.ExtraEnvironmentVariables[CiPipeline.ExtraEnvVarExternalCiArtifactKey] // validate externalCiArtifact as docker image if exists { + externalCiArtifact = strings.TrimSpace(externalCiArtifact) if !strings.Contains(externalCiArtifact, ":") { - impl.Logger.Errorw("validation error", "externalCiArtifact", externalCiArtifact) - return fmt.Errorf("invalid image name given in externalCiArtifact") + if utils.IsValidDockerTagName(externalCiArtifact) { + fullImageUrl, err := utils.BuildDockerImagePath(bean3.DockerRegistryInfo{ + DockerImageTag: externalCiArtifact, + DockerRegistryId: workflowRequest.DockerRegistryId, + DockerRegistryType: workflowRequest.DockerRegistryType, + DockerRegistryURL: workflowRequest.DockerRegistryURL, + DockerRepository: workflowRequest.DockerRepository, + }) + if err != nil { + impl.Logger.Errorw("Error 
in building docker image", "err", err) + return err + } + externalCiArtifact = fullImageUrl + } else { + impl.Logger.Errorw("validation error", "externalCiArtifact", externalCiArtifact) + return fmt.Errorf("invalid image name or url given in externalCiArtifact") + } + + } + + trigger.ExtraEnvironmentVariables[CiPipeline.ExtraEnvVarExternalCiArtifactKey] = externalCiArtifact + + var artifactExists bool + var err error + if trigger.ExtraEnvironmentVariables[CiPipeline.ExtraEnvVarImageDigestKey] == "" { + artifactExists, err = impl.ciArtifactRepository.IfArtifactExistByImage(externalCiArtifact, trigger.PipelineId) + if err != nil { + impl.Logger.Errorw("error in fetching ci artifact", "err", err) + return err + } + if artifactExists { + impl.Logger.Errorw("ci artifact already exists with same image name", "artifact", externalCiArtifact) + return fmt.Errorf("ci artifact already exists with same image name") + } + } else { + artifactExists, err = impl.ciArtifactRepository.IfArtifactExistByImageDigest(trigger.ExtraEnvironmentVariables[CiPipeline.ExtraEnvVarImageDigestKey], externalCiArtifact, trigger.PipelineId) + if err != nil { + impl.Logger.Errorw("error in fetching ci artifact", "err", err) + return err + } + if artifactExists { + impl.Logger.Errorw("ci artifact already exists with same digest", "artifact", externalCiArtifact) + return fmt.Errorf("ci artifact already exists with same digest") + } + } + } if trigger.PipelineType == string(CiPipeline.CI_JOB) && len(ciMaterials) != 0 && !exists && externalCiArtifact == "" { ciMaterials[0].GitMaterial = nil @@ -181,10 +236,7 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger types.Trigger) (int, error) if err != nil { return 0, err } - err = impl.handleRuntimeParamsValidations(trigger, ciMaterials) - if err != nil { - return 0, err - } + ciPipelineScripts, err := impl.ciPipelineRepository.FindCiScriptsByCiPipelineId(trigger.PipelineId) if err != nil && !util.IsErrNoRows(err) { return 0, err @@ -265,6 +317,17 @@ func (impl *CiServiceImpl) TriggerCiPipeline(trigger types.Trigger) (int, error) impl.Logger.Errorw("make workflow req", "err", err) return 0, err } + err = impl.handleRuntimeParamsValidations(trigger, ciMaterials, workflowRequest) + if err != nil { + savedCiWf.Status = pipelineConfig.WorkflowAborted + savedCiWf.Message = err.Error() + err1 := impl.ciWorkflowRepository.UpdateWorkFlow(savedCiWf) + if err1 != nil { + impl.Logger.Errorw("could not save workflow, after failing due to conflicting image tag") + } + return 0, err + } + workflowRequest.Scope = scope workflowRequest.BuildxCacheModeMin = impl.buildxCacheFlags.BuildxCacheModeMin workflowRequest.AsyncBuildxCacheExport = impl.buildxCacheFlags.AsyncBuildxCacheExport diff --git a/pkg/pipeline/PipelineBuilder.go b/pkg/pipeline/PipelineBuilder.go index 7fdc24e7029..d47772e3c62 100644 --- a/pkg/pipeline/PipelineBuilder.go +++ b/pkg/pipeline/PipelineBuilder.go @@ -284,7 +284,7 @@ type PipelineStrategy struct { Default bool `json:"default"` } -func checkAppReleaseNotExist(err error) bool { +func CheckAppReleaseNotExist(err error) bool { // RELEASE_NOT_EXIST check for helm App and NOT_FOUND check for argo app return strings.Contains(err.Error(), bean.NOT_FOUND) || strings.Contains(err.Error(), bean.RELEASE_NOT_EXIST) } diff --git a/pkg/pipeline/WebhookService.go b/pkg/pipeline/WebhookService.go index 1a461a9435f..5b4b3446f61 100644 --- a/pkg/pipeline/WebhookService.go +++ b/pkg/pipeline/WebhookService.go @@ -21,8 +21,8 @@ import ( "encoding/json" "fmt" 
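
For CI_JOB pipelines, handleRuntimeParamsValidations now trims the externalCiArtifact parameter, validates a bare value as a tag with utils.IsValidDockerTagName, expands it into a full image path via utils.BuildDockerImagePath, and rejects the trigger when an artifact with the same image (or, when a digest is supplied, the same digest) already exists. A sketch of that decision flow; the lookup callbacks stand in for the CiArtifactRepository queries and the path layout is illustrative:

package main

import (
	"fmt"
	"strings"
)

// validateExternalArtifact sketches the CI_JOB validation flow; existsByImage
// and existsByDigest stand in for the repository lookups.
func validateExternalArtifact(artifact, digest, registryURL, repo string,
	existsByImage func(image string) bool,
	existsByDigest func(digest, image string) bool) (string, error) {
	artifact = strings.TrimSpace(artifact)
	if !strings.Contains(artifact, ":") {
		// bare value: treat it as a tag and expand to a full image path, as
		// utils.BuildDockerImagePath does from the workflow request's registry fields
		artifact = fmt.Sprintf("%s/%s:%s", registryURL, repo, artifact)
	}
	if digest == "" {
		if existsByImage(artifact) {
			return "", fmt.Errorf("ci artifact already exists with same image name")
		}
	} else if existsByDigest(digest, artifact) {
		return "", fmt.Errorf("ci artifact already exists with same digest")
	}
	return artifact, nil
}

func main() {
	seen := map[string]bool{"registry.local/team/app:v1": true}
	img, err := validateExternalArtifact("v2", "", "registry.local", "team/app",
		func(image string) bool { return seen[image] },
		func(digest, image string) bool { return false })
	fmt.Println(img, err) // registry.local/team/app:v2 <nil>
}
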
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" pubsub "github.com/devtron-labs/common-lib/pubsub-lib" + "github.com/devtron-labs/common-lib/utils/registry" "github.com/devtron-labs/devtron/internal/sql/repository" "github.com/devtron-labs/devtron/internal/sql/repository/pipelineConfig" "github.com/devtron-labs/devtron/pkg/pipeline/bean" @@ -50,14 +50,10 @@ type ExternalCiWebhookDto struct { AppName string `json:"appName"` IsArtifactUploaded bool `json:"isArtifactUploaded"` FailureReason string `json:"failureReason"` - ImageDetailsFromCR *ImageDetailsFromCR `json:"imageDetailsFromCR"` + ImageDetailsFromCR json.RawMessage `json:"imageDetailsFromCR"` PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"` PluginArtifactStage string `json:"pluginArtifactStage"` } -type ImageDetailsFromCR struct { - ImageDetails []types.ImageDetail `json:"imageDetails"` - Region string `json:"region"` -} type CiArtifactWebhookRequest struct { Image string `json:"image" validate:"required"` @@ -75,7 +71,7 @@ type CiArtifactWebhookRequest struct { type WebhookService interface { AuthenticateExternalCiWebhook(apiKey string) (int, error) - HandleMultipleImagesFromEvent(imageDetails []types.ImageDetail, ciWorkflowId int) (map[string]*pipelineConfig.CiWorkflow, error) + HandleMultipleImagesFromEvent(imageDetails []*registry.GenericImageDetail, ciWorkflowId int) (map[string]*pipelineConfig.CiWorkflow, error) GetTriggerValidateFuncs() []pubsub.ValidateMsg } @@ -136,7 +132,7 @@ func (impl WebhookServiceImpl) AuthenticateExternalCiWebhook(apiKey string) (int } // HandleMultipleImagesFromEvent handles multiple images from plugin and creates ci workflow for n-1 images for mapping in ci_artifact -func (impl *WebhookServiceImpl) HandleMultipleImagesFromEvent(imageDetails []types.ImageDetail, ciWorkflowId int) (map[string]*pipelineConfig.CiWorkflow, error) { +func (impl *WebhookServiceImpl) HandleMultipleImagesFromEvent(imageDetails []*registry.GenericImageDetail, ciWorkflowId int) (map[string]*pipelineConfig.CiWorkflow, error) { ciWorkflow, err := impl.ciWorkflowRepository.FindById(ciWorkflowId) if err != nil { impl.logger.Errorw("error in finding ci workflow by id ", "err", err, "ciWorkFlowId", ciWorkflowId) @@ -146,7 +142,7 @@ func (impl *WebhookServiceImpl) HandleMultipleImagesFromEvent(imageDetails []typ // creating n-1 workflows for rest images, oldest will be mapped to original workflow id. 
digestWorkflowMap := make(map[string]*pipelineConfig.CiWorkflow) // mapping oldest to original ciworkflowId - digestWorkflowMap[*imageDetails[0].ImageDigest] = ciWorkflow + digestWorkflowMap[imageDetails[0].GetGenericImageDetailIdentifier()] = ciWorkflow for i := 1; i < len(imageDetails); i++ { workflow := &pipelineConfig.CiWorkflow{ Name: ciWorkflow.Name + fmt.Sprintf("-child-%d", i), @@ -170,7 +166,7 @@ func (impl *WebhookServiceImpl) HandleMultipleImagesFromEvent(imageDetails []typ impl.logger.Errorw("error in saving workflow for child workflow", "err", err, "parentCiWorkflowId", ciWorkflowId) return nil, err } - digestWorkflowMap[*imageDetails[i].ImageDigest] = workflow + digestWorkflowMap[imageDetails[i].GetGenericImageDetailIdentifier()] = workflow } return digestWorkflowMap, nil diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 55ff942789b..5423ab22b2d 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -24,6 +24,7 @@ import ( "github.com/devtron-labs/devtron/pkg/auth/user" "github.com/devtron-labs/devtron/pkg/auth/user/bean" repository2 "github.com/devtron-labs/devtron/pkg/pipeline/repository" + "github.com/devtron-labs/devtron/pkg/plugin/adaptor" bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" helper2 "github.com/devtron-labs/devtron/pkg/plugin/helper" "github.com/devtron-labs/devtron/pkg/plugin/repository" @@ -31,8 +32,8 @@ import ( "github.com/devtron-labs/devtron/pkg/sql" "github.com/go-pg/pg" "go.uber.org/zap" + "golang.org/x/mod/semver" "net/http" - "strconv" "strings" "time" ) @@ -73,9 +74,11 @@ type GlobalPluginService interface { GetDetailedPluginInfoByPluginId(pluginId int) (*bean2.PluginMetadataDto, error) GetAllDetailedPluginInfo() ([]*bean2.PluginMetadataDto, error) + CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) ListAllPluginsV2(filter *bean2.PluginsListFilter) (*bean2.PluginsDto, error) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) GetAllUniqueTags() (*bean2.PluginTagsDto, error) + GetAllPluginMinData() ([]*bean2.PluginMinDto, error) MigratePluginData() error } @@ -429,7 +432,7 @@ func (impl *GlobalPluginServiceImpl) validatePluginRequest(pluginReq *bean2.Plug return errors.New("invalid plugin type, should be of the type PRESET or SHARED") } - plugins, err := impl.globalPluginRepository.GetMetaDataForAllPlugins() + plugins, err := impl.globalPluginRepository.GetAllPluginMinData() if err != nil { impl.logger.Errorw("error in getting all plugins", "err", err) return err @@ -670,33 +673,10 @@ func (impl *GlobalPluginServiceImpl) UpdatePluginPipelineScript(dbPluginPipeline func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int, pluginStepsReq []*bean2.PluginStepsDto, userId int32, tx *pg.Tx) error { for _, pluginStep := range pluginStepsReq { - pluginStepData := &repository.PluginStep{ - PluginId: pluginMetadataId, - Name: pluginStep.Name, - Description: pluginStep.Description, - Index: pluginStep.Index, - StepType: pluginStep.StepType, - RefPluginId: pluginStep.RefPluginId, - OutputDirectoryPath: pluginStep.OutputDirectoryPath, - DependentOnStep: pluginStep.DependentOnStep, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepData := adaptor.GetPluginStepDbObject(pluginStep, pluginMetadataId, userId) //get the script saved for this plugin step if pluginStep.PluginPipelineScript != nil { - pluginPipelineScript := 
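
HandleMultipleImagesFromEvent maps the oldest image (index 0 after the ascending sort) onto the triggering workflow and creates a "-child-N" workflow for every additional image, keyed by the generic image identifier. A sketch of that mapping; newWorkflowId stands in for the repository save that assigns real ids:

package main

import "fmt"

type workflow struct {
	Id   int
	Name string
}

// mapImagesToWorkflows mirrors the digestWorkflowMap construction: the first
// identifier keeps the parent workflow, every later image gets a synthetic
// "-child-N" workflow.
func mapImagesToWorkflows(identifiers []string, parent workflow, newWorkflowId func() int) map[string]workflow {
	m := map[string]workflow{identifiers[0]: parent}
	for i := 1; i < len(identifiers); i++ {
		m[identifiers[i]] = workflow{
			Id:   newWorkflowId(),
			Name: fmt.Sprintf("%s-child-%d", parent.Name, i),
		}
	}
	return m
}

func main() {
	next := 500
	ids := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}
	m := mapImagesToWorkflows(ids, workflow{Id: 42, Name: "ci-42"}, func() int { next++; return next })
	for _, id := range ids {
		fmt.Println(id, "->", m[id].Id, m[id].Name)
	}
}
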
&repository.PluginPipelineScript{ - Script: pluginStep.PluginPipelineScript.Script, - StoreScriptAt: pluginStep.PluginPipelineScript.StoreScriptAt, - Type: pluginStep.PluginPipelineScript.Type, - DockerfileExists: pluginStep.PluginPipelineScript.DockerfileExists, - MountPath: pluginStep.PluginPipelineScript.MountPath, - MountCodeToContainer: pluginStep.PluginPipelineScript.MountCodeToContainer, - MountCodeToContainerPath: pluginStep.PluginPipelineScript.MountCodeToContainerPath, - MountDirectoryFromHost: pluginStep.PluginPipelineScript.MountDirectoryFromHost, - ContainerImagePath: pluginStep.PluginPipelineScript.ContainerImagePath, - ImagePullSecretType: pluginStep.PluginPipelineScript.ImagePullSecretType, - ImagePullSecret: pluginStep.PluginPipelineScript.ImagePullSecret, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginPipelineScript := adaptor.GetPluginPipelineScriptDbObject(pluginStep.PluginPipelineScript, userId) pluginPipelineScript, err := impl.globalPluginRepository.SavePluginPipelineScript(pluginPipelineScript, tx) if err != nil { impl.logger.Errorw("error in saving plugin pipeline script", "pluginPipelineScript", pluginPipelineScript, "err", err) @@ -719,23 +699,7 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStep.Id = pluginStepData.Id //create entry in plugin_step_variable for _, pluginStepVariable := range pluginStep.PluginStepVariable { - pluginStepVariableData := &repository.PluginStepVariable{ - PluginStepId: pluginStepData.Id, - Name: pluginStepVariable.Name, - Format: pluginStepVariable.Format, - Description: pluginStepVariable.Description, - IsExposed: pluginStepVariable.IsExposed, - AllowEmptyValue: pluginStepVariable.AllowEmptyValue, - DefaultValue: pluginStepVariable.DefaultValue, - Value: pluginStepVariable.Value, - VariableType: pluginStepVariable.VariableType, - ValueType: pluginStepVariable.ValueType, - PreviousStepIndex: pluginStepVariable.PreviousStepIndex, - VariableStepIndex: pluginStepVariable.VariableStepIndex, - VariableStepIndexInPlugin: pluginStepVariable.VariableStepIndexInPlugin, - ReferenceVariableName: pluginStepVariable.ReferenceVariableName, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepVariableData := adaptor.GetPluginStepVariableDbObject(pluginStepData.Id, pluginStepVariable, userId) pluginStepVariableData, err = impl.globalPluginRepository.SavePluginStepVariables(pluginStepVariableData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step variable", "pluginStepVariableData", pluginStepVariableData, "err", err) @@ -744,14 +708,7 @@ func (impl *GlobalPluginServiceImpl) saveDeepPluginStepData(pluginMetadataId int pluginStepVariable.Id = pluginStepVariableData.Id //create entry in plugin_step_condition for _, pluginStepCondition := range pluginStepVariable.PluginStepCondition { - pluginStepConditionData := &repository.PluginStepCondition{ - PluginStepId: pluginStepData.Id, - ConditionVariableId: pluginStepVariableData.Id, - ConditionType: pluginStepCondition.ConditionType, - ConditionalOperator: pluginStepCondition.ConditionalOperator, - ConditionalValue: pluginStepCondition.ConditionalValue, - AuditLog: sql.NewDefaultAuditLog(userId), - } + pluginStepConditionData := adaptor.GetPluginStepConditionDbObject(pluginStepData.Id, pluginStepVariableData.Id, pluginStepCondition, userId) pluginStepConditionData, err = impl.globalPluginRepository.SavePluginStepConditions(pluginStepConditionData, tx) if err != nil { impl.logger.Errorw("error in saving plugin step condition", 
"pluginStepConditionData", pluginStepConditionData, "err", err) @@ -768,7 +725,6 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM if len(pluginUpdateReq.Type) == 0 { return nil, errors.New("invalid plugin type, should be of the type PRESET or SHARED") } - dbConnection := impl.globalPluginRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { @@ -856,6 +812,7 @@ func (impl *GlobalPluginServiceImpl) updatePlugin(pluginUpdateReq *bean2.PluginM return nil, err } } + if len(pluginStepsToUpdate) > 0 { err = impl.updateDeepPluginStepData(pluginStepsToUpdate, pluginStepVariables, pluginStepConditions, pluginSteps, userId, tx) if err != nil { @@ -1386,7 +1343,6 @@ func filterPluginStepData(existingPluginStepsInDb []*repository.PluginStep, plug } else { return nil, nil, pluginStepUpdateReq } - return newPluginStepsToCreate, pluginStepsToRemove, pluginStepsToUpdate } @@ -1805,28 +1761,59 @@ func (impl *GlobalPluginServiceImpl) ListAllPluginsV2(filter *bean2.PluginsListF return pluginDetails, nil } +func (impl *GlobalPluginServiceImpl) validateDetailRequest(pluginVersions []*repository.PluginMetadata, pluginVersionIds, parentPluginIds []int) error { + pluginVersionsIdMap, pluginParentIdMap := make(map[int]bool, len(pluginVersionIds)), make(map[int]bool, len(parentPluginIds)) + allPlugins, err := impl.globalPluginRepository.GetAllPluginMinData() + if err != nil { + impl.logger.Errorw("validateDetailRequest, error in getting all plugins parent metadata", "err", err) + return err + } + for _, pluginVersion := range pluginVersions { + pluginVersionsIdMap[pluginVersion.Id] = true + } + for _, plugin := range allPlugins { + pluginParentIdMap[plugin.Id] = true + } + for _, versionId := range pluginVersionIds { + if _, ok := pluginVersionsIdMap[versionId]; !ok { + errorMsg := fmt.Sprintf("there are some plugin version ids in request that do not exist:- %d", versionId) + return util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) + } + } + for _, pluginId := range parentPluginIds { + if _, ok := pluginParentIdMap[pluginId]; !ok { + errorMsg := fmt.Sprintf("there are some plugin parent ids in request that do not exist %d", pluginId) + return util.GetApiError(http.StatusBadRequest, errorMsg, errorMsg) + } + } + return nil +} // GetPluginDetailV2 returns all details of the of a plugin version according to the pluginVersionIds and parentPluginIds // provided by user, and minimal data for all versions of that plugin. 
 func (impl *GlobalPluginServiceImpl) GetPluginDetailV2(pluginVersionIds, parentPluginIds []int, fetchAllVersionDetails bool) (*bean2.PluginsDto, error) {
+	var err error
+	pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins()
+	if err != nil {
+		impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err)
+		return nil, err
+	}
+	err = impl.validateDetailRequest(pluginVersionsMetadata, pluginVersionIds, parentPluginIds)
+	if err != nil {
+		return nil, err
+	}
 	pluginParentMetadataDtos := make([]*bean2.PluginParentMetadataDto, 0, len(pluginVersionIds)+len(parentPluginIds))
 	if len(pluginVersionIds) == 0 && len(parentPluginIds) == 0 {
-		return nil, &util.ApiError{HttpStatusCode: http.StatusBadRequest, Code: strconv.Itoa(http.StatusBadRequest), InternalMessage: bean2.NoPluginOrParentIdProvidedErr, UserMessage: bean2.NoPluginOrParentIdProvidedErr}
+		return nil, util.GetApiError(http.StatusBadRequest, bean2.NoPluginOrParentIdProvidedErr, bean2.NoPluginOrParentIdProvidedErr)
 	}
 	pluginVersionIdsMap, parentPluginIdsMap := helper2.GetPluginVersionAndParentPluginIdsMap(pluginVersionIds, parentPluginIds)
-	var err error
 	pluginParentMetadataIds := make([]int, 0, len(pluginVersionIds)+len(parentPluginIds))
 	pluginVersionsIdToInclude := make(map[int]bool, len(pluginVersionIds)+len(parentPluginIds))
-	pluginVersionsMetadata, err := impl.globalPluginRepository.GetMetaDataForAllPlugins()
-	if err != nil {
-		impl.logger.Errorw("GetPluginDetailV2, error in getting all plugins versions metadata", "err", err)
-		return nil, err
-	}
 	filteredPluginVersionMetadata := helper2.GetPluginVersionsMetadataByVersionAndParentPluginIds(pluginVersionsMetadata, pluginVersionIdsMap, parentPluginIdsMap)
 	if len(filteredPluginVersionMetadata) == 0 {
-		return nil, &util.ApiError{HttpStatusCode: http.StatusNotFound, Code: strconv.Itoa(http.StatusNotFound), InternalMessage: bean2.NoPluginFoundForThisSearchQueryErr, UserMessage: bean2.NoPluginFoundForThisSearchQueryErr}
+		return nil, util.GetApiError(http.StatusNotFound, bean2.NoPluginFoundForThisSearchQueryErr, bean2.NoPluginFoundForThisSearchQueryErr)
 	}
 	for _, version := range filteredPluginVersionMetadata {
 		_, found := pluginVersionIdsMap[version.Id]
@@ -1884,7 +1871,6 @@ func (impl *GlobalPluginServiceImpl) MigratePluginData() error {
 // MigratePluginDataToParentPluginMetadata migrates pre-existing plugin metadata from plugin_metadata table into plugin_parent_metadata table,
 // and also populates plugin_parent_metadata_id in plugin_metadata.
-// this operation will happen only once when the get all plugin list v2 api is being called, returns error if any
 func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(pluginsMetadata []*repository.PluginMetadata) error {
 	dbConnection := impl.globalPluginRepository.GetConnection()
 	tx, err := dbConnection.Begin()
@@ -1948,3 +1934,303 @@ func (impl *GlobalPluginServiceImpl) MigratePluginDataToParentPluginMetadata(plu
 	}
 	return nil
 }
+
+func (impl *GlobalPluginServiceImpl) GetAllPluginMinData() ([]*bean2.PluginMinDto, error) {
+	pluginsParentMinData, err := impl.globalPluginRepository.GetAllPluginMinData()
+	if err != nil {
+		impl.logger.Errorw("GetAllPluginMinData, error in getting all plugin parent metadata min data", "err", err)
+		return nil, err
+	}
+	pluginMinList := make([]*bean2.PluginMinDto, 0, len(pluginsParentMinData))
+	for _, item := range pluginsParentMinData {
+		// creating new versions of PRESET plugins is disabled for end users, so PRESET plugins are excluded from the min list
+		if item.Type == repository.PLUGIN_TYPE_PRESET {
+			continue
+		}
+		pluginMinList = append(pluginMinList, bean2.NewPluginMinDto().WithParentPluginId(item.Id).WithPluginName(item.Name).WithIcon(item.Icon))
+	}
+	return pluginMinList, nil
+}
+
+func (impl *GlobalPluginServiceImpl) checkValidationOnPluginNameAndIdentifier(pluginReq *bean2.PluginParentMetadataDto) error {
+	plugins, err := impl.globalPluginRepository.GetAllPluginMinData()
+	if err != nil {
+		impl.logger.Errorw("error in getting all plugins", "err", err)
+		return err
+	}
+	for _, plugin := range plugins {
+		if plugin.Identifier == pluginReq.PluginIdentifier {
+			return util.GetApiError(http.StatusConflict, bean2.PluginWithSameIdentifierExistsError, bean2.PluginWithSameIdentifierExistsError)
+		}
+		if plugin.Name == pluginReq.Name {
+			return util.GetApiError(http.StatusConflict, bean2.PluginWithSameNameExistError, bean2.PluginWithSameNameExistError)
+		}
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) checkValidationOnVersion(pluginReq *bean2.PluginParentMetadataDto) error {
+	pluginVersions, err := impl.globalPluginRepository.GetPluginVersionsByParentId(pluginReq.Id)
+	if err != nil {
+		impl.logger.Errorw("checkValidationOnVersion, error in getting all plugin versions by parentPluginId", "parentPluginId", pluginReq.Id, "err", err)
+		return err
+	}
+	for _, pluginVersion := range pluginVersions {
+		if pluginReq.Versions != nil && len(pluginReq.Versions.DetailedPluginVersionData) > 0 && pluginReq.Versions.DetailedPluginVersionData[0] != nil {
+			// if the plugin version in the request already exists, return an error
+			if pluginVersion.PluginVersion == pluginReq.Versions.DetailedPluginVersionData[0].Version {
+				return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionAlreadyExistError, bean2.PluginVersionAlreadyExistError)
+			}
+		}
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) validateV2PluginRequest(pluginReq *bean2.PluginParentMetadataDto) error {
+	if pluginReq.Versions == nil || len(pluginReq.Versions.DetailedPluginVersionData) == 0 || pluginReq.Versions.DetailedPluginVersionData[0] == nil {
+		return util.GetApiError(http.StatusBadRequest, bean2.NoStepDataToProceedError, bean2.NoStepDataToProceedError)
+	}
+	if pluginReq.Id == 0 {
+		// create plugin req.
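// Branching note: pluginReq.Id == 0 means no parent plugin exists yet, so name and
// identifier uniqueness are validated; a non-zero Id targets an existing parent plugin,
// and only the incoming version is checked against the versions already stored for it.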
+		err := impl.checkValidationOnPluginNameAndIdentifier(pluginReq)
+		if err != nil {
+			impl.logger.Errorw("error in checkValidationOnPluginNameAndIdentifier", "err", err)
+			return err
+		}
+	} else {
+		err := impl.checkValidationOnVersion(pluginReq)
+		if err != nil {
+			impl.logger.Errorw("error in checkValidationOnVersion", "err", err)
+			return err
+		}
+	}
+	version := pluginReq.Versions.DetailedPluginVersionData[0].Version
+	// golang.org/x/mod/semver expects versions to carry a leading "v"
+	if !strings.HasPrefix(version, "v") {
+		version = fmt.Sprintf("v%s", version)
+	}
+	// semantic versioning validation on plugin's version
+	if !semver.IsValid(version) {
+		return util.GetApiError(http.StatusBadRequest, bean2.PluginVersionNotSemanticallyCorrectError, bean2.PluginVersionNotSemanticallyCorrectError)
+	}
+	// validate icon url and size
+	if len(pluginReq.Icon) > 0 {
+		err := utils.FetchIconAndCheckSize(pluginReq.Icon, bean2.PluginIconMaxSizeInBytes)
+		if err != nil {
+			errMsg := fmt.Sprintf("%s, err: %s", bean2.PluginIconNotCorrectOrReachableError, err.Error())
+			return util.GetApiError(http.StatusBadRequest, errMsg, errMsg)
+		}
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) createPluginTagAndRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+	if pluginReq.AreNewTagsPresent {
+		err := impl.CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq, userId, tx)
+		if err != nil {
+			impl.logger.Errorw("createPluginTagAndRelations, error in CreateNewPluginTagsAndRelationsIfRequiredV2", "tags", pluginReq.Tags, "err", err)
+			return err
+		}
+	} else if len(pluginReq.Tags) > 0 {
+		err := impl.CreatePluginTagRelations(pluginReq, userId, tx)
+		if err != nil {
+			impl.logger.Errorw("createPluginTagAndRelations, error in CreatePluginTagRelations", "tags", pluginReq.Tags, "err", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) CreatePluginTagRelations(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+	tags, err := impl.globalPluginRepository.GetPluginTagByNames(pluginReq.Tags)
+	if err != nil {
+		impl.logger.Errorw("CreatePluginTagRelations, error in GetPluginTagByNames", "tags", pluginReq.Tags, "err", err)
+		return err
+	}
+	newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags))
+	for _, tag := range tags {
+		newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(tag.Id, pluginReq.Id))
+	}
+
+	if len(newPluginTagRelationsToCreate) > 0 {
+		err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx)
+		if err != nil {
+			impl.logger.Errorw("CreatePluginTagRelations, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) createPluginStepDataAndTagRelations(pluginVersionId int, pluginVersionDetail *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error {
+	if len(pluginVersionDetail.PluginSteps) > 0 {
+		err := impl.saveDeepPluginStepData(pluginVersionId, pluginVersionDetail.PluginSteps, userId, tx)
+		if err != nil {
+			impl.logger.Errorw("createPluginStepDataAndTagRelations, error in saving plugin step data", "err", err)
+			return err
+		}
+	} else {
+		return util.GetApiError(http.StatusBadRequest, bean2.PluginStepsNotProvidedError, bean2.PluginStepsNotProvidedError)
+	}
+
+	err := impl.createPluginTagAndRelations(pluginVersionDetail, userId, tx)
+	if err != nil {
+		impl.logger.Errorw("createPluginStepDataAndTagRelations, error in createPluginTagAndRelations", "tags", pluginVersionDetail.Tags, "err", err)
+		return err
+	}
+	return nil
+}
+
+func (impl *GlobalPluginServiceImpl) createNewPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) {
+	pluginParentMetadata, err := impl.globalPluginRepository.SavePluginParentMetadata(tx, adaptor.GetPluginParentMetadataDbObject(pluginDto, userId))
+	if err != nil {
+		impl.logger.Errorw("createNewPlugin, error in saving plugin parent metadata", "pluginDto", pluginDto, "err", err)
+		return 0, err
+	}
+	pluginDto.Id = pluginParentMetadata.Id
+	pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId).
+		WithPluginParentMetadataId(pluginParentMetadata.Id).
+		WithIsLatestFlag(true)
+
+	pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx)
+	if err != nil {
+		impl.logger.Errorw("createNewPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err)
+		return 0, err
+	}
+	pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id
+
+	pluginStageMapping := &repository.PluginStageMapping{
+		PluginId:  pluginParentMetadata.Id,
+		StageType: repository.CI_CD,
+		AuditLog:  sql.NewDefaultAuditLog(userId),
+	}
+	_, err = impl.globalPluginRepository.SavePluginStageMapping(pluginStageMapping, tx)
+	if err != nil {
+		impl.logger.Errorw("createNewPlugin, error in saving plugin stage mapping", "pluginDto", pluginDto, "err", err)
+		return 0, err
+	}
+
+	err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx)
+	if err != nil {
+		impl.logger.Errorw("createNewPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err)
+		return 0, err
+	}
+	return pluginVersionMetadata.Id, nil
+}
+
+func (impl *GlobalPluginServiceImpl) createNewPluginVersionOfExistingPlugin(tx *pg.Tx, pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) {
+	var pluginParentMinData *repository.PluginParentMetadata
+	var err error
+	pluginParentMinData, err = impl.globalPluginRepository.GetPluginParentMinDataById(pluginDto.Id)
+	if err != nil {
+		impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in getting plugin parent metadata", "pluginDto", pluginDto, "err", err)
+		return 0, err
+	}
+	// before saving the new plugin version, mark the previous version's isLatest flag as false
+	err = impl.globalPluginRepository.MarkPreviousPluginVersionLatestFalse(pluginParentMinData.Id)
+	if err != nil {
+		impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in MarkPreviousPluginVersionLatestFalse", "pluginParentId", pluginDto.Id, "err", err)
+		return 0, err
+	}
+	pluginDto.Name = pluginParentMinData.Name
+	pluginVersionDto := adaptor.GetPluginVersionMetadataDbObject(pluginDto, userId).
+		WithPluginParentMetadataId(pluginParentMinData.Id).
+ WithIsLatestFlag(true) + + pluginVersionMetadata, err := impl.globalPluginRepository.SavePluginMetadata(pluginVersionDto, tx) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in saving plugin version metadata", "pluginDto", pluginDto, "err", err) + return 0, err + } + pluginDto.Versions.DetailedPluginVersionData[0].Id = pluginVersionMetadata.Id + + err = impl.createPluginStepDataAndTagRelations(pluginVersionMetadata.Id, pluginDto.Versions.DetailedPluginVersionData[0], userId, tx) + if err != nil { + impl.logger.Errorw("createNewPluginVersionOfExistingPlugin, error in createPluginStepDataAndTagRelations", "pluginDto", pluginDto, "err", err) + return 0, err + } + return pluginVersionMetadata.Id, nil +} + +func (impl *GlobalPluginServiceImpl) CreatePluginOrVersions(pluginDto *bean2.PluginParentMetadataDto, userId int32) (int, error) { + err := impl.validateV2PluginRequest(pluginDto) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in validating create plugin request", "pluginReqDto", pluginDto, "err", err) + return 0, err + } + + dbConnection := impl.globalPluginRepository.GetConnection() + tx, err := dbConnection.Begin() + if err != nil { + return 0, err + } + // Rollback tx on error. + defer tx.Rollback() + var versionMetadataId int + if pluginDto.Id > 0 { + // create new version of existing plugin req. + versionMetadataId, err = impl.createNewPluginVersionOfExistingPlugin(tx, pluginDto, userId) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in creating new version of an existing plugin", "existingPluginName", pluginDto.Name, "err", err) + return 0, err + } + } else { + // create new plugin req. + versionMetadataId, err = impl.createNewPlugin(tx, pluginDto, userId) + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in creating new plugin", "pluginDto", pluginDto, "err", err) + return 0, err + } + } + err = tx.Commit() + if err != nil { + impl.logger.Errorw("CreatePluginOrVersions, error in committing db transaction", "err", err) + return 0, err + } + return versionMetadataId, nil +} + +func (impl *GlobalPluginServiceImpl) CreateNewPluginTagsAndRelationsIfRequiredV2(pluginReq *bean2.PluginsVersionDetail, userId int32, tx *pg.Tx) error { + allPluginTags, err := impl.globalPluginRepository.GetAllPluginTags() + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in getting all plugin tags", "err", err) + return err + } + existingTagMap := make(map[string]*repository.PluginTag, len(allPluginTags)) + for _, tag := range allPluginTags { + existingTagMap[tag.Name] = tag + } + //check for new tags, then create new plugin_tag and plugin_tag_relation entry in db when new tags are present in request + newPluginTagsToCreate := make([]*repository.PluginTag, 0, len(pluginReq.Tags)) + newPluginTagRelationsToCreate := make([]*repository.PluginTagRelation, 0, len(pluginReq.Tags)) + + for _, tagReq := range pluginReq.Tags { + if _, ok := existingTagMap[tagReq]; !ok { + newPluginTagsToCreate = append(newPluginTagsToCreate, repository.NewPluginTag().CreateAuditLog(userId).WithName(tagReq)) + } + } + + if len(newPluginTagsToCreate) > 0 { + err = impl.globalPluginRepository.SavePluginTagInBulk(newPluginTagsToCreate, tx) + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag", "newPluginTags", newPluginTagsToCreate, "err", err) + return err + } + for _, newTag := range newPluginTagsToCreate { + existingTagMap[newTag.Name] 
= newTag + } + } + + for _, tag := range pluginReq.Tags { + newPluginTagRelationsToCreate = append(newPluginTagRelationsToCreate, repository.NewPluginTagRelation().CreateAuditLog(userId).WithTagAndPluginId(existingTagMap[tag].Id, pluginReq.Id)) + } + + if len(newPluginTagRelationsToCreate) > 0 { + err = impl.globalPluginRepository.SavePluginTagRelationInBulk(newPluginTagRelationsToCreate, tx) + if err != nil { + impl.logger.Errorw("CreateNewPluginTagsAndRelationsIfRequiredV2, error in saving plugin tag relation in bulk", "newPluginTagRelationsToCreate", newPluginTagRelationsToCreate, "err", err) + return err + } + } + return nil +} diff --git a/pkg/plugin/adaptor/adaptor.go b/pkg/plugin/adaptor/adaptor.go new file mode 100644 index 00000000000..e5e0f50e9d3 --- /dev/null +++ b/pkg/plugin/adaptor/adaptor.go @@ -0,0 +1,80 @@ +package adaptor + +import ( + bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean" + "github.com/devtron-labs/devtron/pkg/plugin/repository" + "github.com/devtron-labs/devtron/pkg/sql" +) + +func GetPluginParentMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginParentMetadata { + return repository.NewPluginParentMetadata().CreateAuditLog(userId). + WithBasicMetadata(pluginDto.Name, pluginDto.PluginIdentifier, pluginDto.Description, pluginDto.Icon, repository.PLUGIN_TYPE_SHARED) +} + +func GetPluginVersionMetadataDbObject(pluginDto *bean2.PluginParentMetadataDto, userId int32) *repository.PluginMetadata { + versionDto := pluginDto.Versions.DetailedPluginVersionData[0] + return repository.NewPluginVersionMetadata().CreateAuditLog(userId).WithBasicMetadata(pluginDto.Name, versionDto.Description, versionDto.Version, versionDto.DocLink) +} + +func GetPluginStepDbObject(pluginStepDto *bean2.PluginStepsDto, pluginVersionMetadataId int, userId int32) *repository.PluginStep { + return &repository.PluginStep{ + PluginId: pluginVersionMetadataId, + Name: pluginStepDto.Name, + Description: pluginStepDto.Description, + Index: 1, + StepType: repository.PLUGIN_STEP_TYPE_INLINE, + RefPluginId: pluginStepDto.RefPluginId, + OutputDirectoryPath: pluginStepDto.OutputDirectoryPath, + DependentOnStep: pluginStepDto.DependentOnStep, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} +func GetPluginPipelineScriptDbObject(pluginPipelineScript *bean2.PluginPipelineScript, userId int32) *repository.PluginPipelineScript { + return &repository.PluginPipelineScript{ + Script: pluginPipelineScript.Script, + StoreScriptAt: pluginPipelineScript.StoreScriptAt, + Type: pluginPipelineScript.Type, + DockerfileExists: pluginPipelineScript.DockerfileExists, + MountPath: pluginPipelineScript.MountPath, + MountCodeToContainer: pluginPipelineScript.MountCodeToContainer, + MountCodeToContainerPath: pluginPipelineScript.MountCodeToContainerPath, + MountDirectoryFromHost: pluginPipelineScript.MountDirectoryFromHost, + ContainerImagePath: pluginPipelineScript.ContainerImagePath, + ImagePullSecretType: pluginPipelineScript.ImagePullSecretType, + ImagePullSecret: pluginPipelineScript.ImagePullSecret, + AuditLog: sql.NewDefaultAuditLog(userId), + } + +} + +func GetPluginStepVariableDbObject(pluginStepId int, pluginVariableDto *bean2.PluginVariableDto, userId int32) *repository.PluginStepVariable { + return &repository.PluginStepVariable{ + PluginStepId: pluginStepId, + Name: pluginVariableDto.Name, + Format: pluginVariableDto.Format, + Description: pluginVariableDto.Description, + IsExposed: true, //currently hard coding this, later after plugin creation gets more mature will 
let user decide + AllowEmptyValue: pluginVariableDto.AllowEmptyValue, + DefaultValue: pluginVariableDto.DefaultValue, + Value: pluginVariableDto.Value, + VariableType: pluginVariableDto.VariableType, + ValueType: pluginVariableDto.ValueType, + PreviousStepIndex: pluginVariableDto.PreviousStepIndex, + VariableStepIndex: 1, //currently hard coding this, later after plugin creation gets more mature will let user decide + VariableStepIndexInPlugin: pluginVariableDto.VariableStepIndexInPlugin, + ReferenceVariableName: pluginVariableDto.ReferenceVariableName, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} + +func GetPluginStepConditionDbObject(stepDataId, pluginStepVariableId int, pluginStepCondition *bean2.PluginStepCondition, + userId int32) *repository.PluginStepCondition { + return &repository.PluginStepCondition{ + PluginStepId: stepDataId, + ConditionVariableId: pluginStepVariableId, + ConditionType: pluginStepCondition.ConditionType, + ConditionalOperator: pluginStepCondition.ConditionalOperator, + ConditionalValue: pluginStepCondition.ConditionalValue, + AuditLog: sql.NewDefaultAuditLog(userId), + } +} diff --git a/pkg/plugin/bean/bean.go b/pkg/plugin/bean/bean.go index 55424f3caac..c31d3463327 100644 --- a/pkg/plugin/bean/bean.go +++ b/pkg/plugin/bean/bean.go @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package bean import ( @@ -44,15 +43,47 @@ type PluginListComponentDto struct { //created new struct for backward compatibi } type PluginMetadataDto struct { - Id int `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc - Icon string `json:"icon,omitempty"` - Tags []string `json:"tags"` - Action int `json:"action,omitempty"` - PluginStage string `json:"pluginStage,omitempty"` - PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` + Id int `json:"id"` + Name string `json:"name" validate:"required,min=3,max=100,global-entity-name"` + Description string `json:"description" validate:"max=300"` + Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` // SHARED, PRESET etc + Icon string `json:"icon,omitempty"` + Tags []string `json:"tags"` + Action int `json:"action,omitempty"` + PluginStage string `json:"pluginStage,omitempty"` + PluginSteps []*PluginStepsDto `json:"pluginSteps,omitempty"` + AreNewTagsPresent bool `json:"areNewTagsPresent,omitempty"` +} + +type PluginMinDto struct { + ParentPluginId int `json:"id,omitempty"` + PluginName string `json:"name,omitempty"` + Icon string `json:"icon,omitempty"` + PluginVersionId int `json:"pluginVersionId,omitempty"` +} + +func NewPluginMinDto() *PluginMinDto { + return &PluginMinDto{} +} + +func (r *PluginMinDto) WithParentPluginId(id int) *PluginMinDto { + r.ParentPluginId = id + return r +} + +func (r *PluginMinDto) WithPluginName(name string) *PluginMinDto { + r.PluginName = name + return r +} + +func (r *PluginMinDto) WithIcon(icon string) *PluginMinDto { + r.Icon = icon + return r +} + +func (r *PluginMinDto) WithPluginVersionId(versionId int) *PluginMinDto { + r.PluginVersionId = versionId + return r } type PluginsDto struct { @@ -76,9 +107,9 @@ func (r *PluginsDto) WithTotalCount(count int) *PluginsDto { type PluginParentMetadataDto struct { Id int `json:"id"` - Name string `json:"name"` - PluginIdentifier string `json:"pluginIdentifier"` - Description string `json:"description"` + Name string `json:"name" 
validate:"required,min=3,max=100,global-entity-name"` + PluginIdentifier string `json:"pluginIdentifier" validate:"required,min=3,max=100,global-entity-name"` + Description string `json:"description" validate:"max=300"` Type string `json:"type,omitempty" validate:"oneof=SHARED PRESET"` Icon string `json:"icon,omitempty"` Versions *PluginVersions `json:"pluginVersions"` @@ -124,17 +155,6 @@ type PluginVersions struct { MinimalPluginVersionData []*PluginsVersionDetail `json:"minimalPluginVersionData"` // contains only few metadata } -type PluginMinDto struct { - PluginName string `json:"pluginName"` - PluginVersions []*PluginVersionsMinDto `json:"pluginVersions"` - Icon string `json:"icon"` -} - -type PluginVersionsMinDto struct { - Id int `json:"id"` - Version string `json:"version"` -} - func NewPluginVersions() *PluginVersions { return &PluginVersions{} } @@ -154,7 +174,7 @@ type PluginsVersionDetail struct { InputVariables []*PluginVariableDto `json:"inputVariables"` OutputVariables []*PluginVariableDto `json:"outputVariables"` DocLink string `json:"docLink"` - Version string `json:"pluginVersion"` + Version string `json:"pluginVersion" validate:"max=50,min=3"` IsLatest bool `json:"isLatest"` UpdatedBy string `json:"updatedBy"` CreatedOn time.Time `json:"-"` @@ -336,10 +356,18 @@ type RegistryCredentials struct { } const ( - NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" - NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" + NoPluginOrParentIdProvidedErr = "no value for pluginVersionIds and parentPluginIds provided in query param" + NoPluginFoundForThisSearchQueryErr = "unable to find desired plugin for the query filter" + PluginStepsNotProvidedError = "plugin steps not provided" + PluginWithSameNameExistError = "plugin with the same name exists, please choose another name" + PluginWithSameIdentifierExistsError = "plugin with the same identifier exists, please choose another identifier name" + PluginVersionNotSemanticallyCorrectError = "please provide a plugin version that adheres to Semantic Versioning 2.0.0 to ensure compatibility and proper versioning" + PluginIconNotCorrectOrReachableError = "cannot validate icon, make sure that provided url link is reachable" + PluginVersionAlreadyExistError = "this plugin version already exists, please provide another plugin version" + NoStepDataToProceedError = "no step data provided to save, please provide a plugin step to proceed further" ) const ( - SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" + SpecialCharsRegex = ` !"#$%&'()*+,./:;<=>?@[\]^_{|}~` + "`" + PluginIconMaxSizeInBytes = 2 * 1024 * 1024 ) diff --git a/pkg/plugin/repository/GlobalPluginRepository.go b/pkg/plugin/repository/GlobalPluginRepository.go index 8b650935231..9cc50748f29 100644 --- a/pkg/plugin/repository/GlobalPluginRepository.go +++ b/pkg/plugin/repository/GlobalPluginRepository.go @@ -99,6 +99,16 @@ func (r *PluginParentMetadata) CreateAuditLog(userId int32) *PluginParentMetadat return r } +func (r *PluginParentMetadata) WithBasicMetadata(name, identifier, description, icon string, pluginType PluginType) *PluginParentMetadata { + r.Name = name + r.Identifier = identifier + r.Description = description + r.Icon = icon + r.Type = pluginType + r.Deleted = false + return r +} + // SetParentPluginMetadata method signature used only for migration purposes, sets pluginVersionsMetadata into plugin_parent_metadata func (r *PluginParentMetadata) 
SetParentPluginMetadata(pluginMetadata *PluginMetadata) *PluginParentMetadata { r.Name = pluginMetadata.Name @@ -135,6 +145,38 @@ type PluginMetadata struct { sql.AuditLog } +func NewPluginVersionMetadata() *PluginMetadata { + return &PluginMetadata{} +} + +func (r *PluginMetadata) CreateAuditLog(userId int32) *PluginMetadata { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + +func (r *PluginMetadata) WithBasicMetadata(name, description, pluginVersion, docLink string) *PluginMetadata { + r.Name = name + r.PluginVersion = pluginVersion + r.Description = description + r.DocLink = docLink + r.Deleted = false + r.IsDeprecated = false + return r +} + +func (r *PluginMetadata) WithPluginParentMetadataId(parentId int) *PluginMetadata { + r.PluginParentMetadataId = parentId + return r +} + +func (r *PluginMetadata) WithIsLatestFlag(isLatest bool) *PluginMetadata { + r.IsLatest = isLatest + return r +} + type PluginTag struct { tableName struct{} `sql:"plugin_tag" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -143,6 +185,23 @@ type PluginTag struct { sql.AuditLog } +func NewPluginTag() *PluginTag { + return &PluginTag{} +} + +func (r *PluginTag) WithName(name string) *PluginTag { + r.Name = name + return r +} + +func (r *PluginTag) CreateAuditLog(userId int32) *PluginTag { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + type PluginTagRelation struct { tableName struct{} `sql:"plugin_tag_relation" pg:",discard_unknown_columns"` Id int `sql:"id,pk"` @@ -151,6 +210,24 @@ type PluginTagRelation struct { sql.AuditLog } +func NewPluginTagRelation() *PluginTagRelation { + return &PluginTagRelation{} +} + +func (r *PluginTagRelation) WithTagAndPluginId(tagId, pluginId int) *PluginTagRelation { + r.TagId = tagId + r.PluginId = pluginId + return r +} + +func (r *PluginTagRelation) CreateAuditLog(userId int32) *PluginTagRelation { + r.CreatedBy = userId + r.CreatedOn = time.Now() + r.UpdatedBy = userId + r.UpdatedOn = time.Now() + return r +} + // Below two tables are used at pipeline-steps level too type PluginPipelineScript struct { @@ -247,7 +324,9 @@ type GlobalPluginRepository interface { GetMetaDataForAllPlugins() ([]*PluginMetadata, error) GetMetaDataForPluginWithStageType(stageType int) ([]*PluginMetadata, error) GetMetaDataByPluginId(pluginId int) (*PluginMetadata, error) + GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) GetAllPluginTags() ([]*PluginTag, error) + GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) GetAllPluginTagRelations() ([]*PluginTagRelation, error) GetTagsByPluginId(pluginId int) ([]string, error) GetScriptDetailById(id int) (*PluginPipelineScript, error) @@ -264,10 +343,14 @@ type GlobalPluginRepository interface { GetConditionsByPluginId(pluginId int) ([]*PluginStepCondition, error) GetPluginStageMappingByPluginId(pluginId int) (*PluginStageMapping, error) GetConnection() (dbConnection *pg.DB) + GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) GetPluginParentMetadataByIdentifier(pluginIdentifier string) (*PluginParentMetadata, error) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) GetPluginParentMetadataByIds(ids []int) ([]*PluginParentMetadata, error) + GetAllPluginMinData() ([]*PluginParentMetadata, error) + GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) + MarkPreviousPluginVersionLatestFalse(pluginParentId 
int) error SavePluginMetadata(pluginMetadata *PluginMetadata, tx *pg.Tx) (*PluginMetadata, error) SavePluginStageMapping(pluginStageMapping *PluginStageMapping, tx *pg.Tx) (*PluginStageMapping, error) @@ -351,6 +434,19 @@ func (impl *GlobalPluginRepositoryImpl) GetAllPluginTags() ([]*PluginTag, error) return tags, nil } +func (impl *GlobalPluginRepositoryImpl) GetPluginTagByNames(tagNames []string) ([]*PluginTag, error) { + var tags []*PluginTag + err := impl.dbConnection.Model(&tags). + Where("deleted = ?", false). + Where("name in (?)", pg.In(tagNames)). + Select() + if err != nil { + impl.logger.Errorw("err in getting all tags by names", "tagNames", tagNames, "err", err) + return nil, err + } + return tags, nil +} + func (impl *GlobalPluginRepositoryImpl) GetAllPluginTagRelations() ([]*PluginTagRelation, error) { var rel []*PluginTagRelation err := impl.dbConnection.Model(&rel). @@ -385,6 +481,18 @@ func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginId(pluginId int) (*Pl return &plugin, nil } +func (impl *GlobalPluginRepositoryImpl) GetMetaDataByPluginIds(pluginIds []int) ([]*PluginMetadata, error) { + var plugins []*PluginMetadata + err := impl.dbConnection.Model(&plugins). + Where("deleted = ?", false). + Where("id in (?)", pg.In(pluginIds)).Select() + if err != nil { + impl.logger.Errorw("err in getting plugins by pluginIds", "pluginIds", pluginIds, "err", err) + return nil, err + } + return plugins, nil +} + func (impl *GlobalPluginRepositoryImpl) GetStepsByPluginIds(pluginIds []int) ([]*PluginStep, error) { var pluginSteps []*PluginStep err := impl.dbConnection.Model(&pluginSteps). @@ -511,6 +619,20 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginByName(pluginName string) ([]*P } +func (impl *GlobalPluginRepositoryImpl) GetPluginVersionsByParentId(parentPluginId int) ([]*PluginMetadata, error) { + var plugin []*PluginMetadata + err := impl.dbConnection.Model(&plugin). + Where("plugin_parent_metadata_id = ?", parentPluginId). + Where("deleted = ?", false). + Where("is_deprecated = ?", false). + Select() + if err != nil { + impl.logger.Errorw("err in getting pluginVersionMetadata by parentPluginId", "parentPluginId", parentPluginId, "err", err) + return nil, err + } + return plugin, nil +} + func (impl *GlobalPluginRepositoryImpl) GetAllPluginMetaData() ([]*PluginMetadata, error) { var plugins []*PluginMetadata err := impl.dbConnection.Model(&plugins).Where("deleted = ?", false).Select() @@ -700,6 +822,18 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIdentifier(plug return &pluginParentMetadata, nil } +func (impl *GlobalPluginRepositoryImpl) GetPluginParentMinDataById(id int) (*PluginParentMetadata, error) { + var pluginParentMetadata PluginParentMetadata + err := impl.dbConnection.Model(&pluginParentMetadata). + Column("plugin_parent_metadata.id", "plugin_parent_metadata.name"). + Where("id = ?", id). 
+ Where("deleted = ?", false).Select() + if err != nil { + return nil, err + } + return &pluginParentMetadata, nil +} + func (impl *GlobalPluginRepositoryImpl) SavePluginParentMetadata(tx *pg.Tx, pluginParentMetadata *PluginParentMetadata) (*PluginParentMetadata, error) { err := tx.Insert(pluginParentMetadata) return pluginParentMetadata, err @@ -712,24 +846,20 @@ func (impl *GlobalPluginRepositoryImpl) UpdatePluginMetadataInBulk(pluginsMetada func (impl *GlobalPluginRepositoryImpl) GetAllFilteredPluginParentMetadata(searchKey string, tags []string) ([]*PluginParentMetadata, error) { var plugins []*PluginParentMetadata - subQuery := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + + query := "select ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" - whereCondition := fmt.Sprintf(" where ppm.deleted=false") - orderCondition := fmt.Sprintf(" ORDER BY ppm.id asc") + whereCondition := fmt.Sprintf(" where ppm.deleted=false AND pm.deleted=false AND pm.is_latest=true") if len(tags) > 0 { - subQuery = "select DISTINCT ON(ppm.id) ppm.id, ppm.identifier,ppm.name,ppm.description,ppm.type,ppm.icon,ppm.deleted,ppm.created_by, ppm.created_on,ppm.updated_by,ppm.updated_on from plugin_parent_metadata ppm" + - " inner join plugin_metadata pm on pm.plugin_parent_metadata_id=ppm.id" + - " left join plugin_tag_relation ptr on ptr.plugin_id=pm.id" + - " left join plugin_tag pt on ptr.tag_id=pt.id" - whereCondition += fmt.Sprintf(" AND pm.deleted=false AND pt.deleted=false AND pt.name in (%s)", helper.GetCommaSepratedStringWithComma(tags)) + tagFilterSubQuery := fmt.Sprintf("select ptr.plugin_id from plugin_tag_relation ptr inner join plugin_tag pt on ptr.tag_id =pt.id where pt.deleted =false and pt.name in (%s) group by ptr.plugin_id having count(ptr.plugin_id )=%d", helper.GetCommaSepratedStringWithComma(tags), len(tags)) + whereCondition += fmt.Sprintf(" AND pm.id in (%s)", tagFilterSubQuery) } if len(searchKey) > 0 { searchKeyLike := "%" + searchKey + "%" whereCondition += fmt.Sprintf(" AND (pm.description ilike '%s' or pm.name ilike '%s')", searchKeyLike, searchKeyLike) } - whereCondition += fmt.Sprintf(" AND pm.is_latest=true") - subQuery += whereCondition + orderCondition - query := fmt.Sprintf(" select * from (%s) x ORDER BY name asc;", subQuery) + orderCondition := " ORDER BY ppm.name asc;" + + query += whereCondition + orderCondition _, err := impl.dbConnection.Query(&plugins, query) if err != nil { return nil, err @@ -749,3 +879,29 @@ func (impl *GlobalPluginRepositoryImpl) GetPluginParentMetadataByIds(ids []int) } return plugins, nil } + +func (impl *GlobalPluginRepositoryImpl) GetAllPluginMinData() ([]*PluginParentMetadata, error) { + var plugins []*PluginParentMetadata + err := impl.dbConnection.Model(&plugins). + Column("plugin_parent_metadata.id", "plugin_parent_metadata.name", "plugin_parent_metadata.type", "plugin_parent_metadata.icon", "plugin_parent_metadata.identifier"). + Where("deleted = ?", false). 
+		Select()
+	if err != nil {
+		impl.logger.Errorw("err in getting all plugin parent metadata min data", "err", err)
+		return nil, err
+	}
+	return plugins, nil
+}
+
+func (impl *GlobalPluginRepositoryImpl) MarkPreviousPluginVersionLatestFalse(pluginParentId int) error {
+	var model PluginMetadata
+	_, err := impl.dbConnection.Model(&model).
+		Set("is_latest = ?", false).
+		Where("id = (select id from plugin_metadata where plugin_parent_metadata_id = ? and is_latest =true order by created_on desc limit ?)", pluginParentId, 1).
+		Update()
+	if err != nil {
+		impl.logger.Errorw("error in updating last version isLatest as false for a plugin parent id", "pluginParentId", pluginParentId, "err", err)
+		return err
+	}
+	return nil
+}
diff --git a/pkg/plugin/utils/utils.go b/pkg/plugin/utils/utils.go
index 6d78a291439..168e694d89b 100644
--- a/pkg/plugin/utils/utils.go
+++ b/pkg/plugin/utils/utils.go
@@ -21,9 +21,11 @@ import (
 	"fmt"
 	bean2 "github.com/devtron-labs/devtron/pkg/plugin/bean"
 	"github.com/devtron-labs/devtron/pkg/plugin/repository"
+	"net/http"
 	"regexp"
 	"sort"
 	"strings"
+	"time"
 )
 
 func GetStageType(stageTypeReq string) (int, error) {
@@ -72,3 +74,25 @@ func SortPluginsVersionDetailSliceByCreatedOn(pluginsVersionDetail []*bean2.Plug
 		return false
 	})
 }
+
+func FetchIconAndCheckSize(url string, maxSize int64) error {
+	client := http.Client{
+		Timeout: 5 * time.Second,
+	}
+	iconResp, err := client.Get(url)
+	if err != nil {
+		return fmt.Errorf("error in fetching icon : %s", err.Error())
+	}
+	if iconResp == nil {
+		return fmt.Errorf("error in fetching icon : empty response")
+	}
+	// close the response body on every path, not only on the success path
+	defer iconResp.Body.Close()
+	if iconResp.StatusCode < 200 || iconResp.StatusCode >= 300 {
+		return fmt.Errorf("error in fetching icon : %s", iconResp.Status)
+	}
+	if iconResp.ContentLength > maxSize {
+		return fmt.Errorf("icon size too large")
+	}
+	return nil
+}
diff --git a/pkg/terminal/terminalSesion.go b/pkg/terminal/terminalSesion.go
index 6d0e2a8fa07..ba9a4859756 100644
--- a/pkg/terminal/terminalSesion.go
+++ b/pkg/terminal/terminalSesion.go
@@ -26,7 +26,7 @@ import (
 	"github.com/caarlos0/env"
 	"github.com/devtron-labs/common-lib/utils/k8s"
 	"github.com/devtron-labs/devtron/internal/middleware"
-	"github.com/devtron-labs/devtron/pkg/argoApplication"
+	"github.com/devtron-labs/devtron/pkg/argoApplication/read"
 	"github.com/devtron-labs/devtron/pkg/cluster"
 	"github.com/devtron-labs/devtron/pkg/cluster/repository"
 	errors1 "github.com/juju/errors"
@@ -447,24 +447,24 @@ type TerminalSessionHandler interface {
 }
 
 type TerminalSessionHandlerImpl struct {
-	environmentService        cluster.EnvironmentService
-	clusterService            cluster.ClusterService
-	logger                    *zap.SugaredLogger
-	k8sUtil                   *k8s.K8sServiceImpl
-	ephemeralContainerService cluster.EphemeralContainerService
-	argoApplicationService    argoApplication.ArgoApplicationService
+	environmentService         cluster.EnvironmentService
+	clusterService             cluster.ClusterService
+	logger                     *zap.SugaredLogger
+	k8sUtil                    *k8s.K8sServiceImpl
+	ephemeralContainerService  cluster.EphemeralContainerService
+	argoApplicationReadService read.ArgoApplicationReadService
 }
 
 func NewTerminalSessionHandlerImpl(environmentService cluster.EnvironmentService, clusterService cluster.ClusterService,
 	logger *zap.SugaredLogger, k8sUtil *k8s.K8sServiceImpl, ephemeralContainerService cluster.EphemeralContainerService,
-	argoApplicationService argoApplication.ArgoApplicationService) *TerminalSessionHandlerImpl {
+	argoApplicationReadService read.ArgoApplicationReadService)
*TerminalSessionHandlerImpl { return &TerminalSessionHandlerImpl{ - environmentService: environmentService, - clusterService: clusterService, - logger: logger, - k8sUtil: k8sUtil, - ephemeralContainerService: ephemeralContainerService, - argoApplicationService: argoApplicationService, + environmentService: environmentService, + clusterService: clusterService, + logger: logger, + k8sUtil: k8sUtil, + ephemeralContainerService: ephemeralContainerService, + argoApplicationReadService: argoApplicationReadService, } } @@ -531,7 +531,7 @@ func (impl *TerminalSessionHandlerImpl) getClientSetAndRestConfigForTerminalConn var restConfig *rest.Config var err error if len(req.ExternalArgoApplicationName) > 0 { - restConfig, err = impl.argoApplicationService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) + restConfig, err = impl.argoApplicationReadService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) if err != nil { impl.logger.Errorw("error in getting rest config", "err", err, "clusterId", req.ClusterId, "externalArgoApplicationName", req.ExternalArgoApplicationName) return nil, nil, err @@ -652,7 +652,7 @@ func (impl *TerminalSessionHandlerImpl) saveEphemeralContainerTerminalAccessAudi var restConfig *rest.Config var err error if len(req.ExternalArgoApplicationName) > 0 { - restConfig, err = impl.argoApplicationService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) + restConfig, err = impl.argoApplicationReadService.GetRestConfigForExternalArgo(context.Background(), req.ClusterId, req.ExternalArgoApplicationName) if err != nil { impl.logger.Errorw("error in getting rest config", "err", err, "clusterId", req.ClusterId, "externalArgoApplicationName", req.ExternalArgoApplicationName) return err diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 86dd35d0451..511f76279ec 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -71,7 +71,7 @@ import ( ) type WorkflowDagExecutor interface { - HandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *bean2.CiArtifactWebhookRequest, imagePushedAt *time.Time) (id int, err error) + HandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *bean2.CiArtifactWebhookRequest, imagePushedAt time.Time) (id int, err error) HandlePreStageSuccessEvent(triggerContext triggerBean.TriggerContext, cdStageCompleteEvent eventProcessorBean.CdStageCompleteEvent) error HandleDeploymentSuccessEvent(triggerContext triggerBean.TriggerContext, pipelineOverride *chartConfig.PipelineOverride) error HandlePostStageSuccessEvent(triggerContext triggerBean.TriggerContext, cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error @@ -687,7 +687,7 @@ func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(triggerContext return nil } -func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *bean2.CiArtifactWebhookRequest, imagePushedAt *time.Time) (id int, err error) { +func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *bean2.CiArtifactWebhookRequest, imagePushedAt time.Time) (id int, err error) { impl.logger.Infow("webhook for artifact save", "req", request) if 
request.WorkflowId != nil { savedWorkflow, err := impl.ciWorkflowRepository.FindById(*request.WorkflowId) @@ -730,7 +730,7 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext trigger createdOn := time.Now() updatedOn := time.Now() if !imagePushedAt.IsZero() { - createdOn = *imagePushedAt + createdOn = imagePushedAt } buildArtifact := &repository.CiArtifact{ Image: request.Image, diff --git a/scripts/sql/283_user_group.down.sql b/scripts/sql/283_user_group.down.sql new file mode 100644 index 00000000000..9374d97580b --- /dev/null +++ b/scripts/sql/283_user_group.down.sql @@ -0,0 +1,8 @@ +DROP INDEX IF EXISTS idx_unique_user_group_user_id; +DROP TABLE IF EXISTS "public"."user_group_mapping"; +DROP INDEX IF EXISTS idx_unique_user_group_name; +DROP INDEX IF EXISTS idx_unique_user_group_identifier; +DROP TABLE IF EXISTS "public"."user_group"; +DROP SEQUENCE IF EXISTS "public"."id_seq_user_group_mapping"; +DROP SEQUENCE IF EXISTS "public"."id_seq_user_group"; +ALTER TABLE user_auto_assigned_groups RENAME TO user_groups; \ No newline at end of file diff --git a/scripts/sql/283_user_group.up.sql b/scripts/sql/283_user_group.up.sql new file mode 100644 index 00000000000..dd3e2a7a448 --- /dev/null +++ b/scripts/sql/283_user_group.up.sql @@ -0,0 +1,42 @@ +CREATE SEQUENCE IF NOT EXISTS id_seq_user_group; +CREATE TABLE IF NOT EXISTS public.user_group +( + "id" int NOT NULL DEFAULT nextval('id_seq_user_group'::regclass), + "name" VARCHAR(50) NOT NULL, + "identifier" VARCHAR(50) NOT NULL, + "description" TEXT NOT NULL, + "active" bool NOT NULL, + "created_on" timestamptz NOT NULL, + "created_by" int4 NOT NULL, + "updated_on" timestamptz NOT NULL, + "updated_by" int4 NOT NULL, + PRIMARY KEY ("id") + ); + +CREATE UNIQUE INDEX idx_unique_user_group_name + ON user_group (name) + WHERE active = true; + +CREATE UNIQUE INDEX idx_unique_user_group_identifier + ON user_group (identifier) + WHERE active = true; + +CREATE SEQUENCE IF NOT EXISTS id_seq_user_group_mapping; +CREATE TABLE IF NOT EXISTS public.user_group_mapping +( + "id" int NOT NULL DEFAULT nextval('id_seq_user_group_mapping'::regclass), + "user_id" int NOT NULL, + "user_group_id" int NOT NULL, + "created_on" timestamptz NOT NULL, + "created_by" int4 NOT NULL, + "updated_on" timestamptz NOT NULL, + "updated_by" int4 NOT NULL, + PRIMARY KEY ("id"), + CONSTRAINT "user_group_mapping_user_group_id_fkey" FOREIGN KEY ("user_group_id") REFERENCES "public"."user_group" ("id"), + CONSTRAINT "user_group_mapping_user_id_fkey" FOREIGN KEY ("user_id") REFERENCES "public"."users" ("id") + ); + +CREATE UNIQUE INDEX idx_unique_user_group_user_id + ON user_group_mapping(user_id,user_group_id); + +ALTER TABLE user_groups RENAME TO user_auto_assigned_groups; diff --git a/scripts/sql/284_polling_plugin_v2.down.sql b/scripts/sql/284_polling_plugin_v2.down.sql new file mode 100644 index 00000000000..4ca19c7bbe2 --- /dev/null +++ b/scripts/sql/284_polling_plugin_v2.down.sql @@ -0,0 +1,5 @@ +-- revert the container image path of the polling plugin version 1.0.0 +UPDATE plugin_pipeline_script +SET container_image_path ='quay.io/devtron/poll-container-image:97a996a5-545-16654' +WHERE container_image_path ='quay.io/devtron/devtron-plugins:polling-plugin-v1.0.1' +AND deleted = false; \ No newline at end of file diff --git a/scripts/sql/284_polling_plugin_v2.up.sql b/scripts/sql/284_polling_plugin_v2.up.sql new file mode 100644 index 00000000000..f939005ef5d --- /dev/null +++ b/scripts/sql/284_polling_plugin_v2.up.sql @@ -0,0 +1,30 @@ +-- update the 
container image path for the polling plugin version 1.0.0 +UPDATE plugin_pipeline_script +SET container_image_path ='quay.io/devtron/devtron-plugins:polling-plugin-v1.0.1' +WHERE container_image_path ='quay.io/devtron/poll-container-image:97a996a5-545-16654' +AND deleted = false; + +-- create plugin_parent_metadata for the polling plugin, if not exists +INSERT INTO "plugin_parent_metadata" ("id", "name", "identifier", "description", "type", "icon", "deleted", "created_on", "created_by", "updated_on", "updated_by") +SELECT nextval('id_seq_plugin_parent_metadata'), 'Pull images from container repository','pull-images-from-container-repository','Polls a container repository and pulls images stored in the repository which can be used for deployment.','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/plugin-poll-container-registry.png','f', 'now()', 1, 'now()', 1 + WHERE NOT EXISTS ( + SELECT 1 + FROM plugin_parent_metadata + WHERE identifier='pull-images-from-container-repository' + AND deleted = false +); + +-- update the plugin_metadata with the plugin_parent_metadata_id +UPDATE plugin_metadata +SET plugin_parent_metadata_id = ( + SELECT id + FROM plugin_parent_metadata + WHERE identifier='pull-images-from-container-repository' + AND deleted = false +) +WHERE name='Pull images from container repository' + AND ( + plugin_parent_metadata_id IS NULL + OR plugin_parent_metadata_id = 0 + ) + AND deleted = false; \ No newline at end of file diff --git a/scripts/sql/285_release_channel.down.sql b/scripts/sql/285_release_channel.down.sql new file mode 100644 index 00000000000..3ec91657618 --- /dev/null +++ b/scripts/sql/285_release_channel.down.sql @@ -0,0 +1,3 @@ +DELETE FROM devtron_resource_schema where devtron_resource_id in (select id from devtron_resource where kind in('release-channel')); + +DELETE FROM devtron_resource where kind in('release-channel'); \ No newline at end of file diff --git a/scripts/sql/285_release_channel.up.sql b/scripts/sql/285_release_channel.up.sql new file mode 100644 index 00000000000..cbcc0bc310b --- /dev/null +++ b/scripts/sql/285_release_channel.up.sql @@ -0,0 +1,206 @@ +INSERT INTO devtron_resource(kind, display_name, icon,is_exposed, parent_kind_id, deleted, created_on, created_by, updated_on, + updated_by) +VALUES ('release-channel', 'Release Channel', '',false, 0, false, now(), 1, now(), 1); + +INSERT INTO devtron_resource_schema(devtron_resource_id, version, schema, sample_schema, latest, created_on, created_by, updated_on, + updated_by) +VALUES ((select id from devtron_resource where kind = 'release-channel'), 'alpha1', + '{ + "type": "object", + "title": "Release Channel Schema", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "required": + [ + "version", + "kind", + "overview" + ], + "properties": + { + "kind": + { + "const": "release-channel" + }, + "version": + { + "enum": + [ + "alpha1" + ], + "type": "string" + }, + "overview": + { + "type": "object", + "properties": + { + "id": + { + "type": "number" + }, + "icon": + { + "type": "string", + "format": "uri" + }, + "name": + { + "type": "string" + }, + "tags": + { + "additionalProperties": + { + "type": "string" + } + }, + "idType": + { + "enum": + [ + "resourceObjectId", + "oldObjectId" + ], + "type": "string" + }, + "default": + { + "type": "boolean" + }, + "metadata": + { + "type": "object", + "properties": + {} + }, + "createdBy": + { + "type": "object", + "refType": "#/references/users" + }, + "createdOn": + { + "type": "string" + }, + "description": + { + 
"type": "string" + }, + "releaseChannelId": + { + "type": "string" + } + }, + "required": + [ + "id", + "idType", + "releaseChannelId" + ] + }, + "dependencies": + { + "type": "array" + } + } +}','{ + "type": "object", + "title": "Release Channel Schema", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "required": + [ + "version", + "kind", + "overview" + ], + "properties": + { + "kind": + { + "const": "release-channel" + }, + "version": + { + "enum": + [ + "alpha1" + ], + "type": "string" + }, + "overview": + { + "type": "object", + "properties": + { + "id": + { + "type": "number" + }, + "icon": + { + "type": "string", + "format": "uri" + }, + "name": + { + "type": "string" + }, + "tags": + { + "additionalProperties": + { + "type": "string" + } + }, + "idType": + { + "enum": + [ + "resourceObjectId", + "oldObjectId" + ], + "type": "string" + }, + "default": + { + "type": "boolean" + }, + "metadata": + { + "type": "object", + "properties": + {} + }, + "createdBy": + { + "type": "object", + "refType": "#/references/users" + }, + "createdOn": + { + "type": "string" + }, + "description": + { + "type": "string" + }, + "releaseChannelId": + { + "type": "string" + } + }, + "required": + [ + "id", + "idType", + "releaseChannelId" + ] + }, + "dependencies": + { + "type": "array" + } + } +}',true, now(), 1, now(), 1); + + diff --git a/util/helper.go b/util/helper.go index bd158ae7884..d2f632fc318 100644 --- a/util/helper.go +++ b/util/helper.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "encoding/json" "fmt" - "github.com/aws/aws-sdk-go-v2/service/ecr/types" "github.com/devtron-labs/devtron/internal/middleware" "github.com/juju/errors" "io" @@ -31,7 +30,6 @@ import ( "os" "path/filepath" "regexp" - "sort" "strconv" "strings" "time" @@ -342,20 +340,6 @@ func MatchRegexExpression(exp string, text string) (bool, error) { return matched, nil } -func GetLatestImageAccToImagePushedAt(imageDetails []types.ImageDetail) types.ImageDetail { - sort.Slice(imageDetails, func(i, j int) bool { - return imageDetails[i].ImagePushedAt.After(*imageDetails[j].ImagePushedAt) - }) - return imageDetails[0] -} - -func GetReverseSortedImageDetails(imageDetails []types.ImageDetail) []types.ImageDetail { - sort.Slice(imageDetails, func(i, j int) bool { - return imageDetails[i].ImagePushedAt.Before(*imageDetails[j].ImagePushedAt) - }) - return imageDetails -} - func GetRandomStringOfGivenLength(length int) string { const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
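The polling-plugin migration earlier in this patch relies on an idempotent seed pattern: the INSERT only fires when no undeleted row with the same identifier already exists, so re-running the script is harmless. A minimal Go sketch of the same pattern follows, assuming a Postgres database reachable through the github.com/lib/pq driver; the column list is trimmed for brevity and the connection string and main() wiring are placeholders, not part of the actual migration runner.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // Postgres driver (an assumption; any database/sql driver works)
)

// seedParentMetadata applies the idempotent "INSERT ... SELECT ...
// WHERE NOT EXISTS" pattern from the migration: the row is inserted
// only if no undeleted row with the same identifier already exists.
func seedParentMetadata(db *sql.DB) error {
	const stmt = `
INSERT INTO plugin_parent_metadata (id, name, identifier, deleted, created_on, created_by, updated_on, updated_by)
SELECT nextval('id_seq_plugin_parent_metadata'),
       'Pull images from container repository',
       'pull-images-from-container-repository',
       false, now(), 1, now(), 1
WHERE NOT EXISTS (
    SELECT 1
    FROM plugin_parent_metadata
    WHERE identifier = 'pull-images-from-container-repository'
      AND deleted = false
)`
	res, err := db.Exec(stmt)
	if err != nil {
		return err
	}
	n, _ := res.RowsAffected()
	fmt.Printf("rows inserted: %d (0 on a re-run)\n", n) // idempotent: a second run inserts nothing
	return nil
}

func main() {
	// Placeholder connection string; adjust for your environment.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/orchestrator?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := seedParentMetadata(db); err != nil {
		log.Fatal(err)
	}
}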
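285_release_channel.up.sql above registers a JSON schema whose overview object requires id, idType and releaseChannelId, and restricts idType to resourceObjectId or oldObjectId. The sketch below illustrates those constraints with plain encoding/json rather than a schema library; the overview struct and the sample payload are illustrative only and cover just the required subset of the schema.

package main

import (
	"encoding/json"
	"fmt"
)

// overview mirrors only the fields the release-channel schema marks as
// required; pointer fields distinguish "absent" from zero values.
type overview struct {
	Id               *float64 `json:"id"`
	IdType           *string  `json:"idType"`
	ReleaseChannelId *string  `json:"releaseChannelId"`
}

func main() {
	raw := []byte(`{"id": 1, "idType": "oldObjectId", "releaseChannelId": "rc-stable"}`)
	var o overview
	if err := json.Unmarshal(raw, &o); err != nil {
		panic(err)
	}
	switch {
	case o.Id == nil || o.IdType == nil || o.ReleaseChannelId == nil:
		fmt.Println("rejected: id, idType and releaseChannelId are all required")
	case *o.IdType != "resourceObjectId" && *o.IdType != "oldObjectId":
		fmt.Println("rejected: idType must be resourceObjectId or oldObjectId")
	default:
		fmt.Println("overview satisfies the schema's required and enum constraints")
	}
}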
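Dropping the vendored github.com/aws/aws-sdk-go-v2/service/ecr package is what allows the deletion of GetLatestImageAccToImagePushedAt and GetReverseSortedImageDetails from util/helper.go above. For readers who still need that behaviour, the sketch below reproduces the same sort-by-push-time logic without the SDK dependency; imageDetail is a hypothetical stand-in for the removed types.ImageDetail and, like the originals, the helpers assume a non-empty slice with non-nil timestamps.

package main

import (
	"fmt"
	"sort"
	"time"
)

// imageDetail is a hypothetical stand-in for the removed
// ecr/types.ImageDetail; only the fields the deleted helpers touched
// are reproduced.
type imageDetail struct {
	ImageTag      string
	ImagePushedAt *time.Time
}

// latestImageByPushedAt mirrors the deleted GetLatestImageAccToImagePushedAt:
// sort newest push first, return the head of the slice.
func latestImageByPushedAt(images []imageDetail) imageDetail {
	sort.Slice(images, func(i, j int) bool {
		return images[i].ImagePushedAt.After(*images[j].ImagePushedAt)
	})
	return images[0]
}

// reverseSortedImageDetails mirrors the deleted GetReverseSortedImageDetails:
// oldest push first.
func reverseSortedImageDetails(images []imageDetail) []imageDetail {
	sort.Slice(images, func(i, j int) bool {
		return images[i].ImagePushedAt.Before(*images[j].ImagePushedAt)
	})
	return images
}

func main() {
	older := time.Now().Add(-48 * time.Hour)
	newer := time.Now().Add(-time.Hour)
	images := []imageDetail{
		{ImageTag: "v1", ImagePushedAt: &older},
		{ImageTag: "v2", ImagePushedAt: &newer},
	}
	fmt.Println(latestImageByPushedAt(images).ImageTag)        // v2
	fmt.Println(reverseSortedImageDetails(images)[0].ImageTag) // v1
}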
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go deleted file mode 100644 index d782c4ec9c2..00000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go +++ /dev/null @@ -1,326 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -type EncryptionType string - -// Enum values for EncryptionType -const ( - EncryptionTypeAes256 EncryptionType = "AES256" - EncryptionTypeKms EncryptionType = "KMS" -) - -// Values returns all known values for EncryptionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (EncryptionType) Values() []EncryptionType { - return []EncryptionType{ - "AES256", - "KMS", - } -} - -type FindingSeverity string - -// Enum values for FindingSeverity -const ( - FindingSeverityInformational FindingSeverity = "INFORMATIONAL" - FindingSeverityLow FindingSeverity = "LOW" - FindingSeverityMedium FindingSeverity = "MEDIUM" - FindingSeverityHigh FindingSeverity = "HIGH" - FindingSeverityCritical FindingSeverity = "CRITICAL" - FindingSeverityUndefined FindingSeverity = "UNDEFINED" -) - -// Values returns all known values for FindingSeverity. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (FindingSeverity) Values() []FindingSeverity { - return []FindingSeverity{ - "INFORMATIONAL", - "LOW", - "MEDIUM", - "HIGH", - "CRITICAL", - "UNDEFINED", - } -} - -type ImageActionType string - -// Enum values for ImageActionType -const ( - ImageActionTypeExpire ImageActionType = "EXPIRE" -) - -// Values returns all known values for ImageActionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageActionType) Values() []ImageActionType { - return []ImageActionType{ - "EXPIRE", - } -} - -type ImageFailureCode string - -// Enum values for ImageFailureCode -const ( - ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" - ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" - ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" - ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" - ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" - ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" - ImageFailureCodeKmsError ImageFailureCode = "KmsError" -) - -// Values returns all known values for ImageFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageFailureCode) Values() []ImageFailureCode { - return []ImageFailureCode{ - "InvalidImageDigest", - "InvalidImageTag", - "ImageTagDoesNotMatchDigest", - "ImageNotFound", - "MissingDigestAndTag", - "ImageReferencedByManifestList", - "KmsError", - } -} - -type ImageTagMutability string - -// Enum values for ImageTagMutability -const ( - ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" - ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" -) - -// Values returns all known values for ImageTagMutability. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ImageTagMutability) Values() []ImageTagMutability { - return []ImageTagMutability{ - "MUTABLE", - "IMMUTABLE", - } -} - -type LayerAvailability string - -// Enum values for LayerAvailability -const ( - LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" - LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" -) - -// Values returns all known values for LayerAvailability. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerAvailability) Values() []LayerAvailability { - return []LayerAvailability{ - "AVAILABLE", - "UNAVAILABLE", - } -} - -type LayerFailureCode string - -// Enum values for LayerFailureCode -const ( - LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" - LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" -) - -// Values returns all known values for LayerFailureCode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (LayerFailureCode) Values() []LayerFailureCode { - return []LayerFailureCode{ - "InvalidLayerDigest", - "MissingLayerDigest", - } -} - -type LifecyclePolicyPreviewStatus string - -// Enum values for LifecyclePolicyPreviewStatus -const ( - LifecyclePolicyPreviewStatusInProgress LifecyclePolicyPreviewStatus = "IN_PROGRESS" - LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" - LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" - LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" -) - -// Values returns all known values for LifecyclePolicyPreviewStatus. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { - return []LifecyclePolicyPreviewStatus{ - "IN_PROGRESS", - "COMPLETE", - "EXPIRED", - "FAILED", - } -} - -type ReplicationStatus string - -// Enum values for ReplicationStatus -const ( - ReplicationStatusInProgress ReplicationStatus = "IN_PROGRESS" - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusFailed ReplicationStatus = "FAILED" -) - -// Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ReplicationStatus) Values() []ReplicationStatus { - return []ReplicationStatus{ - "IN_PROGRESS", - "COMPLETE", - "FAILED", - } -} - -type RepositoryFilterType string - -// Enum values for RepositoryFilterType -const ( - RepositoryFilterTypePrefixMatch RepositoryFilterType = "PREFIX_MATCH" -) - -// Values returns all known values for RepositoryFilterType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
-func (RepositoryFilterType) Values() []RepositoryFilterType { - return []RepositoryFilterType{ - "PREFIX_MATCH", - } -} - -type ScanFrequency string - -// Enum values for ScanFrequency -const ( - ScanFrequencyScanOnPush ScanFrequency = "SCAN_ON_PUSH" - ScanFrequencyContinuousScan ScanFrequency = "CONTINUOUS_SCAN" - ScanFrequencyManual ScanFrequency = "MANUAL" -) - -// Values returns all known values for ScanFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. -func (ScanFrequency) Values() []ScanFrequency { - return []ScanFrequency{ - "SCAN_ON_PUSH", - "CONTINUOUS_SCAN", - "MANUAL", - } -} - -type ScanningConfigurationFailureCode string - -// Enum values for ScanningConfigurationFailureCode -const ( - ScanningConfigurationFailureCodeRepositoryNotFound ScanningConfigurationFailureCode = "REPOSITORY_NOT_FOUND" -) - -// Values returns all known values for ScanningConfigurationFailureCode. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (ScanningConfigurationFailureCode) Values() []ScanningConfigurationFailureCode { - return []ScanningConfigurationFailureCode{ - "REPOSITORY_NOT_FOUND", - } -} - -type ScanningRepositoryFilterType string - -// Enum values for ScanningRepositoryFilterType -const ( - ScanningRepositoryFilterTypeWildcard ScanningRepositoryFilterType = "WILDCARD" -) - -// Values returns all known values for ScanningRepositoryFilterType. Note that -// this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. -func (ScanningRepositoryFilterType) Values() []ScanningRepositoryFilterType { - return []ScanningRepositoryFilterType{ - "WILDCARD", - } -} - -type ScanStatus string - -// Enum values for ScanStatus -const ( - ScanStatusInProgress ScanStatus = "IN_PROGRESS" - ScanStatusComplete ScanStatus = "COMPLETE" - ScanStatusFailed ScanStatus = "FAILED" - ScanStatusUnsupportedImage ScanStatus = "UNSUPPORTED_IMAGE" - ScanStatusActive ScanStatus = "ACTIVE" - ScanStatusPending ScanStatus = "PENDING" - ScanStatusScanEligibilityExpired ScanStatus = "SCAN_ELIGIBILITY_EXPIRED" - ScanStatusFindingsUnavailable ScanStatus = "FINDINGS_UNAVAILABLE" -) - -// Values returns all known values for ScanStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. -func (ScanStatus) Values() []ScanStatus { - return []ScanStatus{ - "IN_PROGRESS", - "COMPLETE", - "FAILED", - "UNSUPPORTED_IMAGE", - "ACTIVE", - "PENDING", - "SCAN_ELIGIBILITY_EXPIRED", - "FINDINGS_UNAVAILABLE", - } -} - -type ScanType string - -// Enum values for ScanType -const ( - ScanTypeBasic ScanType = "BASIC" - ScanTypeEnhanced ScanType = "ENHANCED" -) - -// Values returns all known values for ScanType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. 
-func (ScanType) Values() []ScanType { - return []ScanType{ - "BASIC", - "ENHANCED", - } -} - -type TagStatus string - -// Enum values for TagStatus -const ( - TagStatusTagged TagStatus = "TAGGED" - TagStatusUntagged TagStatus = "UNTAGGED" - TagStatusAny TagStatus = "ANY" -) - -// Values returns all known values for TagStatus. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. -func (TagStatus) Values() []TagStatus { - return []TagStatus{ - "TAGGED", - "UNTAGGED", - "ANY", - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go deleted file mode 100644 index 4b4782c5a5d..00000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/errors.go +++ /dev/null @@ -1,905 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - "fmt" - smithy "github.com/aws/smithy-go" -) - -// The specified layer upload does not contain any layer parts. -type EmptyUploadException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *EmptyUploadException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *EmptyUploadException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *EmptyUploadException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "EmptyUploadException" - } - return *e.ErrorCodeOverride -} -func (e *EmptyUploadException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image has already been pushed, and there were no changes to the -// manifest or image tag after the last push. -type ImageAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image digest does not match the digest that Amazon ECR calculated -// for the image. -type ImageDigestDoesNotMatchException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageDigestDoesNotMatchException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageDigestDoesNotMatchException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageDigestDoesNotMatchException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageDigestDoesNotMatchException" - } - return *e.ErrorCodeOverride -} -func (e *ImageDigestDoesNotMatchException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image requested does not exist in the specified repository. 
-type ImageNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ImageNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image is tagged with a tag that already exists. The repository is -// configured for tag immutability. -type ImageTagAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ImageTagAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ImageTagAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ImageTagAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ImageTagAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *ImageTagAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer digest calculation performed by Amazon ECR upon receipt of the image -// layer does not match the digest specified. -type InvalidLayerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidLayerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The layer part size is not valid, or the first byte specified is not -// consecutive to the last byte of a previous layer part upload. -type InvalidLayerPartException struct { - Message *string - - ErrorCodeOverride *string - - RegistryId *string - RepositoryName *string - UploadId *string - LastValidByteReceived *int64 - - noSmithyDocumentSerde -} - -func (e *InvalidLayerPartException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidLayerPartException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidLayerPartException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidLayerPartException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidLayerPartException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified parameter is invalid. Review the available parameters for the API -// request. 
-type InvalidParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// An invalid parameter has been specified. Tag keys can have a maximum character -// length of 128 characters, and tag values can have a maximum length of 256 -// characters. -type InvalidTagParameterException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *InvalidTagParameterException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *InvalidTagParameterException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *InvalidTagParameterException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "InvalidTagParameterException" - } - return *e.ErrorCodeOverride -} -func (e *InvalidTagParameterException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The operation failed due to a KMS exception. -type KmsException struct { - Message *string - - ErrorCodeOverride *string - - KmsError *string - - noSmithyDocumentSerde -} - -func (e *KmsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *KmsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *KmsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "KmsException" - } - return *e.ErrorCodeOverride -} -func (e *KmsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image layer already exists in the associated repository. -type LayerAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *LayerAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified layer is not available because it is not associated with an -// image. Unassociated image layers may be cleaned up at any time. 
-type LayerInaccessibleException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerInaccessibleException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerInaccessibleException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerInaccessibleException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerInaccessibleException" - } - return *e.ErrorCodeOverride -} -func (e *LayerInaccessibleException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// Layer parts must be at least 5 MiB in size. -type LayerPartTooSmallException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayerPartTooSmallException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayerPartTooSmallException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayerPartTooSmallException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayerPartTooSmallException" - } - return *e.ErrorCodeOverride -} -func (e *LayerPartTooSmallException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified layers could not be found, or the specified layer is not valid -// for this repository. -type LayersNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LayersNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LayersNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LayersNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LayersNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LayersNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The lifecycle policy could not be found, and no policy is set to the repository. -type LifecyclePolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The previous lifecycle policy preview request has not completed. Wait and try -// again. 
-type LifecyclePolicyPreviewInProgressException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyPreviewInProgressException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyPreviewInProgressException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyPreviewInProgressException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// There is no dry run for this repository. -type LifecyclePolicyPreviewNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LifecyclePolicyPreviewNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LifecyclePolicyPreviewNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *LifecyclePolicyPreviewNotFoundException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR service quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) -// in the Amazon Elastic Container Registry User Guide. -type LimitExceededException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *LimitExceededException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *LimitExceededException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "LimitExceededException" - } - return *e.ErrorCodeOverride -} -func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// A pull through cache rule with these settings already exists for the private -// registry. -type PullThroughCacheRuleAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PullThroughCacheRuleAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PullThroughCacheRuleAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *PullThroughCacheRuleAlreadyExistsException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The pull through cache rule was not found. Specify a valid pull through cache -// rule and try again. 
-type PullThroughCacheRuleNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *PullThroughCacheRuleNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *PullThroughCacheRuleNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *PullThroughCacheRuleNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "PullThroughCacheRuleNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *PullThroughCacheRuleNotFoundException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The manifest list is referencing an image that does not exist. -type ReferencedImagesNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ReferencedImagesNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ReferencedImagesNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ReferencedImagesNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ReferencedImagesNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ReferencedImagesNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The registry doesn't have an associated registry policy. -type RegistryPolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RegistryPolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RegistryPolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RegistryPolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RegistryPolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RegistryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository already exists in the specified registry. -type RepositoryAlreadyExistsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryAlreadyExistsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryAlreadyExistsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryAlreadyExistsException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository contains images. To delete a repository that contains -// images, you must force the deletion with the force parameter. 
-type RepositoryNotEmptyException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotEmptyException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotEmptyException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotEmptyException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotEmptyException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotEmptyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository could not be found. Check the spelling of the -// specified repository and ensure that you are performing operations on the -// correct registry. -type RepositoryNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified repository and registry combination does not have an associated -// repository policy. -type RepositoryPolicyNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *RepositoryPolicyNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *RepositoryPolicyNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *RepositoryPolicyNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "RepositoryPolicyNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *RepositoryPolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified image scan could not be found. Ensure that image scanning is -// enabled on the repository and try again. -type ScanNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ScanNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ScanNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ScanNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ScanNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *ScanNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// These errors are usually caused by a server-side issue. 
-type ServerException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ServerException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ServerException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ServerException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ServerException" - } - return *e.ErrorCodeOverride -} -func (e *ServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } - -// The list of tags on the repository is over the limit. The maximum number of -// tags that can be applied to a repository is 50. -type TooManyTagsException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *TooManyTagsException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *TooManyTagsException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *TooManyTagsException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "TooManyTagsException" - } - return *e.ErrorCodeOverride -} -func (e *TooManyTagsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The image is of a type that cannot be scanned. -type UnsupportedImageTypeException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedImageTypeException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedImageTypeException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedImageTypeException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedImageTypeException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedImageTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// The specified upstream registry isn't supported. -type UnsupportedUpstreamRegistryException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UnsupportedUpstreamRegistryException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UnsupportedUpstreamRegistryException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UnsupportedUpstreamRegistryException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UnsupportedUpstreamRegistryException" - } - return *e.ErrorCodeOverride -} -func (e *UnsupportedUpstreamRegistryException) ErrorFault() smithy.ErrorFault { - return smithy.FaultClient -} - -// The upload could not be found, or the specified upload ID is not valid for this -// repository. 
-type UploadNotFoundException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *UploadNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *UploadNotFoundException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *UploadNotFoundException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "UploadNotFoundException" - } - return *e.ErrorCodeOverride -} -func (e *UploadNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } - -// There was an exception validating this request. -type ValidationException struct { - Message *string - - ErrorCodeOverride *string - - noSmithyDocumentSerde -} - -func (e *ValidationException) Error() string { - return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) -} -func (e *ValidationException) ErrorMessage() string { - if e.Message == nil { - return "" - } - return *e.Message -} -func (e *ValidationException) ErrorCode() string { - if e == nil || e.ErrorCodeOverride == nil { - return "ValidationException" - } - return *e.ErrorCodeOverride -} -func (e *ValidationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go deleted file mode 100644 index 1dbaf772597..00000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/types.go +++ /dev/null @@ -1,882 +0,0 @@ -// Code generated by smithy-go-codegen DO NOT EDIT. - -package types - -import ( - smithydocument "github.com/aws/smithy-go/document" - "time" -) - -// This data type is used in the ImageScanFinding data type. -type Attribute struct { - - // The attribute key. - // - // This member is required. - Key *string - - // The value assigned to the attribute key. - Value *string - - noSmithyDocumentSerde -} - -// An object representing authorization data for an Amazon ECR registry. -type AuthorizationData struct { - - // A base64-encoded string that contains authorization data for the specified - // Amazon ECR registry. When the string is decoded, it is presented in the format - // user:password for private registry authentication using docker login . - AuthorizationToken *string - - // The Unix time in seconds and milliseconds when the authorization token expires. - // Authorization tokens are valid for 12 hours. - ExpiresAt *time.Time - - // The registry URL to use for this authorization token in a docker login command. - // The Amazon ECR registry URL format is - // https://aws_account_id.dkr.ecr.region.amazonaws.com . For example, - // https://012345678910.dkr.ecr.us-east-1.amazonaws.com .. - ProxyEndpoint *string - - noSmithyDocumentSerde -} - -// The image details of the Amazon ECR container image. -type AwsEcrContainerImageDetails struct { - - // The architecture of the Amazon ECR container image. - Architecture *string - - // The image author of the Amazon ECR container image. - Author *string - - // The image hash of the Amazon ECR container image. - ImageHash *string - - // The image tags attached to the Amazon ECR container image. - ImageTags []string - - // The platform of the Amazon ECR container image. - Platform *string - - // The date and time the Amazon ECR container image was pushed. - PushedAt *time.Time - - // The registry the Amazon ECR container image belongs to. 
- Registry *string - - // The name of the repository the Amazon ECR container image resides in. - RepositoryName *string - - noSmithyDocumentSerde -} - -// The CVSS score for a finding. -type CvssScore struct { - - // The base CVSS score used for the finding. - BaseScore float64 - - // The vector string of the CVSS score. - ScoringVector *string - - // The source of the CVSS score. - Source *string - - // The version of CVSS used for the score. - Version *string - - noSmithyDocumentSerde -} - -// Details on adjustments Amazon Inspector made to the CVSS score for a finding. -type CvssScoreAdjustment struct { - - // The metric used to adjust the CVSS score. - Metric *string - - // The reason the CVSS score has been adjustment. - Reason *string - - noSmithyDocumentSerde -} - -// Information about the CVSS score. -type CvssScoreDetails struct { - - // An object that contains details about adjustment Amazon Inspector made to the - // CVSS score. - Adjustments []CvssScoreAdjustment - - // The CVSS score. - Score float64 - - // The source for the CVSS score. - ScoreSource *string - - // The vector for the CVSS score. - ScoringVector *string - - // The CVSS version used in scoring. - Version *string - - noSmithyDocumentSerde -} - -// An object representing a filter on a DescribeImages operation. -type DescribeImagesFilter struct { - - // The tag status with which to filter your DescribeImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED . - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// The encryption configuration for the repository. This determines how the -// contents of your repository are encrypted at rest. By default, when no -// encryption configuration is set or the AES256 encryption type is used, Amazon -// ECR uses server-side encryption with Amazon S3-managed encryption keys which -// encrypts your data at rest using an AES-256 encryption algorithm. This does not -// require any action on your part. For more control over the encryption of the -// contents of your repository, you can use server-side encryption with Key -// Management Service key stored in Key Management Service (KMS) to encrypt your -// images. For more information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) -// in the Amazon Elastic Container Registry User Guide. -type EncryptionConfiguration struct { - - // The encryption type to use. If you use the KMS encryption type, the contents of - // the repository will be encrypted using server-side encryption with Key - // Management Service key stored in KMS. When you use KMS to encrypt your data, you - // can either use the default Amazon Web Services managed KMS key for Amazon ECR, - // or specify your own KMS key, which you already created. For more information, - // see Protecting data using server-side encryption with an KMS key stored in Key - // Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) - // in the Amazon Simple Storage Service Console Developer Guide. If you use the - // AES256 encryption type, Amazon ECR uses server-side encryption with Amazon - // S3-managed encryption keys which encrypts the images in the repository using an - // AES-256 encryption algorithm. 
For more information, see Protecting data using - // server-side encryption with Amazon S3-managed encryption keys (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon Simple Storage Service Console Developer Guide. - // - // This member is required. - EncryptionType EncryptionType - - // If you use the KMS encryption type, specify the KMS key to use for encryption. - // The alias, key ID, or full ARN of the KMS key can be specified. The key must - // exist in the same Region as the repository. If no key is specified, the default - // Amazon Web Services managed KMS key for Amazon ECR will be used. - KmsKey *string - - noSmithyDocumentSerde -} - -// The details of an enhanced image scan. This is returned when enhanced scanning -// is enabled for your private registry. -type EnhancedImageScanFinding struct { - - // The Amazon Web Services account ID associated with the image. - AwsAccountId *string - - // The description of the finding. - Description *string - - // The Amazon Resource Number (ARN) of the finding. - FindingArn *string - - // The date and time that the finding was first observed. - FirstObservedAt *time.Time - - // The date and time that the finding was last observed. - LastObservedAt *time.Time - - // An object that contains the details of a package vulnerability finding. - PackageVulnerabilityDetails *PackageVulnerabilityDetails - - // An object that contains the details about how to remediate a finding. - Remediation *Remediation - - // Contains information on the resources involved in a finding. - Resources []Resource - - // The Amazon Inspector score given to the finding. - Score float64 - - // An object that contains details of the Amazon Inspector score. - ScoreDetails *ScoreDetails - - // The severity of the finding. - Severity *string - - // The status of the finding. - Status *string - - // The title of the finding. - Title *string - - // The type of the finding. - Type *string - - // The date and time the finding was last updated at. - UpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image. -type Image struct { - - // An object containing the image tag and image digest associated with an image. - ImageId *ImageIdentifier - - // The image manifest associated with the image. - ImageManifest *string - - // The manifest media type of the image. - ImageManifestMediaType *string - - // The Amazon Web Services account ID associated with the registry containing the - // image. - RegistryId *string - - // The name of the repository associated with the image. - RepositoryName *string - - noSmithyDocumentSerde -} - -// An object that describes an image returned by a DescribeImages operation. -type ImageDetail struct { - - // The artifact media type of the image. - ArtifactMediaType *string - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The media type of the image manifest. - ImageManifestMediaType *string - - // The date and time, expressed in standard JavaScript date format, at which the - // current image was pushed to the repository. - ImagePushedAt *time.Time - - // A summary of the last completed image scan. - ImageScanFindingsSummary *ImageScanFindingsSummary - - // The current state of the scan. - ImageScanStatus *ImageScanStatus - - // The size, in bytes, of the image in the repository. If the image is a manifest - // list, this will be the max size of all manifests in the list. 
Beginning with - // Docker version 1.9, the Docker client compresses image layers before pushing - // them to a V2 Docker registry. The output of the docker images command shows the - // uncompressed image size, so it may return a larger image size than the image - // sizes returned by DescribeImages . - ImageSizeInBytes *int64 - - // The list of tags associated with this image. - ImageTags []string - - // The date and time, expressed in standard JavaScript date format, when Amazon - // ECR recorded the last image pull. Amazon ECR refreshes the last image pull - // timestamp at least once every 24 hours. For example, if you pull an image once a - // day then the lastRecordedPullTime timestamp will indicate the exact time that - // the image was last pulled. However, if you pull an image once an hour, because - // Amazon ECR refreshes the lastRecordedPullTime timestamp at least once every 24 - // hours, the result may not be the exact time that the image was last pulled. - LastRecordedPullTime *time.Time - - // The Amazon Web Services account ID associated with the registry to which this - // image belongs. - RegistryId *string - - // The name of the repository to which this image belongs. - RepositoryName *string - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image failure. -type ImageFailure struct { - - // The code associated with the failure. - FailureCode ImageFailureCode - - // The reason for the failure. - FailureReason *string - - // The image ID associated with the failure. - ImageId *ImageIdentifier - - noSmithyDocumentSerde -} - -// An object with identifying information for an image in an Amazon ECR repository. -type ImageIdentifier struct { - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The tag used for the image. - ImageTag *string - - noSmithyDocumentSerde -} - -// The status of the replication process for an image. -type ImageReplicationStatus struct { - - // The failure code for a replication that has failed. - FailureCode *string - - // The destination Region for the image replication. - Region *string - - // The Amazon Web Services account ID associated with the registry to which the - // image belongs. - RegistryId *string - - // The image replication status. - Status ReplicationStatus - - noSmithyDocumentSerde -} - -// Contains information about an image scan finding. -type ImageScanFinding struct { - - // A collection of attributes of the host from which the finding is generated. - Attributes []Attribute - - // The description of the finding. - Description *string - - // The name associated with the finding, usually a CVE number. - Name *string - - // The finding severity. - Severity FindingSeverity - - // A link containing additional details about the security vulnerability. - Uri *string - - noSmithyDocumentSerde -} - -// The details of an image scan. -type ImageScanFindings struct { - - // Details about the enhanced scan findings from Amazon Inspector. - EnhancedFindings []EnhancedImageScanFinding - - // The image vulnerability counts, sorted by severity. - FindingSeverityCounts map[string]int32 - - // The findings from the image scan. - Findings []ImageScanFinding - - // The time of the last completed image scan. - ImageScanCompletedAt *time.Time - - // The time when the vulnerability data was last scanned. - VulnerabilitySourceUpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// A summary of the last completed image scan. 
-type ImageScanFindingsSummary struct { - - // The image vulnerability counts, sorted by severity. - FindingSeverityCounts map[string]int32 - - // The time of the last completed image scan. - ImageScanCompletedAt *time.Time - - // The time when the vulnerability data was last scanned. - VulnerabilitySourceUpdatedAt *time.Time - - noSmithyDocumentSerde -} - -// The image scanning configuration for a repository. -type ImageScanningConfiguration struct { - - // The setting that determines whether images are scanned after being pushed to a - // repository. If set to true , images will be scanned after being pushed. If this - // parameter is not specified, it will default to false and images will not be - // scanned unless a scan is manually started with the API_StartImageScan (https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_StartImageScan.html) - // API. - ScanOnPush bool - - noSmithyDocumentSerde -} - -// The current status of an image scan. -type ImageScanStatus struct { - - // The description of the image scan status. - Description *string - - // The current state of an image scan. - Status ScanStatus - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image layer. -type Layer struct { - - // The availability status of the image layer. - LayerAvailability LayerAvailability - - // The sha256 digest of the image layer. - LayerDigest *string - - // The size, in bytes, of the image layer. - LayerSize *int64 - - // The media type of the layer, such as - // application/vnd.docker.image.rootfs.diff.tar.gzip or - // application/vnd.oci.image.layer.v1.tar+gzip . - MediaType *string - - noSmithyDocumentSerde -} - -// An object representing an Amazon ECR image layer failure. -type LayerFailure struct { - - // The failure code associated with the failure. - FailureCode LayerFailureCode - - // The reason for the failure. - FailureReason *string - - // The layer digest associated with the failure. - LayerDigest *string - - noSmithyDocumentSerde -} - -// The filter for the lifecycle policy preview. -type LifecyclePolicyPreviewFilter struct { - - // The tag status of the image. - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// The result of the lifecycle policy preview. -type LifecyclePolicyPreviewResult struct { - - // The type of action to be taken. - Action *LifecyclePolicyRuleAction - - // The priority of the applied rule. - AppliedRulePriority *int32 - - // The sha256 digest of the image manifest. - ImageDigest *string - - // The date and time, expressed in standard JavaScript date format, at which the - // current image was pushed to the repository. - ImagePushedAt *time.Time - - // The list of tags associated with this image. - ImageTags []string - - noSmithyDocumentSerde -} - -// The summary of the lifecycle policy preview request. -type LifecyclePolicyPreviewSummary struct { - - // The number of expiring images. - ExpiringImageTotalCount *int32 - - noSmithyDocumentSerde -} - -// The type of action to be taken. -type LifecyclePolicyRuleAction struct { - - // The type of action to be taken. - Type ImageActionType - - noSmithyDocumentSerde -} - -// An object representing a filter on a ListImages operation. -type ListImagesFilter struct { - - // The tag status with which to filter your ListImages results. You can filter - // results based on whether they are TAGGED or UNTAGGED . - TagStatus TagStatus - - noSmithyDocumentSerde -} - -// Information about a package vulnerability finding. 
-type PackageVulnerabilityDetails struct { - - // An object that contains details about the CVSS score of a finding. - Cvss []CvssScore - - // One or more URLs that contain details about this vulnerability type. - ReferenceUrls []string - - // One or more vulnerabilities related to the one identified in this finding. - RelatedVulnerabilities []string - - // The source of the vulnerability information. - Source *string - - // A URL to the source of the vulnerability information. - SourceUrl *string - - // The date and time that this vulnerability was first added to the vendor's - // database. - VendorCreatedAt *time.Time - - // The severity the vendor has given to this vulnerability type. - VendorSeverity *string - - // The date and time the vendor last updated this vulnerability in their database. - VendorUpdatedAt *time.Time - - // The ID given to this vulnerability. - VulnerabilityId *string - - // The packages impacted by this vulnerability. - VulnerablePackages []VulnerablePackage - - noSmithyDocumentSerde -} - -// The details of a pull through cache rule. -type PullThroughCacheRule struct { - - // The date and time the pull through cache was created. - CreatedAt *time.Time - - // The Amazon ECR repository prefix associated with the pull through cache rule. - EcrRepositoryPrefix *string - - // The Amazon Web Services account ID associated with the registry the pull - // through cache rule is associated with. - RegistryId *string - - // The upstream registry URL associated with the pull through cache rule. - UpstreamRegistryUrl *string - - noSmithyDocumentSerde -} - -// Details about the recommended course of action to remediate the finding. -type Recommendation struct { - - // The recommended course of action to remediate the finding. - Text *string - - // The URL address to the CVE remediation recommendations. - Url *string - - noSmithyDocumentSerde -} - -// The scanning configuration for a private registry. -type RegistryScanningConfiguration struct { - - // The scanning rules associated with the registry. - Rules []RegistryScanningRule - - // The type of scanning configured for the registry. - ScanType ScanType - - noSmithyDocumentSerde -} - -// The details of a scanning rule for a private registry. -type RegistryScanningRule struct { - - // The repository filters associated with the scanning configuration for a private - // registry. - // - // This member is required. - RepositoryFilters []ScanningRepositoryFilter - - // The frequency that scans are performed at for a private registry. When the - // ENHANCED scan type is specified, the supported scan frequencies are - // CONTINUOUS_SCAN and SCAN_ON_PUSH . When the BASIC scan type is specified, the - // SCAN_ON_PUSH scan frequency is supported. If scan on push is not specified, then - // the MANUAL scan frequency is set by default. - // - // This member is required. - ScanFrequency ScanFrequency - - noSmithyDocumentSerde -} - -// Information on how to remediate a finding. -type Remediation struct { - - // An object that contains information about the recommended course of action to - // remediate the finding. - Recommendation *Recommendation - - noSmithyDocumentSerde -} - -// The replication configuration for a registry. -type ReplicationConfiguration struct { - - // An array of objects representing the replication destinations and repository - // filters for a replication configuration. - // - // This member is required. 
- Rules []ReplicationRule - - noSmithyDocumentSerde -} - -// An array of objects representing the destination for a replication rule. -type ReplicationDestination struct { - - // The Region to replicate to. - // - // This member is required. - Region *string - - // The Amazon Web Services account ID of the Amazon ECR private registry to - // replicate to. When configuring cross-Region replication within your own - // registry, specify your own account ID. - // - // This member is required. - RegistryId *string - - noSmithyDocumentSerde -} - -// An array of objects representing the replication destinations and repository -// filters for a replication configuration. -type ReplicationRule struct { - - // An array of objects representing the destination for a replication rule. - // - // This member is required. - Destinations []ReplicationDestination - - // An array of objects representing the filters for a replication rule. Specifying - // a repository filter for a replication rule provides a method for controlling - // which repositories in a private registry are replicated. - RepositoryFilters []RepositoryFilter - - noSmithyDocumentSerde -} - -// An object representing a repository. -type Repository struct { - - // The date and time, in JavaScript date format, when the repository was created. - CreatedAt *time.Time - - // The encryption configuration for the repository. This determines how the - // contents of your repository are encrypted at rest. - EncryptionConfiguration *EncryptionConfiguration - - // The image scanning configuration for a repository. - ImageScanningConfiguration *ImageScanningConfiguration - - // The tag mutability setting for the repository. - ImageTagMutability ImageTagMutability - - // The Amazon Web Services account ID associated with the registry that contains - // the repository. - RegistryId *string - - // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains - // the arn:aws:ecr namespace, followed by the region of the repository, Amazon Web - // Services account ID of the repository owner, repository namespace, and - // repository name. For example, - // arn:aws:ecr:region:012345678910:repository-namespace/repository-name . - RepositoryArn *string - - // The name of the repository. - RepositoryName *string - - // The URI for the repository. You can use this URI for container image push and - // pull operations. - RepositoryUri *string - - noSmithyDocumentSerde -} - -// The filter settings used with image replication. Specifying a repository filter -// to a replication rule provides a method for controlling which repositories in a -// private registry are replicated. If no filters are added, the contents of all -// repositories are replicated. -type RepositoryFilter struct { - - // The repository filter details. When the PREFIX_MATCH filter type is specified, - // this value is required and should be the repository name prefix to configure - // replication for. - // - // This member is required. - Filter *string - - // The repository filter type. The only supported value is PREFIX_MATCH , which is - // a repository name prefix specified with the filter parameter. - // - // This member is required. - FilterType RepositoryFilterType - - noSmithyDocumentSerde -} - -// The details of the scanning configuration for a repository. -type RepositoryScanningConfiguration struct { - - // The scan filters applied to the repository. - AppliedScanFilters []ScanningRepositoryFilter - - // The ARN of the repository. 
- RepositoryArn *string - - // The name of the repository. - RepositoryName *string - - // The scan frequency for the repository. - ScanFrequency ScanFrequency - - // Whether or not scan on push is configured for the repository. - ScanOnPush bool - - noSmithyDocumentSerde -} - -// The details about any failures associated with the scanning configuration of a -// repository. -type RepositoryScanningConfigurationFailure struct { - - // The failure code. - FailureCode ScanningConfigurationFailureCode - - // The reason for the failure. - FailureReason *string - - // The name of the repository. - RepositoryName *string - - noSmithyDocumentSerde -} - -// Details about the resource involved in a finding. -type Resource struct { - - // An object that contains details about the resource involved in a finding. - Details *ResourceDetails - - // The ID of the resource. - Id *string - - // The tags attached to the resource. - Tags map[string]string - - // The type of resource. - Type *string - - noSmithyDocumentSerde -} - -// Contains details about the resource involved in the finding. -type ResourceDetails struct { - - // An object that contains details about the Amazon ECR container image involved - // in the finding. - AwsEcrContainerImage *AwsEcrContainerImageDetails - - noSmithyDocumentSerde -} - -// The details of a scanning repository filter. For more information on how to use -// filters, see Using filters (https://docs.aws.amazon.com/AmazonECR/latest/userguide/image-scanning.html#image-scanning-filters) -// in the Amazon Elastic Container Registry User Guide. -type ScanningRepositoryFilter struct { - - // The filter to use when scanning. - // - // This member is required. - Filter *string - - // The type associated with the filter. - // - // This member is required. - FilterType ScanningRepositoryFilterType - - noSmithyDocumentSerde -} - -// Information about the Amazon Inspector score given to a finding. -type ScoreDetails struct { - - // An object that contains details about the CVSS score given to a finding. - Cvss *CvssScoreDetails - - noSmithyDocumentSerde -} - -// The metadata to apply to a resource to help you categorize and organize them. -// Each tag consists of a key and a value, both of which you define. Tag keys can -// have a maximum character length of 128 characters, and tag values can have a -// maximum length of 256 characters. -type Tag struct { - - // One part of a key-value pair that make up a tag. A key is a general label that - // acts like a category for more specific tag values. - // - // This member is required. - Key *string - - // A value acts as a descriptor within a tag category (key). - // - // This member is required. - Value *string - - noSmithyDocumentSerde -} - -// Information on the vulnerable package identified by a finding. -type VulnerablePackage struct { - - // The architecture of the vulnerable package. - Arch *string - - // The epoch of the vulnerable package. - Epoch *int32 - - // The file path of the vulnerable package. - FilePath *string - - // The name of the vulnerable package. - Name *string - - // The package manager of the vulnerable package. - PackageManager *string - - // The release of the vulnerable package. - Release *string - - // The source layer hash of the vulnerable package. - SourceLayerHash *string - - // The version of the vulnerable package. 
- Version *string - - noSmithyDocumentSerde -} - -type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore deleted file mode 100644 index c92d6105eb3..00000000000 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Eclipse -.classpath -.project -.settings/ - -# Intellij -.idea/ -*.iml -*.iws - -# Mac -.DS_Store - -# Maven -target/ -**/dependency-reduced-pom.xml - -# Gradle -/.gradle -build/ -*/out/ -*/*/out/ - -# VS Code -bin/ -.vscode/ diff --git a/vendor/github.com/aws/smithy-go/.travis.yml b/vendor/github.com/aws/smithy-go/.travis.yml deleted file mode 100644 index f8d1035cc33..00000000000 --- a/vendor/github.com/aws/smithy-go/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go -sudo: true -dist: bionic - -branches: - only: - - main - -os: - - linux - - osx - # Travis doesn't work with windows and Go tip - #- windows - -go: - - tip - -matrix: - allow_failures: - - go: tip - -before_install: - - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi - - (cd /tmp/; go get golang.org/x/lint/golint) - -script: - - make go test -v ./...; - diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md deleted file mode 100644 index b9171b88b90..00000000000 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ /dev/null @@ -1,182 +0,0 @@ -# Release (2023-08-18) - -* No change notes available for this release. - -# Release (2023-08-07) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.14.1 - * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. - -# Release (2023-07-31) - -## General Highlights -* **Feature**: Adds support for smithy-modeled endpoint resolution. - -# Release (2022-12-02) - -* No change notes available for this release. - -# Release (2022-10-24) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.4 - * **Bug Fix**: fixed document type checking for encoding nested types - -# Release (2022-09-14) - -* No change notes available for this release. - -# Release (v1.13.2) - -* No change notes available for this release. - -# Release (v1.13.1) - -* No change notes available for this release. - -# Release (v1.13.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.13.0 - * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. - -# Release (v1.12.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.1 - * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. - -# Release (v1.12.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.12.0 - * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. - -# Release (v1.11.3) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.3 - * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. - -# Release (v1.11.2) - -* No change notes available for this release. - -# Release (v1.11.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.1 - * **Bug Fix**: Updates the smithy-go HTTP Request to correctly handle building the request to an http.Request. 
Related to [aws/aws-sdk-go-v2#1583](https://github.com/aws/aws-sdk-go-v2/issues/1583) - -# Release (v1.11.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.11.0 - * **Feature**: Updates deserialization of header list to supported quoted strings - -# Release (v1.10.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.10.0 - * **Feature**: Add `ptr.Duration`, `ptr.ToDuration`, `ptr.DurationSlice`, `ptr.ToDurationSlice`, `ptr.DurationMap`, and `ptr.ToDurationMap` functions for the `time.Duration` type. - -# Release (v1.9.1) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.1 - * **Documentation**: Fixes various typos in Go package documentation. - -# Release (v1.9.0) - -## Module Highlights -* `github.com/aws/smithy-go`: v1.9.0 - * **Feature**: sync: OnceErr, can be used to concurrently record a signal when an error has occurred. - * **Bug Fix**: `transport/http`: CloseResponseBody and ErrorCloseResponseBody middleware have been updated to ensure that the body is fully drained before closing. - -# Release v1.8.1 - -### Smithy Go Module -* **Bug Fix**: Fixed an issue that would cause the HTTP Content-Length to be set to 0 if the stream body was not set. - * Fixes [aws/aws-sdk-go-v2#1418](https://github.com/aws/aws-sdk-go-v2/issues/1418) - -# Release v1.8.0 - -### Smithy Go Module - -* `time`: Add support for parsing additional DateTime timestamp format ([#324](https://github.com/aws/smithy-go/pull/324)) - * Adds support for parsing DateTime timestamp formatted time similar to RFC 3339, but without the `Z` character, nor UTC offset. - * Fixes [#1387](https://github.com/aws/aws-sdk-go-v2/issues/1387) - -# Release v1.7.0 - -### Smithy Go Module -* `ptr`: Handle error for deferred file close call ([#314](https://github.com/aws/smithy-go/pull/314)) - * Handle error for defer close call -* `middleware`: Add Clone to Metadata ([#318](https://github.com/aws/smithy-go/pull/318)) - * Adds a new Clone method to the middleware Metadata type. This provides a shallow clone of the entries in the Metadata. -* `document`: Add new package for document shape serialization support ([#310](https://github.com/aws/smithy-go/pull/310)) - -### Codegen -* Add Smithy Document Shape Support ([#310](https://github.com/aws/smithy-go/pull/310)) - * Adds support for Smithy Document shapes and supporting types for protocols to implement support - -# Release v1.6.0 (2021-07-15) - -### Smithy Go Module -* `encoding/httpbinding`: Support has been added for encoding `float32` and `float64` values that are `NaN`, `Infinity`, or `-Infinity`. ([#316](https://github.com/aws/smithy-go/pull/316)) - -### Codegen -* Adds support for handling `float32` and `float64` `NaN` values in HTTP Protocol Unit Tests. ([#316](https://github.com/aws/smithy-go/pull/316)) -* Adds support protocol generator implementations to override the error code string returned by `ErrorCode` methods on generated error types. ([#315](https://github.com/aws/smithy-go/pull/315)) - -# Release v1.5.0 (2021-06-25) - -### Smithy Go module -* `time`: Update time parsing to not be as strict for HTTPDate and DateTime ([#307](https://github.com/aws/smithy-go/pull/307)) - * Fixes [#302](https://github.com/aws/smithy-go/issues/302) by changing time to UTC before formatting so no local offset time is lost. 
- -### Codegen -* Adds support for integrating client members via plugins ([#301](https://github.com/aws/smithy-go/pull/301)) -* Fix serialization of enum types marked with payload trait ([#296](https://github.com/aws/smithy-go/pull/296)) -* Update generation of API client modules to include a manifest of files generated ([#283](https://github.com/aws/smithy-go/pull/283)) -* Update Group Java group ID for smithy-go generator ([#298](https://github.com/aws/smithy-go/pull/298)) -* Support the delegation of determining the errors that can occur for an operation ([#304](https://github.com/aws/smithy-go/pull/304)) -* Support for marking and documenting deprecated client config fields. ([#303](https://github.com/aws/smithy-go/pull/303)) - -# Release v1.4.0 (2021-05-06) - -### Smithy Go module -* `encoding/xml`: Fix escaping of Next Line and Line Start in XML Encoder ([#267](https://github.com/aws/smithy-go/pull/267)) - -### Codegen -* Add support for Smithy 1.7 ([#289](https://github.com/aws/smithy-go/pull/289)) -* Add support for httpQueryParams location -* Add support for model renaming conflict resolution with service closure - -# Release v1.3.1 (2021-04-08) - -### Smithy Go module -* `transport/http`: Loosen endpoint hostname validation to allow specifying port numbers. ([#279](https://github.com/aws/smithy-go/pull/279)) -* `io`: Fix RingBuffer panics due to out of bounds index. ([#282](https://github.com/aws/smithy-go/pull/282)) - -# Release v1.3.0 (2021-04-01) - -### Smithy Go module -* `transport/http`: Add utility to safely join string to url path, and url raw query. - -### Codegen -* Update HttpBindingProtocolGenerator to use http/transport JoinPath and JoinQuery utility. - -# Release v1.2.0 (2021-03-12) - -### Smithy Go module -* Fix support for parsing shortened year format in HTTP Date header. -* Fix GitHub APIDiff action workflow to get gorelease tool correctly. -* Fix codegen artifact unit test for Go 1.16 - -### Codegen -* Fix generating paginator nil parameter handling before usage. -* Fix Serialize unboxed members decorated as required. -* Add ability to define resolvers at both client construction and operation invocation. -* Support for extending paginators with custom runtime trait diff --git a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md b/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md deleted file mode 100644 index 5b627cfa60b..00000000000 --- a/vendor/github.com/aws/smithy-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md deleted file mode 100644 index c4b6a1c5081..00000000000 --- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md +++ /dev/null @@ -1,59 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. 
- - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *main* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. diff --git a/vendor/github.com/aws/smithy-go/LICENSE b/vendor/github.com/aws/smithy-go/LICENSE deleted file mode 100644 index 67db8588217..00000000000 --- a/vendor/github.com/aws/smithy-go/LICENSE +++ /dev/null @@ -1,175 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile deleted file mode 100644 index 4b3c209373c..00000000000 --- a/vendor/github.com/aws/smithy-go/Makefile +++ /dev/null @@ -1,97 +0,0 @@ -PRE_RELEASE_VERSION ?= - -RELEASE_MANIFEST_FILE ?= -RELEASE_CHGLOG_DESC_FILE ?= - -REPOTOOLS_VERSION ?= latest -REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools -REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS ?= -REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION} -REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION} - -UNIT_TEST_TAGS= -BUILD_TAGS= - -ifneq ($(PRE_RELEASE_VERSION),) - REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION} -endif - -smithy-publish-local: - cd codegen && ./gradlew publishToMavenLocal - -smithy-build: - cd codegen && ./gradlew build - -smithy-clean: - cd codegen && ./gradlew clean - -################## -# Linting/Verify # -################## -.PHONY: verify vet - -verify: vet - -vet: - go vet ${BUILD_TAGS} --all ./... - -################ -# Unit Testing # -################ -.PHONY: unit unit-race unit-test unit-race-test - -unit: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race: verify - go vet ${BUILD_TAGS} --all ./... && \ - go test ${BUILD_TAGS} ${RUN_NONE} ./... && \ - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -unit-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} ./... - -unit-race-test: verify - go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./... - -##################### -# Release Process # -##################### -.PHONY: preview-release pre-release-validation release - -preview-release: - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - -pre-release-validation: - @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \ - echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \ - fi - @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \ - echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \ - fi - -release: pre-release-validation - go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS} - go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE} - go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE} - go run ${REPOTOOLS_CMD_CHANGELOG} rm -all - go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE} - -module-version: - @go run ${REPOTOOLS_CMD_MODULE_VERSION} . 
- -############## -# Repo Tools # -############## -.PHONY: install-changelog - -install-changelog: - go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION} diff --git a/vendor/github.com/aws/smithy-go/NOTICE b/vendor/github.com/aws/smithy-go/NOTICE deleted file mode 100644 index 616fc588945..00000000000 --- a/vendor/github.com/aws/smithy-go/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md deleted file mode 100644 index a4bb43fbe9b..00000000000 --- a/vendor/github.com/aws/smithy-go/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## Smithy Go - -[![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) - -[Smithy](https://smithy.io/) code generators for Go. - -**WARNING: All interfaces are subject to change.** - -## License - -This project is licensed under the Apache-2.0 License. - diff --git a/vendor/github.com/aws/smithy-go/doc.go b/vendor/github.com/aws/smithy-go/doc.go deleted file mode 100644 index 87b0c74b75c..00000000000 --- a/vendor/github.com/aws/smithy-go/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package smithy provides the core components for a Smithy SDK. -package smithy diff --git a/vendor/github.com/aws/smithy-go/document.go b/vendor/github.com/aws/smithy-go/document.go deleted file mode 100644 index dec498c57bf..00000000000 --- a/vendor/github.com/aws/smithy-go/document.go +++ /dev/null @@ -1,10 +0,0 @@ -package smithy - -// Document provides access to loosely structured data in a document-like -// format. -// -// Deprecated: See the github.com/aws/smithy-go/document package. -type Document interface { - UnmarshalDocument(interface{}) error - GetValue() (interface{}, error) -} diff --git a/vendor/github.com/aws/smithy-go/document/doc.go b/vendor/github.com/aws/smithy-go/document/doc.go deleted file mode 100644 index 03055b7a1c2..00000000000 --- a/vendor/github.com/aws/smithy-go/document/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package document provides interface definitions and error types for document types. -// -// A document is a protocol-agnostic type which supports a JSON-like data-model. You can use this type to send -// UTF-8 strings, arbitrary precision numbers, booleans, nulls, a list of these values, and a map of UTF-8 -// strings to these values. -// -// API Clients expose document constructors in their respective client document packages which must be used to -// Marshal and Unmarshal Go types to and from their respective protocol representations. -// -// See the Marshaler and Unmarshaler type documentation for more details on how Go types can be converted to and from -// document types. -package document diff --git a/vendor/github.com/aws/smithy-go/document/document.go b/vendor/github.com/aws/smithy-go/document/document.go deleted file mode 100644 index 8f852d95c69..00000000000 --- a/vendor/github.com/aws/smithy-go/document/document.go +++ /dev/null @@ -1,153 +0,0 @@ -package document - -import ( - "fmt" - "math/big" - "strconv" -) - -// Marshaler is an interface for a type that marshals a document to its protocol-specific byte representation and -// returns the resulting bytes.
A non-nil error will be returned if an error is encountered during marshaling. -// -// Marshal supports basic scalars (int,uint,float,bool,string), big.Int, and big.Float, maps, slices, and structs. -// Anonymous nested types are flattened based on Go anonymous type visibility. -// -// When defining struct types, the `document` struct tag can be used to control how the value will be -// marshaled into the resulting protocol document. -// -// // Field is ignored -// Field int `document:"-"` -// -// // Field object of key "myName" -// Field int `document:"myName"` -// -// // Field object key of key "myName", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:"myName,omitempty"` -// -// // Field object key of "Field", and -// // Field is omitted if the field is a zero value for the type. -// Field int `document:",omitempty"` -// -// All struct fields, including anonymous fields, are marshaled unless -// any of the following conditions are met. -// -// - the field is not exported -// - document field tag is "-" -// - document field tag specifies "omitempty", and is a zero value. -// -// Pointer and interface values are encoded as the value pointed to or -// contained in the interface. A nil value encodes as a null -// value unless `omitempty` struct tag is provided. -// -// Channel, complex, and function values are not encoded and will be skipped -// when walking the value to be marshaled. -// -// time.Time is not supported and will cause the Marshaler to return an error. These values should be represented -// by your application as a string or numerical representation. -// -// Errors that occur when marshaling will stop the marshaler, and return the error. -// -// Marshal cannot represent cyclic data structures and will not handle them. -// Passing cyclic structures to Marshal will result in an infinite recursion. -type Marshaler interface { - MarshalSmithyDocument() ([]byte, error) -} - -// Unmarshaler is an interface for a type that unmarshals a document from its protocol-specific representation, and -// stores the result into the value pointed by v. If v is nil or not a pointer then InvalidUnmarshalError will be -// returned. -// -// Unmarshaler supports the same encodings produced by a document Marshaler. This includes support for the `document` -// struct field tag for controlling how struct fields are unmarshaled. -// -// Both generic interface{} and concrete types are valid unmarshal destination types. When unmarshaling a document -// into an empty interface the Unmarshaler will store one of these values: -// bool, for boolean values -// document.Number, for arbitrary-precision numbers (int64, float64, big.Int, big.Float) -// string, for string values -// []interface{}, for array values -// map[string]interface{}, for objects -// nil, for null values -// -// When unmarshaling, any error that occurs will halt the unmarshal and return the error. -type Unmarshaler interface { - UnmarshalSmithyDocument(v interface{}) error -} - -type noSerde interface { - noSmithyDocumentSerde() -} - -// NoSerde is a sentinel value to indicate that a given type should not be marshaled or unmarshaled -// into a protocol document. -type NoSerde struct{} - -func (n NoSerde) noSmithyDocumentSerde() {} - -var _ noSerde = (*NoSerde)(nil) - -// IsNoSerde returns whether the given type implements the no smithy document serde interface.
-func IsNoSerde(x interface{}) bool { - _, ok := x.(noSerde) - return ok -} - -// Number is an arbitrary precision numerical value -type Number string - -// String returns the number as a string. -func (n Number) String() string { - return string(n) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return n.intOfBitSize(64) -} - -func (n Number) intOfBitSize(bitSize int) (int64, error) { - return strconv.ParseInt(string(n), 10, bitSize) -} - -// Uint64 returns the number as a uint64. -func (n Number) Uint64() (uint64, error) { - return n.uintOfBitSize(64) -} - -func (n Number) uintOfBitSize(bitSize int) (uint64, error) { - return strconv.ParseUint(string(n), 10, bitSize) -} - -// Float32 returns the number parsed as a 32-bit float, returned as a float64. -func (n Number) Float32() (float64, error) { - return n.floatOfBitSize(32) -} - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return n.floatOfBitSize(64) -} - -// floatOfBitSize returns the number parsed as a float of the given bit size. -func (n Number) floatOfBitSize(bitSize int) (float64, error) { - return strconv.ParseFloat(string(n), bitSize) -} - -// BigFloat attempts to convert the number to a big.Float, returns an error if the operation fails. -func (n Number) BigFloat() (*big.Float, error) { - f, ok := (&big.Float{}).SetString(string(n)) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Float") - } - return f, nil -} - -// BigInt attempts to convert the number to a big.Int, returns an error if the operation fails. -func (n Number) BigInt() (*big.Int, error) { - f, ok := (&big.Int{}).SetString(string(n), 10) - if !ok { - return nil, fmt.Errorf("failed to convert to big.Int") - } - return f, nil -} diff --git a/vendor/github.com/aws/smithy-go/document/errors.go b/vendor/github.com/aws/smithy-go/document/errors.go deleted file mode 100644 index 046a7a76531..00000000000 --- a/vendor/github.com/aws/smithy-go/document/errors.go +++ /dev/null @@ -1,75 +0,0 @@ -package document - -import ( - "fmt" - "reflect" -) - -// UnmarshalTypeError is an error type representing an error -// unmarshaling a Smithy document to a Go value type. This is different -// from UnmarshalError in that it does not wrap an underlying error type. -type UnmarshalTypeError struct { - Value string - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalTypeError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %s into Go value type %s", - e.Value, e.Type.String()) -} - -// An InvalidUnmarshalError is an error type representing an invalid type -// encountered while unmarshaling a Smithy document to a Go value type. -type InvalidUnmarshalError struct { - Type reflect.Type -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidUnmarshalError) Error() string { - var msg string - if e.Type == nil { - msg = "cannot unmarshal to nil value" - } else if e.Type.Kind() != reflect.Ptr { - msg = fmt.Sprintf("cannot unmarshal to non-pointer value, got %s", e.Type.String()) - } else { - msg = fmt.Sprintf("cannot unmarshal to nil value, %s", e.Type.String()) - } - - return fmt.Sprintf("unmarshal failed, %s", msg) -} - -// An UnmarshalError wraps an error that occurred while unmarshaling a -// Smithy document into a Go type. This is different from -// UnmarshalTypeError in that it wraps the underlying error that occurred.
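The `document` struct tags and the Number type described above are the main user-facing pieces of this package. A compile-only sketch of both; the Order type and its fields are illustrative, not part of the API, and the concrete Marshal/Unmarshal entry points live in each API client's own document package:

package main

import (
	"fmt"

	"github.com/aws/smithy-go/document"
)

// Order demonstrates the `document` struct tags documented above.
type Order struct {
	ID     string `document:"id"`
	Note   string `document:"note,omitempty"` // dropped when empty
	Secret string `document:"-"`              // never marshaled
}

func main() {
	n := document.Number("12345.678")
	f, _ := n.Float64()
	_, intErr := n.Int64() // fails: the value is not an integer literal
	fmt.Println(f, intErr)

	// Arbitrary precision values round-trip through big.Int / big.Float.
	bi, _ := document.Number("123456789012345678901234567890").BigInt()
	fmt.Println(bi)
}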
-type UnmarshalError struct { - Err error - Value string - Type reflect.Type -} - -// Unwrap returns the underlying unmarshaling error -func (e *UnmarshalError) Unwrap() error { - return e.Err -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *UnmarshalError) Error() string { - return fmt.Sprintf("unmarshal failed, cannot unmarshal %q into %s, %v", - e.Value, e.Type.String(), e.Err) -} - -// An InvalidMarshalError is an error type representing an error -// occurring when marshaling a Go value type. -type InvalidMarshalError struct { - Message string -} - -// Error returns the string representation of the error. -// Satisfying the error interface. -func (e *InvalidMarshalError) Error() string { - return fmt.Sprintf("marshal failed, %s", e.Message) -} diff --git a/vendor/github.com/aws/smithy-go/errors.go b/vendor/github.com/aws/smithy-go/errors.go deleted file mode 100644 index d6948d02062..00000000000 --- a/vendor/github.com/aws/smithy-go/errors.go +++ /dev/null @@ -1,137 +0,0 @@ -package smithy - -import "fmt" - -// APIError provides the generic API and protocol agnostic error type all SDK -// generated exception types will implement. -type APIError interface { - error - - // ErrorCode returns the error code for the API exception. - ErrorCode() string - // ErrorMessage returns the error message for the API exception. - ErrorMessage() string - // ErrorFault returns the fault for the API exception. - ErrorFault() ErrorFault -} - -// GenericAPIError provides a generic concrete API error type that SDKs can use -// to deserialize error responses into. Should be used for unmodeled or untyped -// errors. -type GenericAPIError struct { - Code string - Message string - Fault ErrorFault -} - -// ErrorCode returns the error code for the API exception. -func (e *GenericAPIError) ErrorCode() string { return e.Code } - -// ErrorMessage returns the error message for the API exception. -func (e *GenericAPIError) ErrorMessage() string { return e.Message } - -// ErrorFault returns the fault for the API exception. -func (e *GenericAPIError) ErrorFault() ErrorFault { return e.Fault } - -func (e *GenericAPIError) Error() string { - return fmt.Sprintf("api error %s: %s", e.Code, e.Message) -} - -var _ APIError = (*GenericAPIError)(nil) - -// OperationError decorates an underlying error which occurred while invoking -// an operation with names of the operation and API. -type OperationError struct { - ServiceID string - OperationName string - Err error -} - -// Service returns the name of the API service the error occurred with. -func (e *OperationError) Service() string { return e.ServiceID } - -// Operation returns the name of the API operation the error occurred with. -func (e *OperationError) Operation() string { return e.OperationName } - -// Unwrap returns the nested error if any, or nil. -func (e *OperationError) Unwrap() error { return e.Err } - -func (e *OperationError) Error() string { - return fmt.Sprintf("operation error %s: %s, %v", e.ServiceID, e.OperationName, e.Err) -} - -// DeserializationError provides a wrapper for an error that occurs during -// deserialization. 
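Callers rarely switch on these concrete types directly; the usual pattern is errors.As against the APIError interface and the OperationError wrapper defined above. A minimal sketch, using a hand-built error in place of what a generated client would return:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/smithy-go"
)

func classify(err error) {
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		fmt.Println("code:", apiErr.ErrorCode(), "fault:", apiErr.ErrorFault().String())
	}
	var opErr *smithy.OperationError
	if errors.As(err, &opErr) {
		fmt.Println("service:", opErr.Service(), "operation:", opErr.Operation())
	}
}

func main() {
	// OperationError.Unwrap lets errors.As reach the nested APIError.
	classify(&smithy.OperationError{
		ServiceID:     "ECR",
		OperationName: "DescribeImages",
		Err: &smithy.GenericAPIError{
			Code:    "RepositoryNotFoundException",
			Message: "repository not found",
			Fault:   smithy.FaultClient,
		},
	})
}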
-type DeserializationError struct { - Err error // original error - Snapshot []byte -} - -// Error returns a formatted error for DeserializationError -func (e *DeserializationError) Error() string { - const msg = "deserialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s, %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in DeserializationError -func (e *DeserializationError) Unwrap() error { return e.Err } - -// ErrorFault provides the type for a Smithy API error fault. -type ErrorFault int - -// ErrorFault enumeration values -const ( - FaultUnknown ErrorFault = iota - FaultServer - FaultClient -) - -func (f ErrorFault) String() string { - switch f { - case FaultServer: - return "server" - case FaultClient: - return "client" - default: - return "unknown" - } -} - -// SerializationError represents an error that occurred while attempting to serialize a request -type SerializationError struct { - Err error // original error -} - -// Error returns a formatted error for SerializationError -func (e *SerializationError) Error() string { - const msg = "serialization failed" - if e.Err == nil { - return msg - } - return fmt.Sprintf("%s: %v", msg, e.Err) -} - -// Unwrap returns the underlying Error in SerializationError -func (e *SerializationError) Unwrap() error { return e.Err } - -// CanceledError is the error that will be returned by an API request that was -// canceled. API operations given a Context may return this error when -// canceled. -type CanceledError struct { - Err error -} - -// CanceledError returns true to satisfy interfaces checking for canceled errors. -func (*CanceledError) CanceledError() bool { return true } - -// Unwrap returns the underlying error, if there was one. -func (e *CanceledError) Unwrap() error { - return e.Err -} - -func (e *CanceledError) Error() string { - return fmt.Sprintf("canceled, %v", e.Err) -} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go deleted file mode 100644 index 997c3092464..00000000000 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. 
- -package smithy - -// goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.2" diff --git a/vendor/github.com/aws/smithy-go/local-mod-replace.sh b/vendor/github.com/aws/smithy-go/local-mod-replace.sh deleted file mode 100644 index 800bf376954..00000000000 --- a/vendor/github.com/aws/smithy-go/local-mod-replace.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -PROJECT_DIR="" -SMITHY_SOURCE_DIR=$(cd `dirname $0` && pwd) - -usage() { - echo "Usage: $0 [-s SMITHY_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2 - exit 1 -} - -while getopts "hs:d:" options; do - case "${options}" in - s) - SMITHY_SOURCE_DIR=${OPTARG} - if [ "$SMITHY_SOURCE_DIR" == "" ]; then - echo "path to smithy-go source directory is required" || exit - usage - fi - ;; - d) - PROJECT_DIR=${OPTARG} - ;; - h) - usage - ;; - *) - usage - ;; - esac -done - -if [ "$PROJECT_DIR" != "" ]; then - cd $PROJECT_DIR || exit -fi - -go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/smithy-go" | while read x; do - repPath=${x/github.com\/aws\/smithy-go/${SMITHY_SOURCE_DIR}} - echo -replace $x=$repPath -done | xargs go mod edit diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml deleted file mode 100644 index 20295cdd2aa..00000000000 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ /dev/null @@ -1,11 +0,0 @@ -[dependencies] - "github.com/google/go-cmp" = "v0.5.8" - "github.com/jmespath/go-jmespath" = "v0.4.0" - -[modules] - - [modules.codegen] - no_tag = true - - [modules."codegen/smithy-go-codegen/build/test-generated/go/internal/testmodule"] - no_tag = true diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go deleted file mode 100644 index 17d659c539e..00000000000 --- a/vendor/github.com/aws/smithy-go/properties.go +++ /dev/null @@ -1,52 +0,0 @@ -package smithy - -// PropertiesReader provides an interface for reading metadata from the -// underlying metadata container. -type PropertiesReader interface { - Get(key interface{}) interface{} -} - -// Properties provides storing and reading metadata values. Keys may be any -// comparable value type. Get and set will panic if key is not a comparable -// value type. -// -// Properties uses lazy initialization, and Set method must be called as an -// addressable value, or pointer. Not doing so may cause key/value pair to not -// be set. -type Properties struct { - values map[interface{}]interface{} -} - -// Get attempts to retrieve the value the key points to. Returns nil if the -// key was not found. -// -// Panics if key type is not comparable. -func (m *Properties) Get(key interface{}) interface{} { - return m.values[key] -} - -// Set stores the value pointed to by the key. If a value already exists at -// that key it will be replaced with the new value. -// -// Set method must be called as an addressable value, or pointer. If Set is not -// called as an addressable value or pointer, the key value pair being set may -// be lost. -// -// Panics if the key type is not comparable. -func (m *Properties) Set(key, value interface{}) { - if m.values == nil { - m.values = map[interface{}]interface{}{} - } - m.values[key] = value -} - -// Has returns whether the key exists in the metadata. -// -// Panics if the key type is not comparable.
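A short sketch of the Properties contract documented above; Set is called on an addressable value so the lazily initialized map is retained:

package main

import (
	"fmt"

	"github.com/aws/smithy-go"
)

func main() {
	var props smithy.Properties // addressable, so Set works as documented
	props.Set("retryAttempts", 3)
	if props.Has("retryAttempts") {
		fmt.Println(props.Get("retryAttempts")) // 3
	}
}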
-func (m *Properties) Has(key interface{}) bool { - if m.values == nil { - return false - } - _, ok := m.values[key] - return ok -} diff --git a/vendor/github.com/aws/smithy-go/validation.go b/vendor/github.com/aws/smithy-go/validation.go deleted file mode 100644 index b5eedc1f90a..00000000000 --- a/vendor/github.com/aws/smithy-go/validation.go +++ /dev/null @@ -1,140 +0,0 @@ -package smithy - -import ( - "bytes" - "fmt" - "strings" -) - -// An InvalidParamsError provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type InvalidParamsError struct { - // Context is the base context of the invalid parameter group. - Context string - errs []InvalidParamError -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *InvalidParamsError) Add(err InvalidParamError) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another InvalidParamsError -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. -func (e *InvalidParamsError) AddNested(nestedCtx string, nested InvalidParamsError) { - for _, err := range nested.errs { - err.SetContext(e.Context) - err.AddNestedContext(nestedCtx) - e.errs = append(e.errs, err) - } -} - -// Len returns the number of invalid parameter errors -func (e *InvalidParamsError) Len() int { - return len(e.errs) -} - -// Error returns the string formatted form of the invalid parameters. -func (e InvalidParamsError) Error() string { - w := &bytes.Buffer{} - fmt.Fprintf(w, "%d validation error(s) found.\n", len(e.errs)) - - for _, err := range e.errs { - fmt.Fprintf(w, "- %s\n", err.Error()) - } - - return w.String() -} - -// Errs returns a slice of the invalid parameters -func (e InvalidParamsError) Errs() []error { - errs := make([]error, len(e.errs)) - for i := 0; i < len(errs); i++ { - errs[i] = e.errs[i] - } - - return errs -} - -// An InvalidParamError represents an invalid parameter error type. -type InvalidParamError interface { - error - - // Field name the error occurred on. - Field() string - - // SetContext updates the context of the error. - SetContext(string) - - // AddNestedContext updates the error's context to include a nested level. - AddNestedContext(string) -} - -type invalidParamError struct { - context string - nestedContext string - field string - reason string -} - -// Error returns the string version of the invalid parameter error. -func (e invalidParamError) Error() string { - return fmt.Sprintf("%s, %s.", e.reason, e.Field()) -} - -// Field Returns the field and context the error occurred. -func (e invalidParamError) Field() string { - sb := &strings.Builder{} - sb.WriteString(e.context) - if sb.Len() > 0 { - if len(e.nestedContext) == 0 || (len(e.nestedContext) > 0 && e.nestedContext[:1] != "[") { - sb.WriteRune('.') - } - } - if len(e.nestedContext) > 0 { - sb.WriteString(e.nestedContext) - sb.WriteRune('.') - } - sb.WriteString(e.field) - return sb.String() -} - -// SetContext updates the base context of the error. -func (e *invalidParamError) SetContext(ctx string) { - e.context = ctx -} - -// AddNestedContext prepends a context to the field's path. 
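The nested-context handling in invalidParamError joins path segments with a dot, except when the nested segment is a slice or map index beginning with '['. A tiny standalone sketch of that joining rule (joinContext is an illustrative helper, not from the patch):

package main

import "fmt"

// joinContext mimics the path composition used by invalidParamError:
// ordinary segments are joined with '.', index segments ("[2]") are not.
func joinContext(ctx, nested string) string {
	if nested == "" {
		return ctx
	}
	if nested[:1] == "[" {
		return ctx + nested
	}
	return ctx + "." + nested
}

func main() {
	fmt.Println(joinContext("Input", "Items")) // Input.Items
	fmt.Println(joinContext("Input", "[2]"))   // Input[2]
}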
-func (e *invalidParamError) AddNestedContext(ctx string) { - if len(e.nestedContext) == 0 { - e.nestedContext = ctx - return - } - // Check if our nested context is an index into a slice or map - if e.nestedContext[:1] != "[" { - e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) - return - } - e.nestedContext = ctx + e.nestedContext -} - -// An ParamRequiredError represents an required parameter error. -type ParamRequiredError struct { - invalidParamError -} - -// NewErrParamRequired creates a new required parameter error. -func NewErrParamRequired(field string) *ParamRequiredError { - return &ParamRequiredError{ - invalidParamError{ - field: field, - reason: fmt.Sprintf("missing required field"), - }, - } -} diff --git a/vendor/github.com/devtron-labs/common-lib/blob-storage/GCPBlob.go b/vendor/github.com/devtron-labs/common-lib/blob-storage/GCPBlob.go index 1a50e4bf161..dc64c2310a7 100644 --- a/vendor/github.com/devtron-labs/common-lib/blob-storage/GCPBlob.go +++ b/vendor/github.com/devtron-labs/common-lib/blob-storage/GCPBlob.go @@ -19,6 +19,7 @@ package blob_storage import ( "cloud.google.com/go/storage" "context" + "errors" "fmt" "google.golang.org/api/iterator" "google.golang.org/api/option" @@ -91,8 +92,10 @@ func (impl *GCPBlob) getLatestVersion(storageClient *storage.Client, request *Bl var latestTimestampInMillis int64 = 0 for { objectAttrs, err := objects.Next() - if err == iterator.Done { + if errors.Is(err, iterator.Done) { break + } else if err != nil { + return 0, err } objectName := objectAttrs.Name if objectName != fileName { diff --git a/vendor/github.com/devtron-labs/common-lib/git-manager/GitCliManager.go b/vendor/github.com/devtron-labs/common-lib/git-manager/GitCliManager.go index f49f8b19034..38b27f4d712 100644 --- a/vendor/github.com/devtron-labs/common-lib/git-manager/GitCliManager.go +++ b/vendor/github.com/devtron-labs/common-lib/git-manager/GitCliManager.go @@ -19,6 +19,7 @@ package git_manager import ( "fmt" "github.com/devtron-labs/common-lib/git-manager/util" + "github.com/sirupsen/logrus" "log" "os" "os/exec" @@ -286,14 +287,16 @@ func (impl *GitCliManagerImpl) Clone(gitContext GitContext, prj CiProjectDetails } _, msgMsg, cErr = impl.shallowClone(gitContext, checkoutPath, prj.GitRepository, checkoutBranch) if cErr != nil { - log.Fatal("could not clone repo ", " err: ", cErr, "msgMsg: ", msgMsg) + logrus.Error("could not clone repo ", "msgMsg: ", msgMsg, " err: ", cErr) + return "", msgMsg, cErr } projectName := util.GetProjectName(prj.GitRepository) projRootDir := filepath.Join(checkoutPath, projectName) _, msgMsg, cErr = impl.moveFilesFromSourceToDestination(projRootDir, checkoutPath) if cErr != nil { - log.Fatal("could not move files between files ", "err: ", cErr, "msgMsg: ", msgMsg) + logrus.Error("could not move files between files ", "msgMsg: ", msgMsg, "err: ", cErr) + return "", msgMsg, cErr } return response, msgMsg, cErr } diff --git a/vendor/github.com/devtron-labs/common-lib/git-manager/GitManager.go b/vendor/github.com/devtron-labs/common-lib/git-manager/GitManager.go index e41e9fd7166..38ead6bed48 100644 --- a/vendor/github.com/devtron-labs/common-lib/git-manager/GitManager.go +++ b/vendor/github.com/devtron-labs/common-lib/git-manager/GitManager.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/devtron-labs/common-lib/git-manager/util" "github.com/devtron-labs/common-lib/utils" + "github.com/sirupsen/logrus" "log" "os" "path/filepath" @@ -129,13 +130,15 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, 
wo
 		cErr = util.CreateSshPrivateKeyOnDisk(index, prj.GitOptions.SshPrivateKey)
 		if cErr != nil {
-			log.Fatal("could not create ssh private key on disk ", " err ", cErr)
+			logrus.Error("could not create ssh private key on disk ", " err ", cErr)
+			return cErr
 		}
 	}
 	_, msgMsg, cErr := impl.GitCliManager.Clone(gitContext, prj)
 	if cErr != nil {
-		log.Fatal("could not clone repo ", " err ", cErr, "msgMsg", msgMsg)
+		logrus.Error("could not clone repo ", "msgMsg", msgMsg, " err ", cErr)
+		return cErr
 	}
 	// checkout code
@@ -153,7 +156,8 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, wo
 			log.Println("checkout commit in branch fix : ", checkoutSource)
 			msgMsg, cErr = impl.GitCliManager.GitCheckout(gitContext, prj.CheckoutPath, checkoutSource, authMode, prj.FetchSubmodules, prj.GitRepository)
 			if cErr != nil {
-				log.Fatal("could not checkout hash ", " err ", cErr, "msgMsg", msgMsg)
+				logrus.Error("could not checkout hash ", "errMsg", msgMsg, "err ", cErr)
+				return cErr
 			}
 		} else if prj.SourceType == SOURCE_TYPE_WEBHOOK {
@@ -163,7 +167,8 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, wo
 			targetCheckout := webhookDataData[WEBHOOK_SELECTOR_TARGET_CHECKOUT_NAME]
 			if len(targetCheckout) == 0 {
-				log.Fatal("could not get target checkout from request data")
+				logrus.Error("could not get 'target checkout' from request data", "webhookData", webhookDataData)
+				return fmt.Errorf("could not get 'target checkout' from request data")
 			}
 			log.Println("checkout commit in webhook : ", targetCheckout)
@@ -171,7 +176,7 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, wo
 			// checkout target hash
 			msgMsg, cErr = impl.GitCliManager.GitCheckout(gitContext, prj.CheckoutPath, targetCheckout, authMode, prj.FetchSubmodules, prj.GitRepository)
 			if cErr != nil {
-				log.Fatal("could not checkout ", "targetCheckout ", targetCheckout, " err ", cErr, " msgMsg", msgMsg)
+				logrus.Error("could not checkout ", "targetCheckout ", targetCheckout, " errMsg", msgMsg, " err ", cErr)
 				return cErr
 			}
@@ -181,7 +186,8 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, wo
 			// throw error if source checkout is empty
 			if len(sourceCheckout) == 0 {
-				log.Fatal("sourceCheckout is empty")
+				logrus.Error("'source checkout' is empty", "webhookData", webhookDataData)
+				return fmt.Errorf("'source checkout' is empty")
 			}
 			log.Println("merge commit in webhook : ", sourceCheckout)
@@ -189,14 +195,11 @@ func (impl *GitManager) CloneAndCheckout(ciProjectDetails []CiProjectDetails, wo
 			// merge source
 			_, msgMsg, cErr = impl.GitCliManager.Merge(filepath.Join(gitContext.WorkingDir, prj.CheckoutPath), sourceCheckout)
 			if cErr != nil {
-				log.Fatal("could not merge ", "sourceCheckout ", sourceCheckout, " err ", cErr, " msgMsg", msgMsg)
+				logrus.Error("could not merge ", "sourceCheckout ", sourceCheckout, " errMsg", msgMsg, " err ", cErr)
 				return cErr
 			}
-
 		}
-
 	}
-
 	return nil
 }
diff --git a/vendor/github.com/devtron-labs/common-lib/utils/CommonUtils.go b/vendor/github.com/devtron-labs/common-lib/utils/CommonUtils.go
index 671e4502fd7..95c0f3b7470 100644
--- a/vendor/github.com/devtron-labs/common-lib/utils/CommonUtils.go
+++ b/vendor/github.com/devtron-labs/common-lib/utils/CommonUtils.go
@@ -18,13 +18,20 @@ package utils
 
 import (
 	"fmt"
+	"github.com/devtron-labs/common-lib/git-manager/util"
+	"github.com/devtron-labs/common-lib/utils/bean"
+	"log"
 	"math/rand"
+	"path"
+	"regexp"
"strings" "time" ) var chars = []rune("abcdefghijklmnopqrstuvwxyz0123456789") +const DOCKER_REGISTRY_TYPE_DOCKERHUB = "docker-hub" + // Generates random string func Generate(size int) string { rand.Seed(time.Now().UnixNano()) @@ -47,3 +54,31 @@ func GetUrlWithScheme(url string) (urlWithScheme string) { } return urlWithScheme } + +func IsValidDockerTagName(tagName string) bool { + regString := "^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$" + regexpCompile := regexp.MustCompile(regString) + if regexpCompile.MatchString(tagName) { + return true + } else { + return false + } +} + +func BuildDockerImagePath(dockerInfo bean.DockerRegistryInfo) (string, error) { + dest := "" + if DOCKER_REGISTRY_TYPE_DOCKERHUB == dockerInfo.DockerRegistryType { + dest = dockerInfo.DockerRepository + ":" + dockerInfo.DockerImageTag + } else { + registryUrl := dockerInfo.DockerRegistryURL + u, err := util.ParseUrl(registryUrl) + if err != nil { + log.Println("not a valid docker repository url") + return "", err + } + u.Path = path.Join(u.Path, "/", dockerInfo.DockerRepository) + dockerRegistryURL := u.Host + u.Path + dest = dockerRegistryURL + ":" + dockerInfo.DockerImageTag + } + return dest, nil +} diff --git a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go index 1c018920e89..11a4015f40e 100644 --- a/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go +++ b/vendor/github.com/devtron-labs/common-lib/utils/bean/bean.go @@ -53,3 +53,11 @@ func (r *DockerAuthConfig) GetEncodedRegistryAuth() (string, error) { } return base64.StdEncoding.EncodeToString(encodedJSON), nil } + +type DockerRegistryInfo struct { + DockerImageTag string `json:"dockerImageTag"` + DockerRegistryId string `json:"dockerRegistryId"` + DockerRegistryType string `json:"dockerRegistryType"` + DockerRegistryURL string `json:"dockerRegistryURL"` + DockerRepository string `json:"dockerRepository"` +} diff --git a/vendor/github.com/devtron-labs/common-lib/utils/registry/bean.go b/vendor/github.com/devtron-labs/common-lib/utils/registry/bean.go new file mode 100644 index 00000000000..457453268fa --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/utils/registry/bean.go @@ -0,0 +1,18 @@ +package registry + +type registry string + +func (r registry) String() string { + return string(r) +} + +const ( + DOCKER_REGISTRY_TYPE_ECR registry = "ecr" + DOCKER_REGISTRY_TYPE_ACR registry = "acr" + DOCKER_REGISTRY_TYPE_DOCKERHUB registry = "docker-hub" + DOCKER_REGISTRY_TYPE_OTHER registry = "other" + REGISTRY_TYPE_ARTIFACT_REGISTRY registry = "artifact-registry" + REGISTRY_TYPE_GCR registry = "gcr" +) + +const JSON_KEY_USERNAME = "_json_key" diff --git a/vendor/github.com/devtron-labs/common-lib/utils/registry/pluginArtifact.go b/vendor/github.com/devtron-labs/common-lib/utils/registry/pluginArtifact.go new file mode 100644 index 00000000000..4d8f6ae2cbe --- /dev/null +++ b/vendor/github.com/devtron-labs/common-lib/utils/registry/pluginArtifact.go @@ -0,0 +1,101 @@ +package registry + +import ( + "fmt" + "sort" + "time" +) + +type version string + +const ( + V1 version = "v1" + V2 version = "v2" +) + +type ImageDetailsFromCR struct { + Version version `json:"version"` + ImageDetails []*GenericImageDetail `json:"imageDetails"` +} + +func NewImageDetailsFromCR(version version) *ImageDetailsFromCR { + return &ImageDetailsFromCR{ + Version: version, + } +} + +func (i *ImageDetailsFromCR) AddImageDetails(imageDetails ...*GenericImageDetail) *ImageDetailsFromCR { + if i == nil { + 
return i + } + i.ImageDetails = append(i.ImageDetails, imageDetails...) + return i +} + +type GenericImageDetail struct { + Image string `json:"image"` + ImageDigest string `json:"imageDigest"` + LastUpdatedOn time.Time `json:"imagePushedAt"` +} + +func (g *GenericImageDetail) SetImage(image *string) *GenericImageDetail { + if image == nil { + return g + } + g.Image = *image + return g +} + +func (g *GenericImageDetail) SetImageDigest(imageDigest *string) *GenericImageDetail { + if imageDigest == nil { + return g + } + g.ImageDigest = *imageDigest + return g +} + +func (g *GenericImageDetail) SetLastUpdatedOn(imagePushedAt *time.Time) *GenericImageDetail { + if imagePushedAt == nil { + return g + } + g.LastUpdatedOn = *imagePushedAt + return g +} + +func (g *GenericImageDetail) GetGenericImageDetailIdentifier() string { + if g == nil { + return "" + } + return fmt.Sprintf("%s-%s", g.Image, g.ImageDigest) +} + +func NewGenericImageDetailFromPlugin() *GenericImageDetail { + return &GenericImageDetail{} +} + +type OrderBy string + +const ( + Ascending = "ASC" + Descending = "DSC" // default +) + +// SortGenericImageDetailByCreatedOn is used to sort the list of GenericImageDetail by GenericImageDetail.LastUpdatedOn +// - OrderBy - default value Descending +// - Original Slice is not manipulated, returns a new slice +func SortGenericImageDetailByCreatedOn(images []*GenericImageDetail, orderBy OrderBy) []*GenericImageDetail { + if len(images) == 0 { + return images + } + // don't modify the original slice + sortedImages := make([]*GenericImageDetail, len(images)) + copy(sortedImages, images) + // sort by createdOn in descending order + sort.Slice(sortedImages, func(i, j int) bool { + if orderBy == Ascending { + return sortedImages[i].LastUpdatedOn.Before(sortedImages[j].LastUpdatedOn) + } + return sortedImages[i].LastUpdatedOn.After(sortedImages[j].LastUpdatedOn) + }) + return sortedImages +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8d25722e995..7c6ee426b7c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -240,13 +240,6 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 -## explicit; go 1.15 -github.com/aws/aws-sdk-go-v2/service/ecr/types -# github.com/aws/smithy-go v1.14.2 -## explicit; go 1.15 -github.com/aws/smithy-go -github.com/aws/smithy-go/document # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -351,7 +344,7 @@ github.com/devtron-labs/authenticator/jwt github.com/devtron-labs/authenticator/middleware github.com/devtron-labs/authenticator/oidc github.com/devtron-labs/authenticator/password -# github.com/devtron-labs/common-lib v0.16.1-0.20240904133334-7918e7c25b63 +# github.com/devtron-labs/common-lib v0.16.1-0.20240911071031-2625327bc7b4 ## explicit; go 1.21 github.com/devtron-labs/common-lib/async github.com/devtron-labs/common-lib/blob-storage @@ -373,6 +366,7 @@ github.com/devtron-labs/common-lib/utils/k8s github.com/devtron-labs/common-lib/utils/k8s/commonBean github.com/devtron-labs/common-lib/utils/k8s/health github.com/devtron-labs/common-lib/utils/k8sObjectsUtil +github.com/devtron-labs/common-lib/utils/registry github.com/devtron-labs/common-lib/utils/remoteConnection/bean github.com/devtron-labs/common-lib/utils/runTime github.com/devtron-labs/common-lib/utils/yaml diff --git a/wire_gen.go b/wire_gen.go index 76510b4eab7..9ad1f1575a8 
100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. -//go:generate go run github.com/google/wire/cmd/wire +//go:generate go run -mod=mod github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject @@ -119,6 +119,7 @@ import ( service3 "github.com/devtron-labs/devtron/pkg/appStore/values/service" appWorkflow2 "github.com/devtron-labs/devtron/pkg/appWorkflow" "github.com/devtron-labs/devtron/pkg/argoApplication" + "github.com/devtron-labs/devtron/pkg/argoApplication/read" "github.com/devtron-labs/devtron/pkg/argoRepositoryCreds" "github.com/devtron-labs/devtron/pkg/asyncProvider" "github.com/devtron-labs/devtron/pkg/attributes" @@ -157,7 +158,7 @@ import ( "github.com/devtron-labs/devtron/pkg/deploymentGroup" "github.com/devtron-labs/devtron/pkg/devtronResource" "github.com/devtron-labs/devtron/pkg/devtronResource/history/deployment/cdPipeline" - "github.com/devtron-labs/devtron/pkg/devtronResource/read" + read2 "github.com/devtron-labs/devtron/pkg/devtronResource/read" repository8 "github.com/devtron-labs/devtron/pkg/devtronResource/repository" "github.com/devtron-labs/devtron/pkg/dockerRegistry" "github.com/devtron-labs/devtron/pkg/eventProcessor" @@ -399,8 +400,8 @@ func InitializeApp() (*App, error) { if err != nil { return nil, err } - argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl) - k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sServiceImpl, clusterServiceImplExtended, argoApplicationServiceImpl) + argoApplicationReadServiceImpl := read.NewArgoApplicationReadServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl) + k8sCommonServiceImpl := k8s2.NewK8sCommonServiceImpl(sugaredLogger, k8sServiceImpl, clusterServiceImplExtended, argoApplicationReadServiceImpl) environmentRestHandlerImpl := cluster3.NewEnvironmentRestHandlerImpl(environmentServiceImpl, sugaredLogger, userServiceImpl, validate, enforcerImpl, deleteServiceExtendedImpl, k8sServiceImpl, k8sCommonServiceImpl) environmentRouterImpl := cluster3.NewEnvironmentRouterImpl(environmentRestHandlerImpl) transactionUtilImpl := sql.NewTransactionUtilImpl(db) @@ -460,7 +461,7 @@ func InitializeApp() (*App, error) { appStatusServiceImpl := appStatus2.NewAppStatusServiceImpl(appStatusRepositoryImpl, sugaredLogger, enforcerImpl, enforcerUtilImpl) scopedVariableRepositoryImpl := repository7.NewScopedVariableRepository(db, sugaredLogger, transactionUtilImpl) devtronResourceSearchableKeyRepositoryImpl := repository8.NewDevtronResourceSearchableKeyRepositoryImpl(sugaredLogger, db) - devtronResourceSearchableKeyServiceImpl, err := read.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) + devtronResourceSearchableKeyServiceImpl, err := read2.NewDevtronResourceSearchableKeyServiceImpl(sugaredLogger, devtronResourceSearchableKeyRepositoryImpl) if err != nil { return nil, err } @@ -575,7 +576,7 @@ func InitializeApp() (*App, error) { deploymentTemplateHistoryServiceImpl := history.NewDeploymentTemplateHistoryServiceImpl(sugaredLogger, deploymentTemplateHistoryRepositoryImpl, pipelineRepositoryImpl, chartRepositoryImpl, userServiceImpl, cdWorkflowRepositoryImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl) chartServiceImpl := chart.NewChartServiceImpl(chartRepositoryImpl, 
sugaredLogger, chartTemplateServiceImpl, chartRepoRepositoryImpl, appRepositoryImpl, utilMergeUtil, envConfigOverrideRepositoryImpl, pipelineConfigRepositoryImpl, environmentRepositoryImpl, deploymentTemplateHistoryServiceImpl, scopedVariableManagerImpl, deployedAppMetricsServiceImpl, chartRefServiceImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl) ciCdPipelineOrchestratorImpl := pipeline.NewCiCdPipelineOrchestrator(appRepositoryImpl, sugaredLogger, materialRepositoryImpl, pipelineRepositoryImpl, ciPipelineRepositoryImpl, ciPipelineMaterialRepositoryImpl, cdWorkflowRepositoryImpl, clientImpl, ciCdConfig, appWorkflowRepositoryImpl, environmentRepositoryImpl, attributesServiceImpl, appCrudOperationServiceImpl, userAuthServiceImpl, prePostCdScriptHistoryServiceImpl, pipelineStageServiceImpl, gitMaterialHistoryServiceImpl, ciPipelineHistoryServiceImpl, ciTemplateServiceImpl, dockerArtifactStoreRepositoryImpl, ciArtifactRepositoryImpl, configMapServiceImpl, customTagServiceImpl, genericNoteServiceImpl, chartServiceImpl, transactionUtilImpl, gitOpsConfigReadServiceImpl, deploymentConfigServiceImpl) - ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl, infraProviderImpl, ciCdPipelineOrchestratorImpl) + ciServiceImpl := pipeline.NewCiServiceImpl(sugaredLogger, workflowServiceImpl, ciPipelineMaterialRepositoryImpl, ciWorkflowRepositoryImpl, eventRESTClientImpl, eventSimpleFactoryImpl, ciPipelineRepositoryImpl, ciArtifactRepositoryImpl, pipelineStageServiceImpl, userServiceImpl, ciTemplateServiceImpl, appCrudOperationServiceImpl, environmentRepositoryImpl, appRepositoryImpl, scopedVariableManagerImpl, customTagServiceImpl, pluginInputVariableParserImpl, globalPluginServiceImpl, infraProviderImpl, ciCdPipelineOrchestratorImpl) ciLogServiceImpl, err := pipeline.NewCiLogServiceImpl(sugaredLogger, ciServiceImpl, k8sServiceImpl) if err != nil { return nil, err @@ -713,9 +714,9 @@ func InitializeApp() (*App, error) { k8sResourceHistoryServiceImpl := kubernetesResourceAuditLogs.Newk8sResourceHistoryServiceImpl(k8sResourceHistoryRepositoryImpl, sugaredLogger, appRepositoryImpl, environmentRepositoryImpl) ephemeralContainersRepositoryImpl := repository.NewEphemeralContainersRepositoryImpl(db, transactionUtilImpl) ephemeralContainerServiceImpl := cluster2.NewEphemeralContainerServiceImpl(ephemeralContainersRepositoryImpl, sugaredLogger) - terminalSessionHandlerImpl := terminal.NewTerminalSessionHandlerImpl(environmentServiceImpl, clusterServiceImplExtended, sugaredLogger, k8sServiceImpl, ephemeralContainerServiceImpl, argoApplicationServiceImpl) + terminalSessionHandlerImpl := terminal.NewTerminalSessionHandlerImpl(environmentServiceImpl, clusterServiceImplExtended, sugaredLogger, k8sServiceImpl, ephemeralContainerServiceImpl, argoApplicationReadServiceImpl) fluxApplicationServiceImpl := fluxApplication.NewFluxApplicationServiceImpl(sugaredLogger, helmAppServiceImpl, clusterServiceImplExtended, helmAppClientImpl, pumpImpl) - k8sApplicationServiceImpl, err := application2.NewK8sApplicationServiceImpl(sugaredLogger, clusterServiceImplExtended, pumpImpl, helmAppServiceImpl, k8sServiceImpl, 
acdAuthConfig, k8sResourceHistoryServiceImpl, k8sCommonServiceImpl, terminalSessionHandlerImpl, ephemeralContainerServiceImpl, ephemeralContainersRepositoryImpl, argoApplicationServiceImpl, fluxApplicationServiceImpl) + k8sApplicationServiceImpl, err := application2.NewK8sApplicationServiceImpl(sugaredLogger, clusterServiceImplExtended, pumpImpl, helmAppServiceImpl, k8sServiceImpl, acdAuthConfig, k8sResourceHistoryServiceImpl, k8sCommonServiceImpl, terminalSessionHandlerImpl, ephemeralContainerServiceImpl, ephemeralContainersRepositoryImpl, fluxApplicationServiceImpl) if err != nil { return nil, err } @@ -868,9 +869,10 @@ func InitializeApp() (*App, error) { appRouterImpl := app3.NewAppRouterImpl(appFilteringRouterImpl, appListingRouterImpl, appInfoRouterImpl, pipelineTriggerRouterImpl, pipelineConfigRouterImpl, pipelineHistoryRouterImpl, pipelineStatusRouterImpl, appWorkflowRouterImpl, devtronAppAutoCompleteRouterImpl, appWorkflowRestHandlerImpl, appListingRestHandlerImpl, appFilteringRestHandlerImpl) coreAppRestHandlerImpl := restHandler.NewCoreAppRestHandlerImpl(sugaredLogger, userServiceImpl, validate, enforcerUtilImpl, enforcerImpl, appCrudOperationServiceImpl, pipelineBuilderImpl, gitRegistryConfigImpl, chartServiceImpl, configMapServiceImpl, appListingServiceImpl, propertiesConfigServiceImpl, appWorkflowServiceImpl, materialRepositoryImpl, gitProviderRepositoryImpl, appWorkflowRepositoryImpl, environmentRepositoryImpl, configMapRepositoryImpl, chartRepositoryImpl, teamServiceImpl, argoUserServiceImpl, pipelineStageServiceImpl, ciPipelineRepositoryImpl) coreAppRouterImpl := router.NewCoreAppRouterImpl(coreAppRestHandlerImpl) + argoApplicationServiceImpl := argoApplication.NewArgoApplicationServiceImpl(sugaredLogger, clusterRepositoryImpl, k8sServiceImpl, argoUserServiceImpl, helmAppClientImpl, helmAppServiceImpl, k8sApplicationServiceImpl, argoApplicationReadServiceImpl) helmAppRestHandlerImpl := client3.NewHelmAppRestHandlerImpl(sugaredLogger, helmAppServiceImpl, enforcerImpl, clusterServiceImplExtended, enforcerUtilHelmImpl, appStoreDeploymentServiceImpl, installedAppDBServiceImpl, userServiceImpl, attributesServiceImpl, serverEnvConfigServerEnvConfig, fluxApplicationServiceImpl, argoApplicationServiceImpl) helmAppRouterImpl := client3.NewHelmAppRouterImpl(helmAppRestHandlerImpl) - k8sApplicationRestHandlerImpl := application3.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationServiceImpl) + k8sApplicationRestHandlerImpl := application3.NewK8sApplicationRestHandlerImpl(sugaredLogger, k8sApplicationServiceImpl, pumpImpl, terminalSessionHandlerImpl, enforcerImpl, enforcerUtilHelmImpl, enforcerUtilImpl, helmAppServiceImpl, userServiceImpl, k8sCommonServiceImpl, validate, environmentVariables, fluxApplicationServiceImpl, argoApplicationReadServiceImpl) k8sApplicationRouterImpl := application3.NewK8sApplicationRouterImpl(k8sApplicationRestHandlerImpl) pProfRestHandlerImpl := restHandler.NewPProfRestHandler(userServiceImpl, enforcerImpl) pProfRouterImpl := router.NewPProfRouter(sugaredLogger, pProfRestHandlerImpl) @@ -955,7 +957,7 @@ func InitializeApp() (*App, error) { deploymentConfigurationRouterImpl := router.NewDeploymentConfigurationRouter(deploymentConfigurationRestHandlerImpl) infraConfigRestHandlerImpl := 
infraConfig2.NewInfraConfigRestHandlerImpl(sugaredLogger, infraConfigServiceImpl, userServiceImpl, enforcerImpl, enforcerUtilImpl, validate)
 	infraConfigRouterImpl := infraConfig2.NewInfraProfileRouterImpl(infraConfigRestHandlerImpl)
-	argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, sugaredLogger, enforcerImpl)
+	argoApplicationRestHandlerImpl := argoApplication2.NewArgoApplicationRestHandlerImpl(argoApplicationServiceImpl, argoApplicationReadServiceImpl, sugaredLogger, enforcerImpl)
 	argoApplicationRouterImpl := argoApplication2.NewArgoApplicationRouterImpl(argoApplicationRestHandlerImpl)
 	deploymentHistoryServiceImpl := cdPipeline.NewDeploymentHistoryServiceImpl(sugaredLogger, cdHandlerImpl, imageTaggingServiceImpl, pipelineRepositoryImpl, deployedConfigurationHistoryServiceImpl)
 	apiReqDecoderServiceImpl := devtronResource.NewAPIReqDecoderServiceImpl(sugaredLogger, pipelineRepositoryImpl)

From 78f45effb4eb7c7d90ccdbe7a591da1332094424 Mon Sep 17 00:00:00 2001
From: prakhar katiyar <39842461+prkhrkat@users.noreply.github.com>
Date: Fri, 13 Sep 2024 13:50:58 +0530
Subject: [PATCH 46/61] feat: Custom tag for copy container image plugin
 (#5760) (#5841)

* wip: changes for v2
* migration
* sql script renaming
* adding back registryDestinationImageMap in request
* migration script update
* adding isExposed check
* modification for multiple plugin in same stage
* wip: fixing query
* custom tag deactivate fixes
* returning err for deactivateUnusedPaths function
* filepath fix
* changes needed for updated event payload
* down migration
* updating migration
* updating migration

Co-authored-by: iamayushm <32041961+iamayushm@users.noreply.github.com>
---
 .../sql/repository/CiArtifactRepository.go    |   3 +
 .../sql/repository/CustomTagRepository.go     |  14 ++
 .../devtronApps/PostStageTriggerService.go    |   2 +-
 .../devtronApps/PreStageTriggerService.go     | 195 ++++++++++--------
 .../bean/pluginArtifactsBean.go               |  61 ++++++
 pkg/eventProcessor/bean/workflowEventBean.go  |   2 +
 .../in/WorkflowEventProcessorService.go       |  17 +-
 pkg/pipeline/CiService.go                     |  87 +++++---
 pkg/pipeline/CustomTagService.go              |   5 +
 pkg/pipeline/PipelineStageService.go          |   2 +-
 pkg/pipeline/pipelineStageVariableParser.go   |   6 +-
 pkg/plugin/GlobalPluginService.go             |  17 +-
 pkg/workflow/dag/WorkflowDagExecutor.go       |  67 +++++-
 .../sql/286_copy_container_image_v2.down.sql  |   1 +
 .../sql/286_copy_container_image_v2.up.sql    |  25 +++
 util/mapUtil.go                               |  13 ++
 16 files changed, 385 insertions(+), 132 deletions(-)
 create mode 100644 pkg/eventProcessor/bean/pluginArtifactsBean.go
 create mode 100644 scripts/sql/286_copy_container_image_v2.down.sql
 create mode 100644 scripts/sql/286_copy_container_image_v2.up.sql
 create mode 100644 util/mapUtil.go

diff --git a/internal/sql/repository/CiArtifactRepository.go b/internal/sql/repository/CiArtifactRepository.go
index 9cf6a18d824..d6297f07819 100644
--- a/internal/sql/repository/CiArtifactRepository.go
+++ b/internal/sql/repository/CiArtifactRepository.go
@@ -836,6 +836,9 @@ func (impl CiArtifactRepositoryImpl) GetArtifactsByDataSourceAndComponentId(data
 
 func (impl CiArtifactRepositoryImpl) FindCiArtifactByImagePaths(images []string) ([]CiArtifact, error) {
 	var ciArtifacts []CiArtifact
+	if len(images) == 0 {
+		return nil, nil
+	}
 	err := impl.dbConnection.
 		Model(&ciArtifacts).
 		Where(" image in (?) ", pg.In(images)).
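The new len(images) == 0 guard above (and the matching guards added to CustomTagRepository below) exists because expanding pg.In over an empty slice produces SQL of the form "image in ()", which Postgres rejects as a syntax error; short-circuiting also avoids a pointless round trip. A minimal sketch of the pattern outside go-pg (buildImageQuery and the table name are illustrative):

package main

import "fmt"

// buildImageQuery shows why the guard matters: with no values, the IN clause
// would render as "image in ()", which Postgres rejects as a syntax error.
func buildImageQuery(images []string) (string, bool) {
	if len(images) == 0 {
		return "", false // treat as "no rows", like returning nil, nil above
	}
	placeholders := ""
	for i := range images {
		if i > 0 {
			placeholders += ", "
		}
		placeholders += fmt.Sprintf("$%d", i+1)
	}
	return "SELECT * FROM ci_artifact WHERE image IN (" + placeholders + ")", true
}

func main() {
	q, ok := buildImageQuery([]string{"repo/app:1", "repo/app:2"})
	fmt.Println(ok, q) // true SELECT * FROM ci_artifact WHERE image IN ($1, $2)
	_, ok = buildImageQuery(nil)
	fmt.Println(ok) // false: short-circuit instead of sending invalid SQL
}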
diff --git a/internal/sql/repository/CustomTagRepository.go b/internal/sql/repository/CustomTagRepository.go index 4b33725afc6..0eb41620f81 100644 --- a/internal/sql/repository/CustomTagRepository.go +++ b/internal/sql/repository/CustomTagRepository.go @@ -56,6 +56,7 @@ type ImageTagRepository interface { DeactivateImagePathReservationByImagePaths(tx *pg.Tx, imagePaths []string) error DeactivateImagePathReservationByImagePathReservationIds(tx *pg.Tx, imagePathReservationIds []int) error DisableCustomTag(entityKey int, entityValue string) error + GetImagePathsByIds(ids []int) ([]*ImagePathReservation, error) } type ImageTagRepositoryImpl struct { @@ -139,6 +140,9 @@ func (impl *ImageTagRepositoryImpl) InsertImagePath(tx *pg.Tx, reservation *Imag } func (impl *ImageTagRepositoryImpl) DeactivateImagePathReservationByImagePaths(tx *pg.Tx, imagePaths []string) error { + if len(imagePaths) == 0 { + return nil + } query := `UPDATE image_path_reservation set active=false where image_path in (?)` _, err := tx.Exec(query, pg.In(imagePaths)) if err != nil && err != pg.ErrNoRows { @@ -161,3 +165,13 @@ func (impl *ImageTagRepositoryImpl) DisableCustomTag(entityKey int, entityValue _, err := impl.dbConnection.Exec(query, entityKey, entityValue) return err } +func (impl *ImageTagRepositoryImpl) GetImagePathsByIds(ids []int) ([]*ImagePathReservation, error) { + var imagePaths []*ImagePathReservation + if len(ids) == 0 { + return imagePaths, nil + } + err := impl.dbConnection.Model(&imagePaths). + Where("id in (?) ", pg.In(ids)). + Where("active = ?", true).Select() + return imagePaths, err +} diff --git a/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go index 413e51b5fc1..26a20f28434 100644 --- a/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PostStageTriggerService.go @@ -92,7 +92,7 @@ func (impl *TriggerServiceImpl) TriggerPostStage(request bean.TriggerRequest) er cdStageWorkflowRequest.Type = bean3.CD_WORKFLOW_PIPELINE_TYPE // handling plugin specific logic - pluginImagePathReservationIds, err := impl.SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest, pipeline.Id, types.POST, cdWf.CiArtifact) + pluginImagePathReservationIds, err := impl.setCopyContainerImagePluginDataAndReserveImages(cdStageWorkflowRequest, pipeline.Id, types.POST, cdWf.CiArtifact) if err != nil { runner.Status = pipelineConfig.WorkflowFailed runner.Message = err.Error() diff --git a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go index 37596e74791..244c5b0c47c 100644 --- a/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go +++ b/pkg/deployment/trigger/devtronApps/PreStageTriggerService.go @@ -39,6 +39,7 @@ import ( repository3 "github.com/devtron-labs/devtron/pkg/pipeline/history/repository" "github.com/devtron-labs/devtron/pkg/pipeline/types" "github.com/devtron-labs/devtron/pkg/plugin" + bean3 "github.com/devtron-labs/devtron/pkg/plugin/bean" "github.com/devtron-labs/devtron/pkg/resourceQualifiers" "github.com/devtron-labs/devtron/pkg/sql" util3 "github.com/devtron-labs/devtron/pkg/util" @@ -110,7 +111,7 @@ func (impl *TriggerServiceImpl) TriggerPreStage(request bean.TriggerRequest) err } cdStageWorkflowRequest.StageType = types.PRE // handling copyContainerImage plugin specific logic - imagePathReservationIds, err := impl.SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest, 
pipeline.Id, types.PRE, artifact) + imagePathReservationIds, err := impl.setCopyContainerImagePluginDataAndReserveImages(cdStageWorkflowRequest, pipeline.Id, types.PRE, artifact) if err != nil { runner.Status = pipelineConfig.WorkflowFailed runner.Message = err.Error() @@ -236,95 +237,121 @@ func (impl *TriggerServiceImpl) checkVulnerabilityStatusAndFailWfIfNeeded(ctx co return nil } -func (impl *TriggerServiceImpl) SetCopyContainerImagePluginDataInWorkflowRequest(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { - copyContainerImagePluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(pipeline.COPY_CONTAINER_IMAGE) - var imagePathReservationIds []int +// setCopyContainerImagePluginDataAndReserveImages sets required fields in cdStageWorkflowRequest and reserve images generated by plugin +func (impl *TriggerServiceImpl) setCopyContainerImagePluginDataAndReserveImages(cdStageWorkflowRequest *types.WorkflowRequest, pipelineId int, pipelineStage string, artifact *repository.CiArtifact) ([]int, error) { + + copyContainerImagePluginDetail, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(pipeline.COPY_CONTAINER_IMAGE) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting copyContainerImage plugin id", "err", err) - return imagePathReservationIds, err + return nil, err } - for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { - if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { - var pipelineStageEntityType int - if pipelineStage == types.PRE { - pipelineStageEntityType = pipelineConfigBean.EntityTypePreCD - } else { - pipelineStageEntityType = pipelineConfigBean.EntityTypePostCD - } - customTagId := -1 - var DockerImageTag string - customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pipelineStageEntityType, strconv.Itoa(pipelineId)) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching custom tag data", "err", err) - return imagePathReservationIds, err - } + pluginIdToVersionMap := make(map[int]string) + for _, p := range copyContainerImagePluginDetail { + pluginIdToVersionMap[p.Id] = p.Version + } - if !customTag.Enabled { - // case when custom tag is not configured - source image tag will be taken as docker image tag - pluginTriggerImageSplit := strings.Split(artifact.Image, ":") - DockerImageTag = pluginTriggerImageSplit[len(pluginTriggerImageSplit)-1] - } else { - // for copyContainerImage plugin parse destination images and save its data in image path reservation table - customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) - if err != nil && err != pg.ErrNoRows { - impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) - return imagePathReservationIds, err - } - if customTagDbObject != nil && customTagDbObject.Id > 0 { - customTagId = customTagDbObject.Id - } - DockerImageTag = customDockerImageTag - } + dockerImageTag, customTagId, err := impl.getDockerTagAndCustomTagIdForPlugin(pipelineStage, pipelineId, artifact) + if err != nil { + impl.logger.Errorw("error in getting docker tag", "err", err) + return nil, err + } - var sourceDockerRegistryId string - if artifact.DataSource == repository.PRE_CD || artifact.DataSource == repository.POST_CD || artifact.DataSource == repository.POST_CI { - if artifact.CredentialsSourceType == 
repository.GLOBAL_CONTAINER_REGISTRY { - sourceDockerRegistryId = artifact.CredentialSourceValue - } - } else { - sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId - } - registryDestinationImageMap, registryCredentialMap, err := impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, DockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) + var sourceDockerRegistryId string + if artifact.DataSource == repository.PRE_CD || artifact.DataSource == repository.POST_CD || artifact.DataSource == repository.POST_CI { + if artifact.CredentialsSourceType == repository.GLOBAL_CONTAINER_REGISTRY { + sourceDockerRegistryId = artifact.CredentialSourceValue + } + } else { + sourceDockerRegistryId = cdStageWorkflowRequest.DockerRegistryId + } + + registryCredentialMap := make(map[string]bean3.RegistryCredentials) + var allDestinationImages []string //saving all images to be reserved in this array + + for _, step := range cdStageWorkflowRequest.PrePostDeploySteps { + if version, ok := pluginIdToVersionMap[step.RefPluginId]; ok { + registryDestinationImageMap, credentialMap, err := impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, dockerImageTag, cdStageWorkflowRequest.CiArtifactDTO.Image, sourceDockerRegistryId) if err != nil { impl.logger.Errorw("error in parsing copyContainerImage input variable", "err", err) - return imagePathReservationIds, err - } - var destinationImages []string - for _, images := range registryDestinationImageMap { - for _, image := range images { - destinationImages = append(destinationImages, image) - } - } - // fetch already saved artifacts to check if they are already present - savedCIArtifacts, err := impl.ciArtifactRepository.FindCiArtifactByImagePaths(destinationImages) - if err != nil { - impl.logger.Errorw("error in fetching artifacts by image path", "err", err) - return imagePathReservationIds, err + return nil, err } - if len(savedCIArtifacts) > 0 { - // if already present in ci artifact, return "image path already in use error" - return imagePathReservationIds, pipelineConfigBean.ErrImagePathInUse + if version == pipeline.COPY_CONTAINER_IMAGE_VERSION_V1 { + // this is needed in ci runner only for v1 + cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap } - imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) - if err != nil { - impl.logger.Errorw("error in reserving image", "err", err) - return imagePathReservationIds, err + for _, images := range registryDestinationImageMap { + allDestinationImages = append(allDestinationImages, images...) 
} - cdStageWorkflowRequest.RegistryDestinationImageMap = registryDestinationImageMap - cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap - var pluginArtifactStage string - if pipelineStage == types.PRE { - pluginArtifactStage = repository.PRE_CD - } else { - pluginArtifactStage = repository.POST_CD + for k, v := range credentialMap { + registryCredentialMap[k] = v } - cdStageWorkflowRequest.PluginArtifactStage = pluginArtifactStage } } + + // set data in cdStageWorkflowRequest needed for copy container image plugin + + cdStageWorkflowRequest.RegistryCredentialMap = registryCredentialMap + cdStageWorkflowRequest.DockerImageTag = dockerImageTag + if pipelineStage == types.PRE { + cdStageWorkflowRequest.PluginArtifactStage = repository.PRE_CD + } else { + cdStageWorkflowRequest.PluginArtifactStage = repository.POST_CD + } + + // fetch already saved artifacts to check if they are already present + + savedCIArtifacts, err := impl.ciArtifactRepository.FindCiArtifactByImagePaths(allDestinationImages) + if err != nil { + impl.logger.Errorw("error in fetching artifacts by image path", "err", err) + return nil, err + } + if len(savedCIArtifacts) > 0 { + // if already present in ci artifact, return "image path already in use error" + return nil, pipelineConfigBean.ErrImagePathInUse + } + // reserve all images where data will be + imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, allDestinationImages) + if err != nil { + impl.logger.Errorw("error in reserving image", "err", err) + return imagePathReservationIds, err + } return imagePathReservationIds, nil } +func (impl *TriggerServiceImpl) getDockerTagAndCustomTagIdForPlugin(pipelineStage string, pipelineId int, artifact *repository.CiArtifact) (string, int, error) { + var pipelineStageEntityType int + if pipelineStage == types.PRE { + pipelineStageEntityType = pipelineConfigBean.EntityTypePreCD + } else { + pipelineStageEntityType = pipelineConfigBean.EntityTypePostCD + } + customTag, err := impl.customTagService.GetActiveCustomTagByEntityKeyAndValue(pipelineStageEntityType, strconv.Itoa(pipelineId)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag data", "err", err) + return "", 0, err + } + var DockerImageTag string + customTagId := -1 // if customTag is not configured id=-1 will be saved in image_path_reservation table for image reservation + if !customTag.Enabled { + // case when custom tag is not configured - source image tag will be taken as docker image tag + pluginTriggerImageSplit := strings.Split(artifact.Image, ":") + DockerImageTag = pluginTriggerImageSplit[len(pluginTriggerImageSplit)-1] + } else { + // for copyContainerImage plugin parse destination images and save its data in image path reservation table + customTagDbObject, customDockerImageTag, err := impl.customTagService.GetCustomTag(pipelineStageEntityType, strconv.Itoa(pipelineId)) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in fetching custom tag by entity key and value for CD", "err", err) + return "", 0, err + } + if customTagDbObject != nil && customTagDbObject.Id > 0 { + customTagId = customTagDbObject.Id + } + DockerImageTag = customDockerImageTag + } + return DockerImageTag, customTagId, nil +} + func (impl *TriggerServiceImpl) buildWFRequest(runner *pipelineConfig.CdWorkflowRunner, cdWf *pipelineConfig.CdWorkflow, cdPipeline *pipelineConfig.Pipeline, envDeploymentConfig *bean5.DeploymentConfig, triggeredBy int32) (*types.WorkflowRequest, error) { if 
cdPipeline.App.Id == 0 {
 		appModel, err := impl.appRepository.FindById(cdPipeline.AppId)
@@ -843,20 +870,20 @@ func (impl *TriggerServiceImpl) getSourceCiPipelineForArtifact(ciPipeline pipeli
 	return sourceCiPipeline, nil
 }
 
-func (impl *TriggerServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, registryImageMap map[string][]string) ([]int, error) {
+func (impl *TriggerServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, destinationImages []string) ([]int, error) {
 	var imagePathReservationIds []int
-	for _, images := range registryImageMap {
-		for _, image := range images {
-			imagePathReservationData, err := impl.customTagService.ReserveImagePath(image, customTagId)
-			if err != nil {
-				impl.logger.Errorw("Error in marking custom tag reserved", "err", err)
-				return imagePathReservationIds, err
-			}
-			if imagePathReservationData != nil {
-				imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id)
-			}
+
+	for _, image := range destinationImages {
+		imagePathReservationData, err := impl.customTagService.ReserveImagePath(image, customTagId)
+		if err != nil {
+			impl.logger.Errorw("Error in marking custom tag reserved", "err", err)
+			return imagePathReservationIds, err
+		}
+		if imagePathReservationData != nil {
+			imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id)
 		}
 	}
+
 	return imagePathReservationIds, nil
 }
diff --git a/pkg/eventProcessor/bean/pluginArtifactsBean.go b/pkg/eventProcessor/bean/pluginArtifactsBean.go
new file mode 100644
index 00000000000..4c60cea04c5
--- /dev/null
+++ b/pkg/eventProcessor/bean/pluginArtifactsBean.go
@@ -0,0 +1,61 @@
+package bean
+
+import (
+	"slices"
+	"time"
+)
+
+type Kind string
+type CredentialSourceType string
+type ArtifactType string
+
+const (
+	PluginArtifactsKind           Kind                 = "PluginArtifacts"
+	GlobalContainerRegistrySource CredentialSourceType = "global_container_registry"
+	ArtifactTypeContainer         ArtifactType         = "CONTAINER"
+)
+
+type PluginArtifacts struct {
+	Kind      Kind       `json:"Kind"`
+	Artifacts []Artifact `json:"Artifacts"`
+}
+
+func NewPluginArtifact() *PluginArtifacts {
+	return &PluginArtifacts{
+		Kind:      PluginArtifactsKind,
+		Artifacts: make([]Artifact, 0),
+	}
+}
+
+func (p *PluginArtifacts) MergePluginArtifact(pluginArtifact *PluginArtifacts) {
+	if pluginArtifact == nil {
+		return
+	}
+	p.Artifacts = append(p.Artifacts, pluginArtifact.Artifacts...)
+}
+
+func (p *PluginArtifacts) GetRegistryToUniqueContainerArtifactDataMapping() map[string][]string {
+	registryToImageMapping := make(map[string][]string)
+	for _, artifact := range p.Artifacts {
+		if artifact.Type == ArtifactTypeContainer {
+			if artifact.CredentialsSourceType == GlobalContainerRegistrySource {
+				if _, ok := registryToImageMapping[artifact.CredentialSourceValue]; !ok {
+					registryToImageMapping[artifact.CredentialSourceValue] = make([]string, 0)
+				}
+				registryToImageMapping[artifact.CredentialSourceValue] = append(registryToImageMapping[artifact.CredentialSourceValue], artifact.Data...)
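+				// Note that slices.Sort/slices.Compact below dedupe the image list:
+				// Compact removes only adjacent duplicates and returns the shortened
+				// slice, so the slice must be sorted first and the result reassigned.
+				// A standalone sketch of the dedup step (image names are illustrative):
+				//
+				//	images := []string{"repo/a:1", "repo/b:2", "repo/a:1"}
+				//	slices.Sort(images)             // group duplicates together
+				//	images = slices.Compact(images) // Compact returns the shorter slice
+				//	fmt.Println(images)             // [repo/a:1 repo/b:2]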
+				slices.Sort(registryToImageMapping[artifact.CredentialSourceValue])
+				registryToImageMapping[artifact.CredentialSourceValue] = slices.Compact(registryToImageMapping[artifact.CredentialSourceValue])
+			}
+		}
+	}
+	return registryToImageMapping
+}
+
+type Artifact struct {
+	Type                      ArtifactType         `json:"Type"`
+	Data                      []string             `json:"Data"`
+	CredentialsSourceType     CredentialSourceType `json:"CredentialsSourceType"`
+	CredentialSourceValue     string               `json:"CredentialSourceValue"`
+	CreatedByPluginIdentifier string               `json:"createdByPluginIdentifier"`
+	CreatedOn                 time.Time            `json:"createdOn"`
+}
diff --git a/pkg/eventProcessor/bean/workflowEventBean.go b/pkg/eventProcessor/bean/workflowEventBean.go
index 755defdd3ee..37112b5fa22 100644
--- a/pkg/eventProcessor/bean/workflowEventBean.go
+++ b/pkg/eventProcessor/bean/workflowEventBean.go
@@ -38,6 +38,7 @@ type CdStageCompleteEvent struct {
 	PipelineName                  string                       `json:"pipelineName"`
 	CiArtifactDTO                 pipelineConfig.CiArtifactDTO `json:"ciArtifactDTO"`
 	PluginRegistryArtifactDetails map[string][]string          `json:"PluginRegistryArtifactDetails"`
+	PluginArtifacts               *PluginArtifacts             `json:"pluginArtifacts"`
 }
 
 type UserDeploymentRequest struct {
@@ -81,6 +82,7 @@ type CiCompleteEvent struct {
 	PluginRegistryArtifactDetails map[string][]string `json:"PluginRegistryArtifactDetails"`
 	PluginArtifactStage           string              `json:"pluginArtifactStage"`
 	pluginImageDetails            *registry.ImageDetailsFromCR
+	PluginArtifacts               *PluginArtifacts    `json:"pluginArtifacts"`
 }
 
 func (c *CiCompleteEvent) GetPluginImageDetails() *registry.ImageDetailsFromCR {
diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go
index d01fb1bfe60..afd03182486 100644
--- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go
+++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go
@@ -185,8 +185,15 @@ func (impl *WorkflowEventProcessorImpl) SubscribeCDStageCompleteEvent() error {
 					return
 				}
 			} else if wfr.WorkflowType == apiBean.CD_WORKFLOW_TYPE_POST {
+
+				pluginArtifacts := make(map[string][]string)
+				if cdStageCompleteEvent.PluginArtifacts != nil {
+					pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping()
+					globalUtil.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails)
+				}
+
 				impl.logger.Debugw("received post stage success event for workflow runner ", "wfId", strconv.Itoa(wfr.Id))
-				err = impl.workflowDagExecutor.HandlePostStageSuccessEvent(triggerContext, wfr.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, cdStageCompleteEvent.PluginRegistryArtifactDetails)
+				err = impl.workflowDagExecutor.HandlePostStageSuccessEvent(triggerContext, wfr, wfr.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, pluginArtifacts)
 				if err != nil {
 					impl.logger.Errorw("deployment success event error", "err", err)
 					return
@@ -636,6 +643,12 @@ func (impl *WorkflowEventProcessorImpl) BuildCiArtifactRequest(event bean.CiComp
 		event.TriggeredBy = 1 // system triggered event
 	}
 
+	pluginArtifacts := make(map[string][]string)
+	if event.PluginArtifacts != nil {
+		pluginArtifacts = event.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping()
+		globalUtil.MergeMaps(pluginArtifacts, event.PluginRegistryArtifactDetails)
+	}
+
 	request := &wrokflowDagBean.CiArtifactWebhookRequest{
 		Image:       event.DockerImage,
 		ImageDigest: event.Digest,
@@ -645,7 +658,7 @@ func (impl *WorkflowEventProcessorImpl) BuildCiArtifactRequest(event bean.CiComp
 		UserId:            event.TriggeredBy,
 		WorkflowId:        event.WorkflowId,
IsArtifactUploaded: event.IsArtifactUploaded, - PluginRegistryArtifactDetails: event.PluginRegistryArtifactDetails, + PluginRegistryArtifactDetails: pluginArtifacts, PluginArtifactStage: event.PluginArtifactStage, } // if DataSource is empty, repository.WEBHOOK is considered as default diff --git a/pkg/pipeline/CiService.go b/pkg/pipeline/CiService.go index eff00206933..174e3cbb894 100644 --- a/pkg/pipeline/CiService.go +++ b/pkg/pipeline/CiService.go @@ -697,10 +697,10 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } // copyContainerImage plugin specific logic - var registryDestinationImageMap map[string][]string var registryCredentialMap map[string]bean2.RegistryCredentials var pluginArtifactStage string var imageReservationIds []int + var registryDestinationImageMap map[string][]string if !isJob { registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imageReservationIds, err = impl.GetWorkflowRequestVariablesForCopyContainerImagePlugin(preCiSteps, postCiSteps, dockerImageTag, customTag.Id, fmt.Sprintf(pipelineConfigBean.ImagePathPattern, @@ -835,9 +835,7 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. if ciWorkflowConfig.LogsBucket == "" { ciWorkflowConfig.LogsBucket = impl.config.GetDefaultBuildLogsBucket() } - if len(registryDestinationImageMap) > 0 { - workflowRequest.PushImageBeforePostCI = true - } + switch workflowRequest.CloudProvider { case types.BLOB_STORAGE_S3: // No AccessKey is used for uploading artifacts, instead IAM based auth is used @@ -896,58 +894,81 @@ func (impl *CiServiceImpl) buildWfRequestForCiPipeline(pipeline *pipelineConfig. } func (impl *CiServiceImpl) GetWorkflowRequestVariablesForCopyContainerImagePlugin(preCiSteps []*pipelineConfigBean.StepObject, postCiSteps []*pipelineConfigBean.StepObject, customTag string, customTagId int, buildImagePath string, buildImagedockerRegistryId string) (map[string][]string, map[string]bean2.RegistryCredentials, string, []int, error) { - var registryDestinationImageMap map[string][]string - var registryCredentialMap map[string]bean2.RegistryCredentials - var pluginArtifactStage string - var imagePathReservationIds []int - copyContainerImagePluginId, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(COPY_CONTAINER_IMAGE) + + copyContainerImagePluginDetail, err := impl.globalPluginService.GetRefPluginIdByRefPluginName(COPY_CONTAINER_IMAGE) if err != nil && err != pg.ErrNoRows { impl.Logger.Errorw("error in getting copyContainerImage plugin id", "err", err) - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err + return nil, nil, "", nil, err + } + + pluginIdToVersionMap := make(map[int]string) + for _, p := range copyContainerImagePluginDetail { + pluginIdToVersionMap[p.Id] = p.Version } + for _, step := range preCiSteps { - if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { + if _, ok := pluginIdToVersionMap[step.RefPluginId]; ok { // for copyContainerImage plugin parse destination images and save its data in image path reservation table - return nil, nil, pluginArtifactStage, nil, errors.New("copyContainerImage plugin not allowed in pre-ci step, please remove it and try again") + return nil, nil, "", nil, errors.New("copyContainerImage plugin not allowed in pre-ci step, please remove it and try again") } } + + registryCredentialMap := make(map[string]bean2.RegistryCredentials) + registryDestinationImageMap := make(map[string][]string) 
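+	// Note: per the version check below, registryDestinationImageMap is forwarded
+	// to the ci runner only for plugin version v1.0.0; the v1.1.0 flow appears to
+	// rely instead on the PluginArtifacts payload added to the completion events
+	// elsewhere in this patch (a reading of the diff, not documented behavior).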
+ var allDestinationImages []string //saving all images to be reserved in this array + for _, step := range postCiSteps { - if copyContainerImagePluginId != 0 && step.RefPluginId == copyContainerImagePluginId { - // for copyContainerImage plugin parse destination images and save its data in image path reservation table - registryDestinationImageMap, registryCredentialMap, err = impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) + if version, ok := pluginIdToVersionMap[step.RefPluginId]; ok { + destinationImageMap, credentialMap, err := impl.pluginInputVariableParser.HandleCopyContainerImagePluginInputVariables(step.InputVars, customTag, buildImagePath, buildImagedockerRegistryId) if err != nil { impl.Logger.Errorw("error in parsing copyContainerImage input variable", "err", err) - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, err + return nil, nil, "", nil, err + } + if version == COPY_CONTAINER_IMAGE_VERSION_V1 { + // this is needed in ci runner only for v1 + registryDestinationImageMap = destinationImageMap + } + for _, images := range destinationImageMap { + allDestinationImages = append(allDestinationImages, images...) + } + for k, v := range credentialMap { + registryCredentialMap[k] = v } - pluginArtifactStage = repository5.POST_CI } } - for _, images := range registryDestinationImageMap { - for _, image := range images { - if image == buildImagePath { - return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, - pipelineConfigBean.ErrImagePathInUse - } + + pluginArtifactStage := repository5.POST_CI + for _, image := range allDestinationImages { + if image == buildImagePath { + return nil, registryCredentialMap, pluginArtifactStage, nil, + pipelineConfigBean.ErrImagePathInUse } } - imagePathReservationIds, err = impl.ReserveImagesGeneratedAtPlugin(customTagId, registryDestinationImageMap) + savedCIArtifacts, err := impl.ciArtifactRepository.FindCiArtifactByImagePaths(allDestinationImages) + if err != nil { + impl.Logger.Errorw("error in fetching artifacts by image path", "err", err) + return nil, nil, pluginArtifactStage, nil, err + } + if len(savedCIArtifacts) > 0 { + // if already present in ci artifact, return "image path already in use error" + return nil, nil, pluginArtifactStage, nil, pipelineConfigBean.ErrImagePathInUse + } + imagePathReservationIds, err := impl.ReserveImagesGeneratedAtPlugin(customTagId, allDestinationImages) if err != nil { return nil, nil, pluginArtifactStage, imagePathReservationIds, err } return registryDestinationImageMap, registryCredentialMap, pluginArtifactStage, imagePathReservationIds, nil } -func (impl *CiServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, registryImageMap map[string][]string) ([]int, error) { +func (impl *CiServiceImpl) ReserveImagesGeneratedAtPlugin(customTagId int, destinationImages []string) ([]int, error) { var imagePathReservationIds []int - for _, images := range registryImageMap { - for _, image := range images { - imagePathReservationData, err := impl.customTagService.ReserveImagePath(image, customTagId) - if err != nil { - impl.Logger.Errorw("Error in marking custom tag reserved", "err", err) - return imagePathReservationIds, err - } - imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) + for _, image := range destinationImages { + imagePathReservationData, err := 
impl.customTagService.ReserveImagePath(image, customTagId) + if err != nil { + impl.Logger.Errorw("Error in marking custom tag reserved", "err", err) + return imagePathReservationIds, err } + imagePathReservationIds = append(imagePathReservationIds, imagePathReservationData.Id) } return imagePathReservationIds, nil } diff --git a/pkg/pipeline/CustomTagService.go b/pkg/pipeline/CustomTagService.go index 9869f707861..a17bd8ef5a5 100644 --- a/pkg/pipeline/CustomTagService.go +++ b/pkg/pipeline/CustomTagService.go @@ -41,6 +41,7 @@ type CustomTagService interface { DeactivateImagePathReservationByImagePath(imagePaths []string) error DeactivateImagePathReservationByImageIds(imagePathReservationIds []int) error DisableCustomTagIfExist(tag bean.CustomTag) error + GetImagePathsByIds(ids []int) ([]*repository.ImagePathReservation, error) } type CustomTagServiceImpl struct { @@ -303,3 +304,7 @@ func (impl *CustomTagServiceImpl) DeactivateImagePathReservationByImageIds(image func (impl *CustomTagServiceImpl) DisableCustomTagIfExist(tag bean.CustomTag) error { return impl.customTagRepository.DisableCustomTag(tag.EntityKey, tag.EntityValue) } + +func (impl *CustomTagServiceImpl) GetImagePathsByIds(ids []int) ([]*repository.ImagePathReservation, error) { + return impl.customTagRepository.GetImagePathsByIds(ids) +} diff --git a/pkg/pipeline/PipelineStageService.go b/pkg/pipeline/PipelineStageService.go index d4e7e7cfeae..847355dda71 100644 --- a/pkg/pipeline/PipelineStageService.go +++ b/pkg/pipeline/PipelineStageService.go @@ -2078,7 +2078,7 @@ func (impl *PipelineStageServiceImpl) BuildPluginVariableAndConditionDataForWfRe variableData.VariableType = bean.VARIABLE_TYPE_VALUE } else if variable.ValueType == repository2.PLUGIN_VARIABLE_VALUE_TYPE_GLOBAL { variableData.VariableType = bean.VARIABLE_TYPE_REF_GLOBAL - } else if variable.ValueType == repository2.PLUGIN_VARIABLE_VALUE_TYPE_PREVIOUS { + } else if variable.ValueType == repository2.PLUGIN_VARIABLE_VALUE_TYPE_PREVIOUS && !variable.IsExposed { variableData.VariableType = bean.VARIABLE_TYPE_REF_PLUGIN } if variable.VariableType == repository2.PLUGIN_VARIABLE_TYPE_INPUT { diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index a4497724c71..3dc9a9150d1 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -33,8 +33,10 @@ type copyContainerImagePluginInputVariable = string type RefPluginName = string const ( - COPY_CONTAINER_IMAGE RefPluginName = "Copy container image" - EMPTY_STRING = " " + COPY_CONTAINER_IMAGE RefPluginName = "Copy container image" + COPY_CONTAINER_IMAGE_VERSION_V1 = "v1.0.0" + COPY_CONTAINER_IMAGE_VERSION_V2 = "v1.1.0" + EMPTY_STRING = " " ) const ( diff --git a/pkg/plugin/GlobalPluginService.go b/pkg/plugin/GlobalPluginService.go index 5423ab22b2d..a39cef039b3 100644 --- a/pkg/plugin/GlobalPluginService.go +++ b/pkg/plugin/GlobalPluginService.go @@ -69,7 +69,7 @@ type GlobalPluginService interface { GetAllGlobalVariables(appType helper.AppType) ([]*GlobalVariable, error) ListAllPlugins(stageTypeReq string) ([]*bean2.PluginListComponentDto, error) GetPluginDetailById(pluginId int) (*bean2.PluginDetailDto, error) - GetRefPluginIdByRefPluginName(pluginName string) (refPluginId int, err error) + GetRefPluginIdByRefPluginName(pluginName string) (pluginVersionDetail []bean2.PluginsVersionDetail, err error) PatchPlugin(pluginDto *bean2.PluginMetadataDto, userId int32) (*bean2.PluginMetadataDto, error) 
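// Aside (illustration only, not part of the patch): the signature change above turns a
// single refPluginId lookup into a []bean2.PluginsVersionDetail result, so callers can
// act on every installed version of a plugin by name. A hedged sketch of that lookup
// shape, assuming a simplified in-memory store and a stand-in PluginsVersionDetail
// struct in place of the real bean2 type.
package main

import "fmt"

// PluginsVersionDetail is a simplified stand-in carrying just id and version.
type PluginsVersionDetail struct {
	Id      int
	Version string
}

// getRefPluginVersionsByName mirrors the refactored lookup: one plugin name may now
// resolve to several versioned rows; nil means the plugin is unknown, matching the
// (nil, nil) return in the patched method.
func getRefPluginVersionsByName(store map[string][]PluginsVersionDetail, name string) []PluginsVersionDetail {
	return store[name]
}

func main() {
	store := map[string][]PluginsVersionDetail{
		"Copy container image": {
			{Id: 11, Version: "1.0.0"},
			{Id: 42, Version: "1.1.0"},
		},
	}
	for _, detail := range getRefPluginVersionsByName(store, "Copy container image") {
		fmt.Println(detail.Id, detail.Version)
	}
}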
GetDetailedPluginInfoByPluginId(pluginId int) (*bean2.PluginMetadataDto, error) GetAllDetailedPluginInfo() ([]*bean2.PluginMetadataDto, error) @@ -383,16 +383,23 @@ func getVariableDto(pluginVariable *repository.PluginStepVariable) *bean2.Plugin } } -func (impl *GlobalPluginServiceImpl) GetRefPluginIdByRefPluginName(pluginName string) (refPluginId int, err error) { +func (impl *GlobalPluginServiceImpl) GetRefPluginIdByRefPluginName(pluginName string) (pluginVersionDetail []bean2.PluginsVersionDetail, err error) { pluginMetadata, err := impl.globalPluginRepository.GetPluginByName(pluginName) if err != nil { impl.logger.Errorw("error in fetching plugin metadata by name", "err", err) - return 0, err + return nil, err } if pluginMetadata == nil { - return 0, nil + return nil, nil + } + pluginVersionDetail = make([]bean2.PluginsVersionDetail, 0) + for _, p := range pluginMetadata { + pluginVersionDetail = append(pluginVersionDetail, bean2.PluginsVersionDetail{ + PluginMetadataDto: &bean2.PluginMetadataDto{Id: p.Id}, + Version: p.PluginVersion, + }) } - return pluginMetadata[0].Id, nil + return pluginVersionDetail, nil } func (impl *GlobalPluginServiceImpl) PatchPlugin(pluginDto *bean2.PluginMetadataDto, userId int32) (*bean2.PluginMetadataDto, error) { diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 511f76279ec..15a71c104b3 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -43,6 +43,7 @@ import ( repository2 "github.com/devtron-labs/devtron/pkg/plugin/repository" "github.com/devtron-labs/devtron/pkg/sql" "github.com/devtron-labs/devtron/pkg/workflow/cd" + bean4 "github.com/devtron-labs/devtron/pkg/workflow/cd/bean" bean2 "github.com/devtron-labs/devtron/pkg/workflow/dag/bean" error2 "github.com/devtron-labs/devtron/util/error" util2 "github.com/devtron-labs/devtron/util/event" @@ -74,7 +75,7 @@ type WorkflowDagExecutor interface { HandleCiSuccessEvent(triggerContext triggerBean.TriggerContext, ciPipelineId int, request *bean2.CiArtifactWebhookRequest, imagePushedAt time.Time) (id int, err error) HandlePreStageSuccessEvent(triggerContext triggerBean.TriggerContext, cdStageCompleteEvent eventProcessorBean.CdStageCompleteEvent) error HandleDeploymentSuccessEvent(triggerContext triggerBean.TriggerContext, pipelineOverride *chartConfig.PipelineOverride) error - HandlePostStageSuccessEvent(triggerContext triggerBean.TriggerContext, cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error + HandlePostStageSuccessEvent(triggerContext triggerBean.TriggerContext, wfr *bean4.CdWorkflowRunnerDto, cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error HandleCdStageReTrigger(runner *pipelineConfig.CdWorkflowRunner) error HandleCiStepFailedEvent(ciPipelineId int, request *bean2.CiArtifactWebhookRequest) (err error) HandleExternalCiWebhook(externalCiId int, request *bean2.CiArtifactWebhookRequest, @@ -533,6 +534,19 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t return err } if wfRunner.WorkflowType == bean.CD_WORKFLOW_TYPE_PRE { + + pluginArtifacts := make(map[string][]string) + if cdStageCompleteEvent.PluginArtifacts != nil { + pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() + util4.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) + } + + err = 
impl.deactivateUnusedPaths(wfRunner.ImagePathReservationIds, pluginArtifacts) + if err != nil { + impl.logger.Errorw("error in deactivating unused image paths", "err", err) + return err + } + pipeline, err := impl.pipelineRepository.FindById(cdStageCompleteEvent.CdPipelineId) if err != nil { return err @@ -548,7 +562,7 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t impl.logger.Warnw("unable to migrate deprecated DataSource", "artifactId", ciArtifact.Id) } } - PreCDArtifacts, err := impl.commonArtifactService.SavePluginArtifacts(ciArtifact, cdStageCompleteEvent.PluginRegistryArtifactDetails, pipeline.Id, repository.PRE_CD, cdStageCompleteEvent.TriggeredBy) + PreCDArtifacts, err := impl.commonArtifactService.SavePluginArtifacts(ciArtifact, pluginArtifacts, pipeline.Id, repository.PRE_CD, cdStageCompleteEvent.TriggeredBy) if err != nil { impl.logger.Errorw("error in saving plugin artifacts", "err", err) return err @@ -629,7 +643,7 @@ func (impl *WorkflowDagExecutorImpl) HandleDeploymentSuccessEvent(triggerContext } else { // to trigger next pre/cd, if any // finding children cd by pipeline id - err = impl.HandlePostStageSuccessEvent(triggerContext, cdWorkflow.Id, pipelineOverride.PipelineId, 1, nil) + err = impl.HandlePostStageSuccessEvent(triggerContext, nil, cdWorkflow.Id, pipelineOverride.PipelineId, 1, nil) if err != nil { impl.logger.Errorw("error in triggering children cd after successful deployment event", "parentCdPipelineId", pipelineOverride.PipelineId) return err @@ -638,7 +652,7 @@ func (impl *WorkflowDagExecutorImpl) HandleDeploymentSuccessEvent(triggerContext return nil } -func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(triggerContext triggerBean.TriggerContext, cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error { +func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(triggerContext triggerBean.TriggerContext, wfr *bean4.CdWorkflowRunnerDto, cdWorkflowId int, cdPipelineId int, triggeredBy int32, pluginRegistryImageDetails map[string][]string) error { // finding children cd by pipeline id cdPipelinesMapping, err := impl.appWorkflowRepository.FindWFCDMappingByParentCDPipelineId(cdPipelineId) if err != nil { @@ -651,6 +665,13 @@ func (impl *WorkflowDagExecutorImpl) HandlePostStageSuccessEvent(triggerContext return err } if len(pluginRegistryImageDetails) > 0 { + if wfr != nil { + err = impl.deactivateUnusedPaths(wfr.ImagePathReservationIds, pluginRegistryImageDetails) + if err != nil { + impl.logger.Errorw("error in deactivating images", "err", err) + return err + } + } PostCDArtifacts, err := impl.commonArtifactService.SavePluginArtifacts(ciArtifact, pluginRegistryImageDetails, cdPipelineId, repository.POST_CD, triggeredBy) if err != nil { impl.logger.Errorw("error in saving plugin artifacts", "err", err) return err @@ -706,6 +727,13 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext trigger impl.logger.Errorw("update wf failed for id ", "err", err) return 0, err } + + err = impl.deactivateUnusedPaths(savedWorkflow.ImagePathReservationIds, request.PluginRegistryArtifactDetails) + if err != nil { + impl.logger.Errorw("error in deactivating images", "err", err) + return 0, err + } + } pipeline, err := impl.ciPipelineRepository.FindByCiAndAppDetailsById(ciPipelineId) @@ -872,6 +900,37 @@ func (impl *WorkflowDagExecutorImpl) HandleCiSuccessEvent(triggerContext trigger return buildArtifact.Id, err } +func (impl *WorkflowDagExecutorImpl) 
deactivateUnusedPaths(reserveImagePathIds []int, pluginRegistryArtifactDetails map[string][]string) error { + // for the copy container image plugin: deactivate reserved image paths that were not actually copied + reservedImagePaths, err := impl.customTagService.GetImagePathsByIds(reserveImagePathIds) + if err != nil && err != pg.ErrNoRows { + impl.logger.Errorw("error in getting imagePaths by ids", "ImagePathReservationIds", reserveImagePathIds, "err", err) + return err + } + + copiedImagesMapping := make(map[string]bool) + for _, savedImages := range pluginRegistryArtifactDetails { + for _, image := range savedImages { + copiedImagesMapping[image] = true + } + } + + unusedPaths := make([]string, 0, len(reservedImagePaths)) + for _, reservedImage := range reservedImagePaths { + if _, ok := copiedImagesMapping[reservedImage.ImagePath]; !ok { + unusedPaths = append(unusedPaths, reservedImage.ImagePath) + } + } + + err = impl.customTagService.DeactivateImagePathReservationByImagePath(unusedPaths) + if err != nil { + impl.logger.Errorw("error in deactivating unused image paths", "imagePathReservationIds", reserveImagePathIds, "err", err) + return err + } + + return nil +} + func (impl *WorkflowDagExecutorImpl) WriteCiSuccessEvent(request *bean2.CiArtifactWebhookRequest, pipeline *pipelineConfig.CiPipeline, artifact *repository.CiArtifact) { event := impl.eventFactory.Build(util2.Success, &pipeline.Id, pipeline.AppId, nil, util2.CI) event.CiArtifactId = artifact.Id diff --git a/scripts/sql/286_copy_container_image_v2.down.sql b/scripts/sql/286_copy_container_image_v2.down.sql new file mode 100644 index 00000000000..b1b2291c112 --- /dev/null +++ b/scripts/sql/286_copy_container_image_v2.down.sql @@ -0,0 +1 @@ +delete from plugin_parent_metadata where identifier='copy-container-image'; \ No newline at end of file diff --git a/scripts/sql/286_copy_container_image_v2.up.sql b/scripts/sql/286_copy_container_image_v2.up.sql new file mode 100644 index 00000000000..34476367e89 --- /dev/null +++ b/scripts/sql/286_copy_container_image_v2.up.sql @@ -0,0 +1,25 @@ + + +INSERT INTO "plugin_parent_metadata" ("id", "name", "identifier", "description", "type", "icon", "deleted", "created_on", "created_by", "updated_on", "updated_by") +SELECT nextval('id_seq_plugin_parent_metadata'), 'Copy container image','copy-container-image', 'Copy container images from the source repository to a desired repository','PRESET','https://raw.githubusercontent.com/devtron-labs/devtron/main/assets/ic-plugin-copy-container-image.png','f', 'now()', 1, 'now()', 1 + WHERE NOT EXISTS ( + SELECT 1 + FROM plugin_parent_metadata + WHERE identifier='copy-container-image' + AND deleted = false +); + +-- update the plugin_metadata with the plugin_parent_metadata_id +UPDATE plugin_metadata +SET plugin_parent_metadata_id = ( + SELECT id + FROM plugin_parent_metadata + WHERE identifier='copy-container-image' + AND deleted = false +),plugin_version='1.0.0' +WHERE name='Copy container image' + AND ( + plugin_parent_metadata_id IS NULL + OR plugin_parent_metadata_id = 0 + ) + AND deleted = false; diff --git a/util/mapUtil.go b/util/mapUtil.go new file mode 100644 index 00000000000..07476ba8c65 --- /dev/null +++ b/util/mapUtil.go @@ -0,0 +1,13 @@ +package util + +func MergeMaps(map1, map2 map[string][]string) { + for key, values := range map2 { + if existingValues, found := map1[key]; found { + // Key exists in map1, append the values from map2 + map1[key] = append(existingValues, values...) 
+ } else { + // Key does not exist in map1, add the new key-value pair + map1[key] = values + } + } +} From 395d6592349a76284db148d3a879d9d07609bcc0 Mon Sep 17 00:00:00 2001 From: Prakash Date: Fri, 13 Sep 2024 14:02:01 +0530 Subject: [PATCH 47/61] chore: migration number fix (#5840) * migration number fix * go mod --- go.mod | 1 - go.sum | 7 ------- ...r_mandatory_plugin_policy_for_parent_plugin_id.down.sql | 1 + ...ter_mandatory_plugin_policy_for_parent_plugin_id.up.sql | 1 + 4 files changed, 2 insertions(+), 8 deletions(-) create mode 100644 scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.down.sql create mode 100644 scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.up.sql diff --git a/go.mod b/go.mod index d5f09c6b3dc..7eb2b5d156e 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,6 @@ require ( github.com/argoproj/argo-workflows/v3 v3.5.10 github.com/argoproj/gitops-engine v0.7.1-0.20240715141028-c68bce0f979c github.com/aws/aws-sdk-go v1.44.317 - github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 github.com/caarlos0/env v3.5.0+incompatible github.com/caarlos0/env/v6 v6.7.2 github.com/casbin/casbin v1.9.1 diff --git a/go.sum b/go.sum index d4251e80e8c..ba488c2627a 100644 --- a/go.sum +++ b/go.sum @@ -95,13 +95,6 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l github.com/aws/aws-sdk-go v1.44.290/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.44.317 h1:+8XWrLmGMwPPXSRSLPzhgcGnzJ2mYkgkrcB9C/GnSOU= github.com/aws/aws-sdk-go v1.44.317/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0 h1:Qw8H7V55d2P1d/a9+cLgAcdez4GtP6l30KQAeYqx9vY= -github.com/aws/aws-sdk-go-v2/service/ecr v1.20.0/go.mod h1:pGwmNL8hN0jpBfKfTbmu+Rl0bJkDhaGl+9PQLrZ4KLo= -github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= -github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.down.sql b/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.down.sql new file mode 100644 index 00000000000..5876fd62ebd --- /dev/null +++ b/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.down.sql @@ -0,0 +1 @@ +--empty script for maintaining script number common across repo. \ No newline at end of file diff --git a/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.up.sql b/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.up.sql new file mode 100644 index 00000000000..5876fd62ebd --- /dev/null +++ b/scripts/sql/287_alter_mandatory_plugin_policy_for_parent_plugin_id.up.sql @@ -0,0 +1 @@ +--empty script for maintaining script number common across repo. 
\ No newline at end of file From 68934d7b417b6760ff119eee7fe18d0248988452 Mon Sep 17 00:00:00 2001 From: iamayushm <32041961+iamayushm@users.noreply.github.com> Date: Fri, 13 Sep 2024 18:46:40 +0530 Subject: [PATCH 48/61] wip: adding variable id (#5844) --- .../app/pipeline/configure/DeploymentPipelineRestHandler.go | 4 ++-- pkg/pipeline/PipelineStageService.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go index 2b6608822d0..091a77a40a3 100644 --- a/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go +++ b/api/restHandler/app/pipeline/configure/DeploymentPipelineRestHandler.go @@ -2023,13 +2023,13 @@ func (handler *PipelineConfigRestHandlerImpl) GetCdPipelineById(w http.ResponseW return } - ciConf, err := handler.pipelineBuilder.GetCdPipelineById(pipelineId) + cdPipeline, err := handler.pipelineBuilder.GetCdPipelineById(pipelineId) if err != nil { handler.Logger.Errorw("service err, GetCdPipelineById", "err", err, "appId", appId, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) return } - cdResp, err := pipeline.CreatePreAndPostStageResponse(ciConf, version) + cdResp, err := pipeline.CreatePreAndPostStageResponse(cdPipeline, version) if err != nil { handler.Logger.Errorw("service err, CheckForVersionAndCreatePreAndPostStagePayload", "err", err, "appId", appId, "pipelineId", pipelineId) common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) diff --git a/pkg/pipeline/PipelineStageService.go b/pkg/pipeline/PipelineStageService.go index 847355dda71..f6029271c8d 100644 --- a/pkg/pipeline/PipelineStageService.go +++ b/pkg/pipeline/PipelineStageService.go @@ -349,6 +349,7 @@ func (impl *PipelineStageServiceImpl) BuildVariableAndConditionDataByStepIdDeepC for _, variable := range variables { variableNameIdMap[variable.Id] = variable.Name variableDto := &bean.StepVariableDto{ + Id: variable.Id, Name: variable.Name, Format: variable.Format, Description: variable.Description, From 0fc6d64e1befa160405c54786590c5b598389af3 Mon Sep 17 00:00:00 2001 From: Gireesh Naidu <111440205+gireesh-naidu@users.noreply.github.com> Date: Tue, 17 Sep 2024 19:08:04 +0530 Subject: [PATCH 49/61] fix: ignore kubelink errors in server startup (#5852) (#5854) * fix: ignore error message got while checking devtron installation status * fix: correct error messaging for docker creds --- api/helm-app/service/HelmAppService.go | 8 +++--- api/restHandler/DockerRegRestHandler.go | 19 +++----------- pkg/pipeline/DockerRegistryConfig.go | 35 +++++++++++++++++-------- pkg/server/ServerCacheService.go | 5 ++-- 4 files changed, 35 insertions(+), 32 deletions(-) diff --git a/api/helm-app/service/HelmAppService.go b/api/helm-app/service/HelmAppService.go index f0ea1411e86..01e1e90d94c 100644 --- a/api/helm-app/service/HelmAppService.go +++ b/api/helm-app/service/HelmAppService.go @@ -86,7 +86,7 @@ type HelmAppService interface { UpdateApplicationWithChartInfoWithExtraValues(ctx context.Context, appIdentifier *helmBean.AppIdentifier, chartRepository *gRPC.ChartRepository, extraValues map[string]interface{}, extraValuesYamlUrl string, useLatestChartVersion bool) (*openapi.UpdateReleaseResponse, error) TemplateChart(ctx context.Context, templateChartRequest *openapi2.TemplateChartRequest) (*openapi2.TemplateChartResponse, error) GetNotes(ctx context.Context, request *gRPC.InstallReleaseRequest) (string, 
error) - ValidateOCIRegistry(ctx context.Context, OCIRegistryRequest *gRPC.RegistryCredential) bool + ValidateOCIRegistry(ctx context.Context, OCIRegistryRequest *gRPC.RegistryCredential) (bool, error) GetRevisionHistoryMaxValue(appType bean.SourceAppType) int32 GetResourceTreeForExternalResources(ctx context.Context, clusterId int, clusterConfig *gRPC.ClusterConfig, resources []*gRPC.ExternalResourceDetail) (*gRPC.ResourceTreeResponse, error) CheckIfNsExistsForClusterIds(clusterIdToNsMap map[int]string) error @@ -1022,13 +1022,13 @@ func (impl *HelmAppServiceImpl) GetNotes(ctx context.Context, request *gRPC.Inst return notesTxt, err } -func (impl *HelmAppServiceImpl) ValidateOCIRegistry(ctx context.Context, OCIRegistryRequest *gRPC.RegistryCredential) bool { +func (impl *HelmAppServiceImpl) ValidateOCIRegistry(ctx context.Context, OCIRegistryRequest *gRPC.RegistryCredential) (bool, error) { response, err := impl.helmAppClient.ValidateOCIRegistry(ctx, OCIRegistryRequest) if err != nil { impl.logger.Errorw("error in fetching chart", "err", err) - return false + return false, err } - return response.IsLoggedIn + return response.IsLoggedIn, nil } func (impl *HelmAppServiceImpl) DecodeAppId(appId string) (*helmBean.AppIdentifier, error) { diff --git a/api/restHandler/DockerRegRestHandler.go b/api/restHandler/DockerRegRestHandler.go index 3d6a46827f3..828185ca4ed 100644 --- a/api/restHandler/DockerRegRestHandler.go +++ b/api/restHandler/DockerRegRestHandler.go @@ -24,7 +24,6 @@ import ( "github.com/devtron-labs/devtron/api/restHandler/common" repository "github.com/devtron-labs/devtron/internal/sql/repository/dockerRegistry" - "github.com/devtron-labs/devtron/internal/util" chartProviderService "github.com/devtron-labs/devtron/pkg/appStore/chartProvider" "github.com/devtron-labs/devtron/pkg/auth/authorisation/casbin" "github.com/devtron-labs/devtron/pkg/auth/user" @@ -233,13 +232,8 @@ func (impl DockerRegRestHandlerImpl) SaveDockerRegistryConfig(w http.ResponseWri //RBAC enforcer Ends // valid registry credentials from kubelink - if isValid := impl.dockerRegistryConfig.ValidateRegistryCredentials(&bean); !isValid { - impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err, "payload", bean) - err = &util.ApiError{ - HttpStatusCode: http.StatusBadRequest, - InternalMessage: "Invalid authentication credentials. Please verify.", - UserMessage: "Invalid authentication credentials. Please verify.", - } + if err = impl.dockerRegistryConfig.ValidateRegistryCredentials(&bean); err != nil { + impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return } @@ -349,13 +343,8 @@ func (impl DockerRegRestHandlerImpl) ValidateDockerRegistryConfig(w http.Respons bean.Cert = existingStore.Cert } // valid registry credentials from kubelink - if isValid := impl.dockerRegistryConfig.ValidateRegistryCredentials(&bean); !isValid { - impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err, "payload", bean) - err = &util.ApiError{ - HttpStatusCode: http.StatusBadRequest, - InternalMessage: "Invalid authentication credentials. Please verify.", - UserMessage: "Invalid authentication credentials. 
Please verify.", - } + if err = impl.dockerRegistryConfig.ValidateRegistryCredentials(&bean); err != nil { + impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err) common.WriteJsonResp(w, err, nil, http.StatusBadRequest) return } diff --git a/pkg/pipeline/DockerRegistryConfig.go b/pkg/pipeline/DockerRegistryConfig.go index 48fa6350a63..bf1f0ae28e3 100644 --- a/pkg/pipeline/DockerRegistryConfig.go +++ b/pkg/pipeline/DockerRegistryConfig.go @@ -46,7 +46,7 @@ type DockerRegistryConfig interface { Delete(storeId string) (string, error) DeleteReg(bean *types.DockerArtifactStoreBean) error CheckInActiveDockerAccount(storeId string) (bool, error) - ValidateRegistryCredentials(bean *types.DockerArtifactStoreBean) bool + ValidateRegistryCredentials(bean *types.DockerArtifactStoreBean) error ConfigureOCIRegistry(bean *types.DockerArtifactStoreBean, isUpdate bool, userId int32, tx *pg.Tx) error CreateOrUpdateOCIRegistryConfig(ociRegistryConfig *repository.OCIRegistryConfig, userId int32, tx *pg.Tx) error FilterOCIRegistryConfigForSpecificRepoType(ociRegistryConfigList []*repository.OCIRegistryConfig, repositoryType string) *repository.OCIRegistryConfig @@ -578,13 +578,8 @@ func (impl DockerRegistryConfigImpl) Update(bean *types.DockerArtifactStoreBean) bean.PluginId = existingStore.PluginId store := NewDockerArtifactStore(bean, true, existingStore.CreatedOn, time.Now(), existingStore.CreatedBy, bean.User) - if isValid := impl.ValidateRegistryCredentials(bean); !isValid { - impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err, "payload", bean) - err = &util.ApiError{ - HttpStatusCode: http.StatusBadRequest, - InternalMessage: "Invalid authentication credentials. Please verify.", - UserMessage: "Invalid authentication credentials. Please verify.", - } + if err = impl.ValidateRegistryCredentials(bean); err != nil { + impl.logger.Errorw("registry credentials validation err, SaveDockerRegistryConfig", "err", err) return nil, err } err = impl.dockerArtifactStoreRepository.Update(store, tx) @@ -888,12 +883,14 @@ func (impl DockerRegistryConfigImpl) CheckInActiveDockerAccount(storeId string) return exist, nil } -func (impl DockerRegistryConfigImpl) ValidateRegistryCredentials(bean *types.DockerArtifactStoreBean) bool { +const ociRegistryInvalidCredsMsg = "Invalid authentication credentials. Please verify." + +func (impl DockerRegistryConfigImpl) ValidateRegistryCredentials(bean *types.DockerArtifactStoreBean) error { if bean.IsPublic || bean.RegistryType == repository.REGISTRYTYPE_GCR || bean.RegistryType == repository.REGISTRYTYPE_ARTIFACT_REGISTRY || bean.RegistryType == repository.REGISTRYTYPE_OTHER { - return true + return nil } request := &bean2.RegistryCredential{ RegistryUrl: bean.RegistryURL, @@ -906,5 +903,21 @@ func (impl DockerRegistryConfigImpl) ValidateRegistryCredentials(bean *types.Doc IsPublic: bean.IsPublic, Connection: bean.Connection, } - return impl.helmAppService.ValidateOCIRegistry(context.Background(), request) + + isLoggedIn, err := impl.helmAppService.ValidateOCIRegistry(context.Background(), request) + if err != nil { + impl.logger.Errorw("error in fetching chart", "err", err) + return util.NewApiError(). + WithUserMessage("error in validating oci registry"). + WithInternalMessage(err.Error()). + WithHttpStatusCode(http.StatusInternalServerError) + } + if !isLoggedIn { + return util.NewApiError(). + WithUserMessage(ociRegistryInvalidCredsMsg). + WithInternalMessage(ociRegistryInvalidCredsMsg). 
+ WithHttpStatusCode(http.StatusBadRequest) + } + + return nil } diff --git a/pkg/server/ServerCacheService.go b/pkg/server/ServerCacheService.go index 3dcdf36d99e..cfcd43bd6c2 100644 --- a/pkg/server/ServerCacheService.go +++ b/pkg/server/ServerCacheService.go @@ -63,8 +63,9 @@ func NewServerCacheServiceImpl(logger *zap.SugaredLogger, serverEnvConfig *serve // check if the release is installed or not isDevtronHelmReleaseInstalled, err := impl.helmAppService.IsReleaseInstalled(context.Background(), &appIdentifier) if err != nil { - log.Println("not able to check if the devtron helm release exists or not.", "error", err) - return nil, err + logger.Errorw("not able to check if the devtron helm release exists or not.", "error", err) + // return nil, err + // not returning the error as it will bring down orchestrator } // if not installed, treat it as OSS kubectl user From 18431d04363f3447094a2330b6b24a37aceff26f Mon Sep 17 00:00:00 2001 From: kartik-579 <84493919+kartik-579@users.noreply.github.com> Date: Wed, 18 Sep 2024 11:28:47 +0530 Subject: [PATCH 50/61] fix: fixed user rbac flows (#5804) * update CreateService rbac flow * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * wip * prod bug fixes * updated logic for chartGroup * updated comments * role group delete prod bug fix * role group update prod bug fix --- api/auth/user/UserRestHandler.go | 432 +++++++++++++++++++---------- api/bean/UserRequest.go | 1 + cmd/external-app/wire_gen.go | 2 +- pkg/apiToken/ApiTokenService.go | 2 +- pkg/auth/user/RoleGroupService.go | 59 ++-- pkg/auth/user/UserCommonService.go | 107 ++----- pkg/auth/user/UserService.go | 270 ++++++++---------- 7 files changed, 453 insertions(+), 420 deletions(-) diff --git a/api/auth/user/UserRestHandler.go b/api/auth/user/UserRestHandler.go index 037de4166b9..c4f892a332b 100644 --- a/api/auth/user/UserRestHandler.go +++ b/api/auth/user/UserRestHandler.go @@ -19,9 +19,10 @@ package user import ( "encoding/json" "errors" - "fmt" util2 "github.com/devtron-labs/devtron/api/auth/user/util" "github.com/devtron-labs/devtron/pkg/auth/user/helper" + "github.com/devtron-labs/devtron/pkg/auth/user/repository" + "github.com/go-pg/pg" "github.com/gorilla/schema" "net/http" "strconv" @@ -34,7 +35,6 @@ import ( user2 "github.com/devtron-labs/devtron/pkg/auth/user" bean2 "github.com/devtron-labs/devtron/pkg/auth/user/bean" "github.com/devtron-labs/devtron/util/response" - "github.com/go-pg/pg" "github.com/gorilla/mux" "go.uber.org/zap" "gopkg.in/go-playground/validator.v9" @@ -129,68 +129,20 @@ func (handler UserRestHandlerImpl) CreateUser(w http.ResponseWriter, r *http.Req // RBAC enforcer applying token := r.Header.Get("token") - isActionUserSuperAdmin := false - if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { - isActionUserSuperAdmin = true + isAuthorised, err := handler.checkRBACForUserCreate(token, userInfo.SuperAdmin, userInfo.RoleFilters, userInfo.UserRoleGroup) + if err != nil { + common.WriteJsonResp(w, err, "", http.StatusInternalServerError) + return } - if userInfo.RoleFilters != nil && len(userInfo.RoleFilters) > 0 { - for _, filter := range userInfo.RoleFilters { - if filter.AccessType == bean.APP_ACCESS_TYPE_HELM && !isActionUserSuperAdmin { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - if len(filter.Team) > 0 { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team); !ok { - 
response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - } - if filter.Entity == bean.CLUSTER_ENTITIY { - if ok := handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth); !ok { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - } - } - } else { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, "*"); !ok { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } + if !isAuthorised { + response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) + return } - // auth check inside groups - if len(userInfo.UserRoleGroup) > 0 { - groupRoles, err := handler.roleGroupService.FetchRolesForUserRoleGroups(userInfo.UserRoleGroup) - if err != nil && err != pg.ErrNoRows { - handler.logger.Errorw("service err, UpdateUser", "err", err, "payload", userInfo) - common.WriteJsonResp(w, err, "", http.StatusInternalServerError) - return - } - - if len(groupRoles) > 0 { - for _, groupRole := range groupRoles { - if groupRole.AccessType == bean.APP_ACCESS_TYPE_HELM && !isActionUserSuperAdmin { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - if len(groupRole.Team) > 0 { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, groupRole.Team); !ok { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - } - } - } else { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, "*"); !ok { - response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) - return - } - } - } //RBAC enforcer Ends - - res, restrictedGroups, err := handler.userService.CreateUser(&userInfo, token, handler.CheckManagerAuth) + //In create req, we also check if any email exists already. If yes, then in that case we go on and merge existing roles and groups with the ones in request + //but rbac is only checked on create request roles and groups as existing roles and groups are assumed to be checked when created/updated before + res, err := handler.userService.CreateUser(&userInfo) if err != nil { handler.logger.Errorw("service err, CreateUser", "err", err, "payload", userInfo) if _, ok := err.(*util.ApiError); ok { @@ -201,23 +153,8 @@ func (handler UserRestHandlerImpl) CreateUser(w http.ResponseWriter, r *http.Req } return } - - if len(restrictedGroups) == 0 { - common.WriteJsonResp(w, err, res, http.StatusOK) - } else { - errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin := helper.CreateErrorMessageForUserRoleGroups(restrictedGroups) - - if len(restrictedGroups) != len(userInfo.UserRoleGroup) { - // warning - message := fmt.Errorf("User permissions added partially. %s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) - common.WriteJsonResp(w, message, nil, http.StatusExpectationFailed) - - } else { - //error - message := fmt.Errorf("Permission could not be added. 
%s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) - common.WriteJsonResp(w, message, nil, http.StatusBadRequest) - } - } + common.WriteJsonResp(w, err, res, http.StatusOK) + return } func (handler UserRestHandlerImpl) UpdateUser(w http.ResponseWriter, r *http.Request) { @@ -255,31 +192,29 @@ func (handler UserRestHandlerImpl) UpdateUser(w http.ResponseWriter, r *http.Req return } - res, rolesChanged, groupsModified, restrictedGroups, err := handler.userService.UpdateUser(&userInfo, token, handler.CheckManagerAuth) - + res, err := handler.userService.UpdateUser(&userInfo, token, handler.checkRBACForUserUpdate) if err != nil { handler.logger.Errorw("service err, UpdateUser", "err", err, "payload", userInfo) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } - - if len(restrictedGroups) == 0 { - common.WriteJsonResp(w, err, res, http.StatusOK) - } else { - errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin := helper.CreateErrorMessageForUserRoleGroups(restrictedGroups) - - if rolesChanged || groupsModified { - // warning - message := fmt.Errorf("User permissions updated partially. %s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) - common.WriteJsonResp(w, message, nil, http.StatusExpectationFailed) - - } else { - //error - message := fmt.Errorf("Permission could not be added/removed. %s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) - common.WriteJsonResp(w, message, nil, http.StatusBadRequest) - } - } - + common.WriteJsonResp(w, err, res, http.StatusOK) + //if len(restrictedGroups) == 0 { + // common.WriteJsonResp(w, err, res, http.StatusOK) + //} else { + // errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin := helper.CreateErrorMessageForUserRoleGroups(restrictedGroups) + // + // if rolesChanged || groupsModified { + // // warning + // message := fmt.Errorf("User permissions updated partially. %s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) + // common.WriteJsonResp(w, message, nil, http.StatusExpectationFailed) + // + // } else { + // //error + // message := fmt.Errorf("Permission could not be added/removed. 
%s%s", errorMessageForGroupsWithoutSuperAdmin, errorMessageForGroupsWithSuperAdmin) + // common.WriteJsonResp(w, message, nil, http.StatusBadRequest) + // } + //} } func (handler UserRestHandlerImpl) GetById(w http.ResponseWriter, r *http.Request) { @@ -410,6 +345,7 @@ func (handler UserRestHandlerImpl) GetAllV2(w http.ResponseWriter, r *http.Reque common.WriteJsonResp(w, err, res, http.StatusOK) } + func (handler UserRestHandlerImpl) GetAll(w http.ResponseWriter, r *http.Request) { userId, err := handler.userService.GetLoggedInUser(r) if userId == 0 || err != nil { @@ -693,41 +629,16 @@ func (handler UserRestHandlerImpl) CreateRoleGroup(w http.ResponseWriter, r *htt // RBAC enforcer applying token := r.Header.Get("token") - isActionUserSuperAdmin := false - if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { - isActionUserSuperAdmin = true + isAuthorised, err := handler.checkRBACForUserCreate(token, request.SuperAdmin, request.RoleFilters, nil) + if err != nil { + common.WriteJsonResp(w, err, "", http.StatusInternalServerError) + return } - - if request.SuperAdmin && !isActionUserSuperAdmin { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) + if !isAuthorised { + response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) return } - if request.RoleFilters != nil && len(request.RoleFilters) > 0 { - for _, filter := range request.RoleFilters { - if filter.AccessType == bean.APP_ACCESS_TYPE_HELM && !isActionUserSuperAdmin { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - if len(filter.Team) > 0 { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team); !ok { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - } - if filter.Entity == bean.CLUSTER_ENTITIY && !isActionUserSuperAdmin { - if isValidAuth := handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth); !isValidAuth { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - } - } - } else { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, "*"); !ok { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - } //RBAC enforcer Ends err = handler.validator.Struct(request) if err != nil { @@ -787,7 +698,7 @@ func (handler UserRestHandlerImpl) UpdateRoleGroup(w http.ResponseWriter, r *htt return } - res, err := handler.roleGroupService.UpdateRoleGroup(&request, token, handler.CheckManagerAuth) + res, err := handler.roleGroupService.UpdateRoleGroup(&request, token, handler.checkRBACForRoleGroupUpdate) if err != nil { handler.logger.Errorw("service err, UpdateRoleGroup", "err", err, "payload", request) common.WriteJsonResp(w, err, "", http.StatusInternalServerError) @@ -996,31 +907,15 @@ func (handler UserRestHandlerImpl) DeleteRoleGroup(w http.ResponseWriter, r *htt common.WriteJsonResp(w, err, "", http.StatusInternalServerError) return } - token := r.Header.Get("token") - isActionUserSuperAdmin := false - if ok := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*"); ok { - isActionUserSuperAdmin = true + isAuthorised, err := handler.checkRBACForRoleGroupDelete(token, userGroup.RoleFilters) + if err != nil { + common.WriteJsonResp(w, err, "", 
http.StatusInternalServerError) + return } - if userGroup.RoleFilters != nil && len(userGroup.RoleFilters) > 0 { - for _, filter := range userGroup.RoleFilters { - if filter.AccessType == bean.APP_ACCESS_TYPE_HELM && !isActionUserSuperAdmin { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - if len(filter.Team) > 0 { - if ok := handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionDelete, filter.Team); !ok { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - } - if filter.Entity == bean.CLUSTER_ENTITIY { - if isValidAuth := handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth); !isValidAuth { - common.WriteJsonResp(w, errors.New("unauthorized"), nil, http.StatusForbidden) - return - } - } - } + if !isAuthorised { + response.WriteResponse(http.StatusForbidden, "FORBIDDEN", w, errors.New("unauthorized")) + return } //RBAC enforcer Ends @@ -1217,3 +1112,236 @@ func (handler UserRestHandlerImpl) CheckManagerAuth(resource, token string, obje return true } + +func (handler UserRestHandlerImpl) checkRBACForUserCreate(token string, requestSuperAdmin bool, roleFilters []bean.RoleFilter, + roleGroups []bean.UserRoleGroup) (isAuthorised bool, err error) { + isActionUserSuperAdmin := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*") + if requestSuperAdmin && !isActionUserSuperAdmin { + return false, nil + } + isAuthorised = isActionUserSuperAdmin + if !isAuthorised { + if roleFilters != nil && len(roleFilters) > 0 { //auth check inside roleFilters + for _, filter := range roleFilters { + switch { + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY && len(roleFilters) == 1: //if only chartGroup entity is present in request then access will be judged through super-admin access + isAuthorised = isActionUserSuperAdmin + case filter.Entity == bean.CHART_GROUP_ENTITY && len(roleFilters) > 1: //if entities apart from chartGroup entity are present, not checking chartGroup access + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + if len(roleGroups) > 0 { // auth check inside groups + groupRoles, err := handler.roleGroupService.FetchRolesForUserRoleGroups(roleGroups) + if err != nil && err != pg.ErrNoRows { + handler.logger.Errorw("service err, UpdateUser", "err", err, "payload", roleGroups) + return false, err + } + if len(groupRoles) > 0 { + for _, groupRole := range groupRoles { + switch { + case groupRole.Action == bean.ACTION_SUPERADMIN: + isAuthorised = isActionUserSuperAdmin + case groupRole.AccessType == bean.APP_ACCESS_TYPE_HELM || groupRole.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(groupRole.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, groupRole.Team) + case groupRole.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = 
handler.userCommonService.CheckRbacForClusterEntity(groupRole.Cluster, groupRole.Namespace, groupRole.Group, groupRole.Kind, groupRole.Resource, token, handler.CheckManagerAuth) + case groupRole.Entity == bean.CHART_GROUP_ENTITY && len(groupRoles) == 1: //if only chartGroup entity is present in request then access will be judged through super-admin access + isAuthorised = isActionUserSuperAdmin + case groupRole.Entity == bean.CHART_GROUP_ENTITY && len(groupRoles) > 1: //if entities apart from chartGroup entity are present, not checking chartGroup access + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } else { + isAuthorised = false + } + } + } + return isAuthorised, nil +} + +func (handler UserRestHandlerImpl) checkRBACForUserUpdate(token string, userInfo *bean.UserInfo, isUserAlreadySuperAdmin bool, eliminatedRoleFilters, + eliminatedGroupRoles []*repository.RoleModel) (isAuthorised bool, err error) { + isActionUserSuperAdmin := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*") + requestSuperAdmin := userInfo.SuperAdmin + if (requestSuperAdmin || isUserAlreadySuperAdmin) && !isActionUserSuperAdmin { + //if user is going to be provided with super-admin access or already a super-admin then the action user should be a super-admin + return false, nil + } + roleFilters := userInfo.RoleFilters + roleGroups := userInfo.UserRoleGroup + isAuthorised = isActionUserSuperAdmin + eliminatedRolesToBeChecked := append(eliminatedRoleFilters, eliminatedGroupRoles...) + if !isAuthorised { + if roleFilters != nil && len(roleFilters) > 0 { //auth check inside roleFilters + for _, filter := range roleFilters { + switch { + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + if eliminatedRolesToBeChecked != nil && len(eliminatedRolesToBeChecked) > 0 { + for _, filter := range eliminatedRolesToBeChecked { + switch { + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + if len(roleGroups) > 0 { // auth check inside groups + groupRoles, err := handler.roleGroupService.FetchRolesForUserRoleGroups(roleGroups) + if err != nil && err != pg.ErrNoRows { + handler.logger.Errorw("service err, UpdateUser", "err", err, "payload", roleGroups) + return false, err + } + if len(groupRoles) > 0 { + for _, groupRole := range groupRoles { + switch { + case groupRole.Action == 
bean.ACTION_SUPERADMIN: + isAuthorised = isActionUserSuperAdmin + case groupRole.AccessType == bean.APP_ACCESS_TYPE_HELM || groupRole.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(groupRole.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, groupRole.Team) + case groupRole.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(groupRole.Cluster, groupRole.Namespace, groupRole.Group, groupRole.Kind, groupRole.Resource, token, handler.CheckManagerAuth) + case groupRole.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } else { + isAuthorised = false + } + } + } + return isAuthorised, nil +} + +func (handler UserRestHandlerImpl) checkRBACForRoleGroupUpdate(token string, groupInfo *bean.RoleGroup, + eliminatedRoleFilters []*repository.RoleModel) (isAuthorised bool, err error) { + isActionUserSuperAdmin := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*") + requestSuperAdmin := groupInfo.SuperAdmin + if requestSuperAdmin && !isActionUserSuperAdmin { + //if user is going to be provided with super-admin access or already a super-admin then the action user should be a super-admin + return false, nil + } + isAuthorised = isActionUserSuperAdmin + if !isAuthorised { + if groupInfo.RoleFilters != nil && len(groupInfo.RoleFilters) > 0 { //auth check inside roleFilters + for _, filter := range groupInfo.RoleFilters { + switch { + case filter.Action == bean.ACTION_SUPERADMIN: + isAuthorised = isActionUserSuperAdmin + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + if len(eliminatedRoleFilters) > 0 { + for _, filter := range eliminatedRoleFilters { + switch { + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + } + return isAuthorised, nil +} + +func (handler UserRestHandlerImpl) checkRBACForRoleGroupDelete(token string, groupRoles []bean.RoleFilter) (isAuthorised bool, err error) { + isActionUserSuperAdmin := handler.enforcer.Enforce(token, casbin.ResourceGlobal, casbin.ActionGet, "*") + isAuthorised = isActionUserSuperAdmin + if !isAuthorised { + if groupRoles != nil && len(groupRoles) > 0 { //auth check inside roleFilters + for _, filter := range groupRoles { + switch { + case filter.Action == 
bean.ACTION_SUPERADMIN: + isAuthorised = isActionUserSuperAdmin + case filter.AccessType == bean.APP_ACCESS_TYPE_HELM || filter.Entity == bean2.EntityJobs: + isAuthorised = isActionUserSuperAdmin + case len(filter.Team) > 0: + isAuthorised = handler.enforcer.Enforce(token, casbin.ResourceUser, casbin.ActionCreate, filter.Team) + case filter.Entity == bean.CLUSTER_ENTITIY: + isAuthorised = handler.userCommonService.CheckRbacForClusterEntity(filter.Cluster, filter.Namespace, filter.Group, filter.Kind, filter.Resource, token, handler.CheckManagerAuth) + case filter.Entity == bean.CHART_GROUP_ENTITY: + isAuthorised = true + default: + isAuthorised = false + } + if !isAuthorised { + break + } + } + } + } + return isAuthorised, nil +} diff --git a/api/bean/UserRequest.go b/api/bean/UserRequest.go index 392c864a6ad..2ba15a68ca1 100644 --- a/api/bean/UserRequest.go +++ b/api/bean/UserRequest.go @@ -118,6 +118,7 @@ const ( USER_TYPE_API_TOKEN = "apiToken" CHART_GROUP_ENTITY = "chart-group" CLUSTER_ENTITIY = "cluster" + ACTION_SUPERADMIN = "super-admin" ) type UserListingResponse struct { diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index ab3bca55cdc..bab4ef2b470 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. -//go:generate go run github.com/google/wire/cmd/wire +//go:generate go run -mod=mod github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject diff --git a/pkg/apiToken/ApiTokenService.go b/pkg/apiToken/ApiTokenService.go index 91f49c32c1e..a6f54d7ce04 100644 --- a/pkg/apiToken/ApiTokenService.go +++ b/pkg/apiToken/ApiTokenService.go @@ -205,7 +205,7 @@ func (impl ApiTokenServiceImpl) CreateApiToken(request *openapi.CreateApiTokenRe EmailId: email, UserType: bean.USER_TYPE_API_TOKEN, } - createUserResponse, _, err := impl.userService.CreateUser(&createUserRequest, token, managerAuth) + createUserResponse, err := impl.userService.CreateUser(&createUserRequest) if err != nil { impl.logger.Errorw("error while creating user for api-token", "email", email, "error", err) return nil, err diff --git a/pkg/auth/user/RoleGroupService.go b/pkg/auth/user/RoleGroupService.go index a04493dee36..696eb208371 100644 --- a/pkg/auth/user/RoleGroupService.go +++ b/pkg/auth/user/RoleGroupService.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "github.com/devtron-labs/devtron/pkg/auth/user/repository/helper" + "net/http" "strings" "time" @@ -37,7 +38,8 @@ import ( type RoleGroupService interface { CreateRoleGroup(request *bean.RoleGroup) (*bean.RoleGroup, error) - UpdateRoleGroup(request *bean.RoleGroup, token string, managerAuth func(resource, token string, object string) bool) (*bean.RoleGroup, error) + UpdateRoleGroup(request *bean.RoleGroup, token string, checkRBACForGroupUpdate func(token string, groupInfo *bean.RoleGroup, + eliminatedRoleFilters []*repository.RoleModel) (isAuthorised bool, err error)) (*bean.RoleGroup, error) FetchDetailedRoleGroups(req *bean.ListingRequest) ([]*bean.RoleGroup, error) FetchRoleGroupsById(id int32) (*bean.RoleGroup, error) FetchRoleGroups() ([]*bean.RoleGroup, error) @@ -136,21 +138,21 @@ func (impl RoleGroupServiceImpl) CreateRoleGroup(request *bean.RoleGroup) (*bean for index, roleFilter := range request.RoleFilters { entity := roleFilter.Entity if entity == bean.CLUSTER_ENTITIY { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForClusterEntity(roleFilter, request.UserId, model, nil, "", nil, tx, mapping[index]) + policiesToBeAdded, err := 
impl.CreateOrUpdateRoleGroupForClusterEntity(roleFilter, request.UserId, model, nil, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) if err != nil { // making it non-blocking as it is being done for multiple Role filters and does not want this to be blocking. impl.logger.Errorw("error in creating updating role group for cluster entity", "err", err, "roleFilter", roleFilter) } } else if entity == bean2.EntityJobs { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForJobsEntity(roleFilter, request.UserId, model, nil, "", nil, tx, mapping[index]) + policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForJobsEntity(roleFilter, request.UserId, model, nil, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) if err != nil { // making it non-blocking as it is being done for multiple Role filters and does not want this to be blocking. impl.logger.Errorw("error in creating updating role group for jobs entity", "err", err, "roleFilter", roleFilter) } } else { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForOtherEntity(roleFilter, request, model, nil, "", nil, tx, mapping[index]) + policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForOtherEntity(roleFilter, request, model, nil, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) if err != nil { // making it non-blocking as it is being done for multiple Role filters and does not want this to be blocking. @@ -199,7 +201,7 @@ func (impl RoleGroupServiceImpl) CreateRoleGroup(request *bean.RoleGroup) (*bean return request, nil } -func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForClusterEntity(roleFilter bean.RoleFilter, userId int32, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { +func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForClusterEntity(roleFilter bean.RoleFilter, userId int32, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { //var policiesToBeAdded []casbin2.Policy namespaces := strings.Split(roleFilter.Namespace, ",") groups := strings.Split(roleFilter.Group, ",") @@ -213,12 +215,6 @@ func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForClusterEntity(roleFil for _, group := range groups { for _, kind := range kinds { for _, resource := range resources { - if managerAuth != nil { - isValidAuth := impl.userCommonService.CheckRbacForClusterEntity(roleFilter.Cluster, namespace, group, kind, resource, token, managerAuth) - if !isValidAuth { - continue - } - } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, "", "", "", "", accessType, roleFilter.Cluster, namespace, group, kind, resource, actionType, false, "") if err != nil { impl.logger.Errorw("error in getting new role model by filter") @@ -263,7 +259,7 @@ func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForClusterEntity(roleFil return policiesToBeAdded, nil } -func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForOtherEntity(roleFilter bean.RoleFilter, request *bean.RoleGroup, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { +func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForOtherEntity(roleFilter bean.RoleFilter, request 
*bean.RoleGroup, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { actionType := roleFilter.Action accessType := roleFilter.AccessType entity := roleFilter.Entity @@ -319,7 +315,7 @@ func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForOtherEntity(roleFilte return policiesToBeAdded, nil } -func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForJobsEntity(roleFilter bean.RoleFilter, userId int32, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { +func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForJobsEntity(roleFilter bean.RoleFilter, userId int32, model *repository.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx, capacity int) ([]casbin2.Policy, error) { actionType := roleFilter.Action accessType := roleFilter.AccessType entity := roleFilter.Entity @@ -372,7 +368,8 @@ func (impl RoleGroupServiceImpl) CreateOrUpdateRoleGroupForJobsEntity(roleFilter return policiesToBeAdded, nil } -func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token string, managerAuth func(resource, token string, object string) bool) (*bean.RoleGroup, error) { +func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token string, checkRBACForGroupUpdate func(token string, groupInfo *bean.RoleGroup, + eliminatedRoleFilters []*repository.RoleModel) (isAuthorised bool, err error)) (*bean.RoleGroup, error) { dbConnection := impl.roleGroupRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { @@ -404,6 +401,8 @@ func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token var eliminatedPolicies []casbin2.Policy capacity, mapping := impl.userCommonService.GetCapacityForRoleFilter(request.RoleFilters) var policies = make([]casbin2.Policy, 0, capacity) + var eliminatedRoleModels []*repository.RoleModel + var items []casbin2.Policy if request.SuperAdmin == false { roleGroupMappingModels, err := impl.roleGroupRepository.GetRoleGroupRoleMappingByRoleGroupId(roleGroup.Id) if err != nil { @@ -417,7 +416,7 @@ func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token // DELETE PROCESS STARTS - items, err := impl.userCommonService.RemoveRolesAndReturnEliminatedPoliciesForGroups(request, existingRoles, eliminatedRoles, tx, token, managerAuth) + items, eliminatedRoleModels, err = impl.userCommonService.RemoveRolesAndReturnEliminatedPoliciesForGroups(request, existingRoles, eliminatedRoles, tx) if err != nil { return nil, err } @@ -427,24 +426,16 @@ func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token //Adding New Policies for index, roleFilter := range request.RoleFilters { if roleFilter.Entity == bean.CLUSTER_ENTITIY { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForClusterEntity(roleFilter, request.UserId, roleGroup, existingRoles, token, managerAuth, tx, mapping[index]) + policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForClusterEntity(roleFilter, request.UserId, roleGroup, existingRoles, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) 
if err != nil { impl.logger.Errorw("error in creating updating role group for cluster entity", "err", err, "roleFilter", roleFilter) } } else { - if len(roleFilter.Team) > 0 { - // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin2.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } switch roleFilter.Entity { case bean2.EntityJobs: { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForJobsEntity(roleFilter, request.UserId, roleGroup, existingRoles, token, managerAuth, tx, mapping[index]) + policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForJobsEntity(roleFilter, request.UserId, roleGroup, existingRoles, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) if err != nil { impl.logger.Errorw("error in creating updating role group for jobs entity", "err", err, "roleFilter", roleFilter) @@ -452,7 +443,7 @@ func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token } default: { - policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForOtherEntity(roleFilter, request, roleGroup, existingRoles, token, managerAuth, tx, mapping[index]) + policiesToBeAdded, err := impl.CreateOrUpdateRoleGroupForOtherEntity(roleFilter, request, roleGroup, existingRoles, tx, mapping[index]) policies = append(policies, policiesToBeAdded...) if err != nil { impl.logger.Errorw("error in creating updating role group for other entity", "err", err, "roleFilter", roleFilter) @@ -486,6 +477,22 @@ func (impl RoleGroupServiceImpl) UpdateRoleGroup(request *bean.RoleGroup, token policies = append(policies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(roleGroup.CasbinName), Obj: casbin2.Object(roleModel.Role)}) } } + + if checkRBACForGroupUpdate != nil { + isAuthorised, err := checkRBACForGroupUpdate(token, request, eliminatedRoleModels) + if err != nil { + impl.logger.Errorw("error in checking RBAC for role group update", "err", err, "request", request) + return nil, err + } else if !isAuthorised { + impl.logger.Errorw("rbac check failed for role group update", "request", request) + return nil, &util.ApiError{ + Code: "403", + HttpStatusCode: http.StatusForbidden, + UserMessage: "unauthorized", + } + } + } + //deleting policies from casbin impl.logger.Debugw("eliminated policies", "eliminatedPolicies", eliminatedPolicies) if len(eliminatedPolicies) > 0 { diff --git a/pkg/auth/user/UserCommonService.go b/pkg/auth/user/UserCommonService.go index c0ed2c3371f..6ce69cf3007 100644 --- a/pkg/auth/user/UserCommonService.go +++ b/pkg/auth/user/UserCommonService.go @@ -36,8 +36,8 @@ import ( type UserCommonService interface { CreateDefaultPoliciesForAllTypes(team, entityName, env, entity, cluster, namespace, group, kind, resource, actionType, accessType, workflow string, userId int32) (bool, error, []casbin.Policy) - RemoveRolesAndReturnEliminatedPolicies(userInfo *bean.UserInfo, existingRoleIds map[int]repository.UserRoleModel, eliminatedRoleIds map[int]*repository.UserRoleModel, tx *pg.Tx, token string, managerAuth func(resource, token, object string) bool) ([]casbin.Policy, error) - RemoveRolesAndReturnEliminatedPoliciesForGroups(request *bean.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, eliminatedRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx, token string, managerAuth func(resource string, token string, object string) bool) ([]casbin.Policy, error) + RemoveRolesAndReturnEliminatedPolicies(userInfo *bean.UserInfo, existingRoleIds 
map[int]repository.UserRoleModel, eliminatedRoleIds map[int]*repository.UserRoleModel, tx *pg.Tx) ([]casbin.Policy, []*repository.RoleModel, error) + RemoveRolesAndReturnEliminatedPoliciesForGroups(request *bean.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, eliminatedRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx) ([]casbin.Policy, []*repository.RoleModel, error) CheckRbacForClusterEntity(cluster, namespace, group, kind, resource, token string, managerAuth func(resource, token, object string) bool) bool GetCapacityForRoleFilter(roleFilters []bean.RoleFilter) (int, map[int]int) BuildRoleFilterKeyForCluster(roleFilterMap map[string]*bean.RoleFilter, role repository.RoleModel, key string) @@ -238,7 +238,7 @@ func getResolvedPValMapValue(rawValue string) string { func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInfo *bean.UserInfo, existingRoleIds map[int]repository.UserRoleModel, eliminatedRoleIds map[int]*repository.UserRoleModel, - tx *pg.Tx, token string, managerAuth func(resource, token, object string) bool) ([]casbin.Policy, error) { + tx *pg.Tx) ([]casbin.Policy, []*repository.RoleModel, error) { var eliminatedPolicies []casbin.Policy // DELETE Removed Items for _, roleFilter := range userInfo.RoleFilters { @@ -253,14 +253,10 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf for _, group := range groups { for _, kind := range kinds { for _, resource := range resources { - isValidAuth := impl.CheckRbacForClusterEntity(roleFilter.Cluster, namespace, group, kind, resource, token, managerAuth) - if !isValidAuth { - continue - } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, "", "", "", "", accessType, roleFilter.Cluster, namespace, group, kind, resource, actionType, false, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "roleFilter", roleFilter) - return nil, err + return nil, nil, err } if roleModel.Id == 0 { impl.logger.Warnw("no role found for given filter", "filter", roleFilter) @@ -274,13 +270,6 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf } } } else if roleFilter.Entity == bean2.EntityJobs { - if len(roleFilter.Team) > 0 { // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } entityNames := strings.Split(roleFilter.EntityName, ",") environments := strings.Split(roleFilter.Environment, ",") workflows := strings.Split(roleFilter.Workflow, ",") @@ -292,7 +281,7 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", actionType, false, workflow) if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", userInfo) - return nil, err + return nil, nil, err } if roleModel.Id == 0 { impl.logger.Debugw("no role found for given filter", "filter", roleFilter) @@ -306,13 +295,6 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf } } } else { - if len(roleFilter.Team) > 0 { // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - 
} entityNames := strings.Split(roleFilter.EntityName, ",") environments := strings.Split(roleFilter.Environment, ",") actionType := roleFilter.Action @@ -322,11 +304,11 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", actionType, false, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", userInfo) - return nil, err + return nil, nil, err } oldRoleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", actionType, true, "") if err != nil { - return nil, err + return nil, nil, err } if roleModel.Id == 0 { impl.logger.Debugw("no role found for given filter", "filter", roleFilter) @@ -349,37 +331,25 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPolicies(userInf // delete remaining Ids from casbin role mapping table in orchestrator and casbin policy db // which are existing but not provided in this request - + eliminatedRoles := make([]*repository.RoleModel, 0, len(eliminatedRoleIds)) for _, userRoleModel := range eliminatedRoleIds { role, err := impl.userAuthRepository.GetRoleById(userRoleModel.RoleId) if err != nil { - return nil, err - } - if len(role.Team) > 0 { - rbacObject := fmt.Sprintf("%s", role.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } - if role.Entity == bean.CLUSTER_ENTITIY { - isValidAuth := impl.CheckRbacForClusterEntity(role.Cluster, role.Namespace, role.Group, role.Kind, role.Resource, token, managerAuth) - if !isValidAuth { - continue - } + return nil, nil, err } + eliminatedRoles = append(eliminatedRoles, role) _, err = impl.userAuthRepository.DeleteUserRoleMapping(userRoleModel, tx) if err != nil { impl.logger.Errorw("Error in delete user role mapping", "user", userInfo) - return nil, err + return nil, nil, err } eliminatedPolicies = append(eliminatedPolicies, casbin.Policy{Type: "g", Sub: casbin.Subject(userInfo.EmailId), Obj: casbin.Object(role.Role)}) } // DELETE ENDS - return eliminatedPolicies, nil + return eliminatedPolicies, eliminatedRoles, nil } -func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroups(request *bean.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, eliminatedRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx, token string, managerAuth func(resource string, token string, object string) bool) ([]casbin.Policy, error) { +func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroups(request *bean.RoleGroup, existingRoles map[int]*repository.RoleGroupRoleMapping, eliminatedRoles map[int]*repository.RoleGroupRoleMapping, tx *pg.Tx) ([]casbin.Policy, []*repository.RoleModel, error) { // Filter out removed items in current request //var policies []casbin.Policy for _, roleFilter := range request.RoleFilters { @@ -395,19 +365,15 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup for _, group := range groups { for _, kind := range kinds { for _, resource := range resources { - isValidAuth := impl.CheckRbacForClusterEntity(roleFilter.Cluster, namespace, group, kind, resource, token, managerAuth) - if !isValidAuth { - continue - } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, "", "", "", "", 
accessType, roleFilter.Cluster, namespace, group, kind, resource, actionType, false, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", request) - return nil, err + return nil, nil, err } oldRoleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, "", "", "", "", accessType, roleFilter.Cluster, namespace, group, kind, resource, actionType, true, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", request) - return nil, err + return nil, nil, err } if roleModel.Id == 0 && oldRoleModel.Id == 0 { impl.logger.Warnw("no role found for given filter", "filter", roleFilter) @@ -425,13 +391,6 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup } } } else if entity == bean2.EntityJobs { - if len(roleFilter.Team) > 0 { // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } entityNames := strings.Split(roleFilter.EntityName, ",") environments := strings.Split(roleFilter.Environment, ",") workflows := strings.Split(roleFilter.Workflow, ",") @@ -443,7 +402,7 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", "", false, workflow) if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", request) - return nil, err + return nil, nil, err } if roleModel.Id == 0 { impl.logger.Warnw("no role found for given filter", "filter", roleFilter) @@ -457,13 +416,6 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup } } } else { - if len(roleFilter.Team) > 0 { // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } entityNames := strings.Split(roleFilter.EntityName, ",") environments := strings.Split(roleFilter.Environment, ",") accessType := roleFilter.AccessType @@ -473,12 +425,12 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", "", false, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter", "user", request) - return nil, err + return nil, nil, err } oldRoleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(roleFilter.Entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", "", true, "") if err != nil { impl.logger.Errorw("Error in fetching roles by filter by old values", "user", request) - return nil, err + return nil, nil, err } if roleModel.Id == 0 && oldRoleModel.Id == 0 { impl.logger.Warnw("no role found for given filter", "filter", roleFilter) @@ -502,35 +454,24 @@ func (impl UserCommonServiceImpl) RemoveRolesAndReturnEliminatedPoliciesForGroup //delete remaining Ids from casbin role mapping table in orchestrator and casbin policy db // which are existing but not provided in this request var eliminatedPolicies []casbin.Policy + eliminatedRoleModels := make([]*repository.RoleModel, 0, len(eliminatedRoles)) for _, model := range 
eliminatedRoles { role, err := impl.userAuthRepository.GetRoleById(model.RoleId) if err != nil { - return nil, err - } - if len(role.Team) > 0 { - rbacObject := fmt.Sprintf("%s", role.Team) - isValidAuth := managerAuth(casbin.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } - if role.Entity == bean.CLUSTER_ENTITIY { - isValidAuth := impl.CheckRbacForClusterEntity(role.Cluster, role.Namespace, role.Group, role.Kind, role.Resource, token, managerAuth) - if !isValidAuth { - continue - } + return nil, nil, err } + eliminatedRoleModels = append(eliminatedRoleModels, role) _, err = impl.roleGroupRepository.DeleteRoleGroupRoleMapping(model, tx) if err != nil { - return nil, err + return nil, nil, err } policyGroup, err := impl.roleGroupRepository.GetRoleGroupById(model.RoleGroupId) if err != nil { - return nil, err + return nil, nil, err } eliminatedPolicies = append(eliminatedPolicies, casbin.Policy{Type: "g", Sub: casbin.Subject(policyGroup.CasbinName), Obj: casbin.Object(role.Role)}) } - return eliminatedPolicies, nil + return eliminatedPolicies, eliminatedRoleModels, nil } func containsArr(s []string, e string) bool { diff --git a/pkg/auth/user/UserService.go b/pkg/auth/user/UserService.go index 3e17f76e442..e065c61bf33 100644 --- a/pkg/auth/user/UserService.go +++ b/pkg/auth/user/UserService.go @@ -51,9 +51,10 @@ const ( ) type UserService interface { - CreateUser(userInfo *bean.UserInfo, token string, managerAuth func(resource, token string, object string) bool) ([]*bean.UserInfo, []bean.RestrictedGroup, error) + CreateUser(userInfo *bean.UserInfo) ([]*bean.UserInfo, error) SelfRegisterUserIfNotExists(userInfo *bean.UserInfo) ([]*bean.UserInfo, error) - UpdateUser(userInfo *bean.UserInfo, token string, managerAuth func(resource, token string, object string) bool) (*bean.UserInfo, bool, bool, []bean.RestrictedGroup, error) + UpdateUser(userInfo *bean.UserInfo, token string, checkRBACForUserUpdate func(token string, userInfo *bean.UserInfo, isUserAlreadySuperAdmin bool, + eliminatedRoleFilters, eliminatedGroupRoles []*repository.RoleModel) (isAuthorised bool, err error)) (*bean.UserInfo, error) GetById(id int32) (*bean.UserInfo, error) GetAll() ([]bean.UserInfo, error) GetAllWithFilters(request *bean.ListingRequest) (*bean.UserListingResponse, error) @@ -276,34 +277,33 @@ func (impl *UserServiceImpl) saveUser(userInfo *bean.UserInfo, emailId string) ( return userInfo, nil } -func (impl *UserServiceImpl) CreateUser(userInfo *bean.UserInfo, token string, managerAuth func(resource, token string, object string) bool) ([]*bean.UserInfo, []bean.RestrictedGroup, error) { +func (impl *UserServiceImpl) CreateUser(userInfo *bean.UserInfo) ([]*bean.UserInfo, error) { var pass []string var userResponse []*bean.UserInfo - var restrictedGroups []bean.RestrictedGroup emailIds := strings.Split(userInfo.EmailId, ",") for _, emailId := range emailIds { dbUser, err := impl.userRepository.FetchActiveOrDeletedUserByEmail(emailId) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error while fetching user from db", "error", err) - return nil, nil, err + return nil, err } //if found, update it with new roles if dbUser != nil && dbUser.Id > 0 { - userInfo, err = impl.updateUserIfExists(userInfo, dbUser, emailId, token, managerAuth) + userInfo, err = impl.updateUserIfExists(userInfo, dbUser, emailId) if err != nil { impl.logger.Errorw("error while create user if exists in db", "error", err) - return nil, nil, err + return nil, err } } // if not found, create new user if err == 
pg.ErrNoRows { - userInfo, restrictedGroups, err = impl.createUserIfNotExists(userInfo, emailId, token, managerAuth) + userInfo, err = impl.createUserIfNotExists(userInfo, emailId) if err != nil { impl.logger.Errorw("error while create user if not exists in db", "error", err) - return nil, nil, err + return nil, err } } @@ -313,11 +313,10 @@ func (impl *UserServiceImpl) CreateUser(userInfo *bean.UserInfo, token string, m userResponse = append(userResponse, &bean.UserInfo{Id: userInfo.Id, EmailId: emailId, Groups: userInfo.Groups, RoleFilters: userInfo.RoleFilters, SuperAdmin: userInfo.SuperAdmin, UserRoleGroup: userInfo.UserRoleGroup}) } - return userResponse, restrictedGroups, nil + return userResponse, nil } -func (impl *UserServiceImpl) updateUserIfExists(userInfo *bean.UserInfo, dbUser *repository.UserModel, emailId string, - token string, managerAuth func(resource, token, object string) bool) (*bean.UserInfo, error) { +func (impl *UserServiceImpl) updateUserIfExists(userInfo *bean.UserInfo, dbUser *repository.UserModel, emailId string) (*bean.UserInfo, error) { updateUserInfo, err := impl.GetById(dbUser.Id) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error while fetching user from db", "error", err) @@ -332,8 +331,8 @@ func (impl *UserServiceImpl) updateUserIfExists(userInfo *bean.UserInfo, dbUser updateUserInfo.Groups = impl.mergeGroups(updateUserInfo.Groups, userInfo.Groups) updateUserInfo.UserRoleGroup = impl.mergeUserRoleGroup(updateUserInfo.UserRoleGroup, userInfo.UserRoleGroup) updateUserInfo.UserId = userInfo.UserId - updateUserInfo.EmailId = emailId // override case sensitivity - updateUserInfo, _, _, _, err = impl.UpdateUser(updateUserInfo, token, managerAuth) + updateUserInfo.EmailId = emailId // override case sensitivity + updateUserInfo, err = impl.UpdateUser(updateUserInfo, "", nil) //rbac already checked in create request handled if err != nil { impl.logger.Errorw("error while update user", "error", err) return nil, err @@ -341,12 +340,12 @@ func (impl *UserServiceImpl) updateUserIfExists(userInfo *bean.UserInfo, dbUser return userInfo, nil } -func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emailId string, token string, managerAuth func(resource string, token string, object string) bool) (*bean.UserInfo, []bean.RestrictedGroup, error) { +func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emailId string) (*bean.UserInfo, error) { // if not found, create new user dbConnection := impl.userRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { - return nil, nil, err + return nil, err } // Rollback tx on error. defer tx.Rollback() @@ -354,7 +353,7 @@ func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emai _, err = impl.validateUserRequest(userInfo) if err != nil { err = &util.ApiError{HttpStatusCode: http.StatusBadRequest, UserMessage: "Invalid request, please provide role filters"} - return nil, nil, err + return nil, err } //create new user in our db on d basis of info got from google api or hex. 
assign a basic role @@ -376,67 +375,54 @@ func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emai InternalMessage: "failed to create new user in db", UserMessage: fmt.Sprintf("requested by %d", userInfo.UserId), } - return nil, nil, err + return nil, err } userInfo.Id = model.Id //loading policy for safety casbin2.LoadPolicy() - var restrictedGroups []bean.RestrictedGroup - //Starts Role and Mapping capacity, mapping := impl.userCommonService.GetCapacityForRoleFilter(userInfo.RoleFilters) //var policies []casbin2.Policy var policies = make([]casbin2.Policy, 0, capacity) if userInfo.SuperAdmin == false { - isActionPerformingUserSuperAdmin, err := impl.IsSuperAdmin(int(userInfo.UserId)) - if err != nil { - return nil, nil, err - } for index, roleFilter := range userInfo.RoleFilters { impl.logger.Infow("Creating Or updating User Roles for RoleFilter ") entity := roleFilter.Entity - policiesToBeAdded, _, err := impl.CreateOrUpdateUserRolesForAllTypes(roleFilter, userInfo.UserId, model, nil, token, managerAuth, tx, entity, mapping[index]) + policiesToBeAdded, _, err := impl.CreateOrUpdateUserRolesForAllTypes(roleFilter, userInfo.UserId, model, nil, tx, entity, mapping[index]) if err != nil { impl.logger.Errorw("error in creating user roles for Alltypes", "err", err) - return nil, nil, err + return nil, err } policies = append(policies, policiesToBeAdded...) - } // START GROUP POLICY for _, item := range userInfo.UserRoleGroup { userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item.RoleGroup.Name) if err != nil { - return nil, nil, err - } - hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(userGroup.CasbinName, token, managerAuth, isActionPerformingUserSuperAdmin) - if hasAccessToGroup { - policies = append(policies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) - } else { - restrictedGroup := adapter.CreateRestrictedGroup(item.RoleGroup.Name, hasSuperAdminPermission) - restrictedGroups = append(restrictedGroups, restrictedGroup) + return nil, err } + policies = append(policies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) + // below is old code where we used to re check group access, but not needed now as we have moved group rbac to restHandler + + //hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(userGroup.CasbinName, token, managerAuth, isActionPerformingUserSuperAdmin) + //if hasAccessToGroup { + //policies = append(policies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) + //} else { + // restrictedGroup := adapter.CreateRestrictedGroup(item.RoleGroup.Name, hasSuperAdminPermission) + // restrictedGroups = append(restrictedGroups, restrictedGroup) + //} } // END GROUP POLICY } else if userInfo.SuperAdmin == true { - - isSuperAdmin, err := impl.IsSuperAdmin(int(userInfo.UserId)) - if err != nil { - return nil, nil, err - } - if isSuperAdmin == false { - err = &util.ApiError{HttpStatusCode: http.StatusForbidden, UserMessage: "Invalid request, not allow to update super admin type user"} - return nil, nil, err - } flag, err := impl.userAuthRepository.CreateRoleForSuperAdminIfNotExists(tx, userInfo.UserId) if err != nil || flag == false { - return nil, nil, err + return nil, err } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes("", "", "", "", bean2.SUPER_ADMIN, "", "", "", "", "", "", "", false, "") if err != nil { - 
return nil, nil, err + return nil, err } if roleModel.Id > 0 { userRoleModel := &repository.UserRoleModel{UserId: model.Id, RoleId: roleModel.Id, AuditLog: sql.AuditLog{ @@ -447,11 +433,10 @@ func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emai }} userRoleModel, err = impl.userAuthRepository.CreateUserRoleMapping(userRoleModel, tx) if err != nil { - return nil, nil, err + return nil, err } policies = append(policies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(model.EmailId), Obj: casbin2.Object(roleModel.Role)}) } - } impl.logger.Infow("Checking the length of policies to be added and Adding in casbin ") if len(policies) > 0 { @@ -462,30 +447,30 @@ func (impl *UserServiceImpl) createUserIfNotExists(userInfo *bean.UserInfo, emai //Ends err = tx.Commit() if err != nil { - return nil, nil, err + return nil, err } //loading policy for syncing orchestrator to casbin with newly added policies casbin2.LoadPolicy() - return userInfo, restrictedGroups, nil + return userInfo, nil } -func (impl *UserServiceImpl) CreateOrUpdateUserRolesForAllTypes(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { +func (impl *UserServiceImpl) CreateOrUpdateUserRolesForAllTypes(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { //var policiesToBeAdded []casbin2.Policy var policiesToBeAdded = make([]casbin2.Policy, 0, capacity) var err error rolesChanged := false if entity == bean2.CLUSTER { - policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForClusterEntity(roleFilter, userId, model, existingRoles, token, managerAuth, tx, entity, capacity) + policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForClusterEntity(roleFilter, userId, model, existingRoles, tx, entity, capacity) if err != nil { return nil, false, err } } else if entity == bean2.EntityJobs { - policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForJobsEntity(roleFilter, userId, model, existingRoles, token, managerAuth, tx, entity, capacity) + policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForJobsEntity(roleFilter, userId, model, existingRoles, tx, entity, capacity) if err != nil { return nil, false, err } } else { - policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForOtherEntity(roleFilter, userId, model, existingRoles, token, managerAuth, tx, entity, capacity) + policiesToBeAdded, rolesChanged, err = impl.createOrUpdateUserRolesForOtherEntity(roleFilter, userId, model, existingRoles, tx, entity, capacity) if err != nil { return nil, false, err } @@ -493,7 +478,7 @@ func (impl *UserServiceImpl) CreateOrUpdateUserRolesForAllTypes(roleFilter bean. 
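// NOTE: an illustrative sketch, not part of this patch. With the token/managerAuth
// parameters removed from the service layer, UpdateUser (and, analogously,
// UpdateRoleGroup with checkRBACForGroupUpdate) now expect the REST handler to
// supply the RBAC decision as a callback. A minimal callback matching the new
// signature might look like the following, assuming the handler still holds a
// managerAuth-style check function and has already resolved whether the acting
// user is a super admin (isActionUserSuperAdmin); both names are placeholders,
// not code from this PR.
checkRBACForUserUpdate := func(token string, userInfo *bean.UserInfo, isUserAlreadySuperAdmin bool,
	eliminatedRoleFilters, eliminatedGroupRoles []*repository.RoleModel) (bool, error) {
	// granting super admin, or touching an existing super-admin user, should
	// still require a super-admin caller
	if (userInfo.SuperAdmin || isUserAlreadySuperAdmin) && !isActionUserSuperAdmin {
		return false, nil
	}
	// every team-scoped role eliminated by this update must pass the same manager
	// check that previously ran inside RemoveRolesAndReturnEliminatedPolicies
	for _, roles := range [][]*repository.RoleModel{eliminatedRoleFilters, eliminatedGroupRoles} {
		for _, role := range roles {
			if len(role.Team) > 0 && !managerAuth(casbin2.ResourceUser, token, role.Team) {
				return false, nil
			}
		}
	}
	return true, nil
}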
return policiesToBeAdded, rolesChanged, nil } -func (impl *UserServiceImpl) createOrUpdateUserRolesForClusterEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { +func (impl *UserServiceImpl) createOrUpdateUserRolesForClusterEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { //var policiesToBeAdded []casbin2.Policy rolesChanged := false @@ -510,12 +495,6 @@ func (impl *UserServiceImpl) createOrUpdateUserRolesForClusterEntity(roleFilter for _, group := range groups { for _, kind := range kinds { for _, resource := range resources { - if managerAuth != nil { - isValidAuth := impl.userCommonService.CheckRbacForClusterEntity(roleFilter.Cluster, namespace, group, kind, resource, token, managerAuth) - if !isValidAuth { - continue - } - } impl.logger.Infow("Getting Role by filter for cluster") roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, "", "", "", "", accessType, roleFilter.Cluster, namespace, group, kind, resource, actionType, false, "") if err != nil { @@ -646,12 +625,13 @@ func (impl UserServiceImpl) mergeUserRoleGroup(oldUserRoleGroups []bean.UserRole return finalUserRoleGroups } -func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, managerAuth func(resource, token string, object string) bool) (*bean.UserInfo, bool, bool, []bean.RestrictedGroup, error) { +func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, checkRBACForUserUpdate func(token string, userInfo *bean.UserInfo, + isUserAlreadySuperAdmin bool, eliminatedRoleFilters, eliminatedGroupRoles []*repository.RoleModel) (isAuthorised bool, err error)) (*bean.UserInfo, error) { //checking if request for same user is being processed isLocked := impl.getUserReqLockStateById(userInfo.Id) if isLocked { impl.logger.Errorw("received concurrent request for user update, UpdateUser", "userId", userInfo.Id) - return nil, false, false, nil, &util.ApiError{ + return nil, &util.ApiError{ Code: "409", HttpStatusCode: http.StatusConflict, UserMessage: ConcurrentRequestLockError, @@ -661,7 +641,7 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m err := impl.lockUnlockUserReqState(userInfo.Id, true) if err != nil { impl.logger.Errorw("error in locking, lockUnlockUserReqState", "userId", userInfo.Id) - return nil, false, false, nil, err + return nil, err } defer func() { err = impl.lockUnlockUserReqState(userInfo.Id, false) @@ -673,30 +653,12 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m //validating if action user is not admin and trying to update user who has super admin polices, return 403 isUserSuperAdmin, err := impl.IsSuperAdmin(int(userInfo.Id)) if err != nil { - return nil, false, false, nil, err - } - isActionPerformingUserSuperAdmin, err := impl.IsSuperAdmin(int(userInfo.UserId)) - if err != nil { - return nil, false, false, nil, err - } - //if request comes to make user as a super admin or user already a super admin (who'is going to be updated), action performing user should have super admin access - if userInfo.SuperAdmin || isUserSuperAdmin { - if !isActionPerformingUserSuperAdmin { - err = 
&util.ApiError{HttpStatusCode: http.StatusForbidden, UserMessage: "Invalid request, not allow to update super admin type user"} - impl.logger.Errorw("Invalid request, not allow to update super admin type user", "error", err) - return nil, false, false, nil, err - } - } - if userInfo.SuperAdmin && isUserSuperAdmin { - err = &util.ApiError{HttpStatusCode: http.StatusBadRequest, UserMessage: "User Already A Super Admin"} - impl.logger.Errorw("user already a superAdmin", "error", err) - return nil, false, false, nil, err + return nil, err } - dbConnection := impl.userRepository.GetConnection() tx, err := dbConnection.Begin() if err != nil { - return nil, false, false, nil, err + return nil, err } // Rollback tx on error. defer tx.Rollback() @@ -704,22 +666,20 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m model, err := impl.userRepository.GetByIdIncludeDeleted(userInfo.Id) if err != nil { impl.logger.Errorw("error while fetching user from db", "error", err) - return nil, false, false, nil, err + return nil, err } var eliminatedPolicies []casbin2.Policy capacity, mapping := impl.userCommonService.GetCapacityForRoleFilter(userInfo.RoleFilters) var addedPolicies = make([]casbin2.Policy, 0, capacity) - restrictedGroups := []bean.RestrictedGroup{} - rolesChanged := false - groupsModified := false //loading policy for safety casbin2.LoadPolicy() + var eliminatedRoles, eliminatedGroupRoles []*repository.RoleModel if userInfo.SuperAdmin == false { //Starts Role and Mapping userRoleModels, err := impl.userAuthRepository.GetUserRoleMappingByUserId(model.Id) if err != nil { - return nil, false, false, nil, err + return nil, err } existingRoleIds := make(map[int]repository.UserRoleModel) eliminatedRoleIds := make(map[int]*repository.UserRoleModel) @@ -732,40 +692,34 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m _, err = impl.validateUserRequest(userInfo) if err != nil { err = &util.ApiError{HttpStatusCode: http.StatusBadRequest, UserMessage: "Invalid request, please provide role filters"} - return nil, false, false, nil, err + return nil, err } // DELETE Removed Items - items, err := impl.userCommonService.RemoveRolesAndReturnEliminatedPolicies(userInfo, existingRoleIds, eliminatedRoleIds, tx, token, managerAuth) + var items []casbin2.Policy + items, eliminatedRoles, err = impl.userCommonService.RemoveRolesAndReturnEliminatedPolicies(userInfo, existingRoleIds, eliminatedRoleIds, tx) if err != nil { - return nil, false, false, nil, err + return nil, err } eliminatedPolicies = append(eliminatedPolicies, items...) - if len(eliminatedPolicies) > 0 { - rolesChanged = true - } //Adding New Policies for index, roleFilter := range userInfo.RoleFilters { entity := roleFilter.Entity - - policiesToBeAdded, rolesChangedFromRoleUpdate, err := impl.CreateOrUpdateUserRolesForAllTypes(roleFilter, userInfo.UserId, model, existingRoleIds, token, managerAuth, tx, entity, mapping[index]) + policiesToBeAdded, _, err := impl.CreateOrUpdateUserRolesForAllTypes(roleFilter, userInfo.UserId, model, existingRoleIds, tx, entity, mapping[index]) if err != nil { impl.logger.Errorw("error in creating user roles for All Types", "err", err) - return nil, false, false, nil, err + return nil, err } addedPolicies = append(addedPolicies, policiesToBeAdded...) 
- rolesChanged = rolesChangedFromRoleUpdate - } //ROLE GROUP SETUP newGroupMap := make(map[string]string) oldGroupMap := make(map[string]string) userCasbinRoles, err := impl.CheckUserRoles(userInfo.Id) - if err != nil { - return nil, false, false, nil, err + return nil, err } for _, oldItem := range userCasbinRoles { oldGroupMap[oldItem] = oldItem @@ -774,62 +728,85 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m for _, item := range userInfo.UserRoleGroup { userGroup, err := impl.roleGroupRepository.GetRoleGroupByName(item.RoleGroup.Name) if err != nil { - return nil, false, false, nil, err + return nil, err } newGroupMap[userGroup.CasbinName] = userGroup.CasbinName if _, ok := oldGroupMap[userGroup.CasbinName]; !ok { - //check permission for new group which is going to add - hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(userGroup.CasbinName, token, managerAuth, isActionPerformingUserSuperAdmin) - if hasAccessToGroup { - groupsModified = true - addedPolicies = append(addedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) - } else { - restrictedGroup := adapter.CreateRestrictedGroup(item.RoleGroup.Name, hasSuperAdminPermission) - restrictedGroups = append(restrictedGroups, restrictedGroup) - } + addedPolicies = append(addedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) + // //check permission for new group which is going to add + //hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(userGroup.CasbinName, token, managerAuth, isActionPerformingUserSuperAdmin) + //if hasAccessToGroup { + // groupsModified = true + // addedPolicies = append(addedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(userGroup.CasbinName)}) + //} else { + // restrictedGroup := adapter.CreateRestrictedGroup(item.RoleGroup.Name, hasSuperAdminPermission) + // restrictedGroups = append(restrictedGroups, restrictedGroup) + //} } } - + eliminatedGroupCasbinNames := make([]string, 0, len(newGroupMap)) for _, item := range userCasbinRoles { if _, ok := newGroupMap[item]; !ok { if item != bean.SUPERADMIN { //check permission for group which is going to eliminate if strings.HasPrefix(item, "group:") { - hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(item, token, managerAuth, isActionPerformingUserSuperAdmin) - if hasAccessToGroup { - if strings.HasPrefix(item, "group:") { - groupsModified = true - } - eliminatedPolicies = append(eliminatedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(item)}) - } else { - restrictedGroup := adapter.CreateRestrictedGroup(item, hasSuperAdminPermission) - restrictedGroups = append(restrictedGroups, restrictedGroup) - } + eliminatedPolicies = append(eliminatedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(item)}) + eliminatedGroupCasbinNames = append(eliminatedGroupCasbinNames, item) + //hasAccessToGroup, hasSuperAdminPermission := impl.checkGroupAuth(item, token, managerAuth, isActionPerformingUserSuperAdmin) + //if hasAccessToGroup { + // if strings.HasPrefix(item, "group:") { + // groupsModified = true + // } + // eliminatedPolicies = append(eliminatedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(userInfo.EmailId), Obj: casbin2.Object(item)}) + //} else { + // restrictedGroup := adapter.CreateRestrictedGroup(item, 
hasSuperAdminPermission) + // restrictedGroups = append(restrictedGroups, restrictedGroup) + //} } } } + } // END GROUP POLICY + if len(eliminatedGroupCasbinNames) > 0 { + eliminatedGroupRoles, err = impl.roleGroupRepository.GetRolesByGroupCasbinNames(eliminatedGroupCasbinNames) + if err != nil { + impl.logger.Errorw("error, GetRolesByGroupCasbinNames", "err", err, "eliminatedGroupCasbinNames", eliminatedGroupCasbinNames) + return nil, err + } } - // END GROUP POLICY - } else if userInfo.SuperAdmin == true { flag, err := impl.userAuthRepository.CreateRoleForSuperAdminIfNotExists(tx, userInfo.UserId) if err != nil || flag == false { - return nil, false, false, nil, err + return nil, err } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes("", "", "", "", bean2.SUPER_ADMIN, "", "", "", "", "", "", "", false, "") if err != nil { - return nil, false, false, nil, err + return nil, err } if roleModel.Id > 0 { userRoleModel := &repository.UserRoleModel{UserId: model.Id, RoleId: roleModel.Id} userRoleModel, err = impl.userAuthRepository.CreateUserRoleMapping(userRoleModel, tx) if err != nil { - return nil, false, false, nil, err + return nil, err } addedPolicies = append(addedPolicies, casbin2.Policy{Type: "g", Sub: casbin2.Subject(model.EmailId), Obj: casbin2.Object(roleModel.Role)}) } } + if checkRBACForUserUpdate != nil { + isAuthorised, err := checkRBACForUserUpdate(token, userInfo, isUserSuperAdmin, eliminatedRoles, eliminatedGroupRoles) + if err != nil { + impl.logger.Errorw("error in checking RBAC for user update", "err", err, "userInfo", userInfo) + return nil, err + } else if !isAuthorised { + impl.logger.Errorw("rbac check failed for user update", "userInfo", userInfo) + return nil, &util.ApiError{ + Code: "403", + HttpStatusCode: http.StatusForbidden, + UserMessage: "unauthorized", + } + } + } + //updating in casbin if len(eliminatedPolicies) > 0 { pRes := casbin2.RemovePolicy(eliminatedPolicies) @@ -848,15 +825,15 @@ func (impl *UserServiceImpl) UpdateUser(userInfo *bean.UserInfo, token string, m model, err = impl.userRepository.UpdateUser(model, tx) if err != nil { impl.logger.Errorw("error while fetching user from db", "error", err) - return nil, false, false, nil, err + return nil, err } err = tx.Commit() if err != nil { - return nil, false, false, nil, err + return nil, err } //loading policy for syncing orchestrator to casbin with newly added policies casbin2.LoadPolicy() - return userInfo, rolesChanged, groupsModified, restrictedGroups, nil + return userInfo, nil } func (impl *UserServiceImpl) GetById(id int32) (*bean.UserInfo, error) { @@ -1572,19 +1549,18 @@ func (impl *UserServiceImpl) CheckUserRoles(id int32) ([]string, error) { } if len(groups) > 0 { // getting unique, handling for duplicate roles - grps, err := impl.getUniquesRolesByGroupCasbinNames(groups) + roleFromGroups, err := impl.getUniquesRolesByGroupCasbinNames(groups) if err != nil { impl.logger.Errorw("error in getUniquesRolesByGroupCasbinNames", "err", err) return nil, err } - groups = append(groups, grps...) + groups = append(groups, roleFromGroups...) 
} return groups, nil } func (impl UserServiceImpl) getUniquesRolesByGroupCasbinNames(groupCasbinNames []string) ([]string, error) { - var groups []string rolesModels, err := impl.roleGroupRepository.GetRolesByGroupCasbinNames(groupCasbinNames) if err != nil && err != pg.ErrNoRows { impl.logger.Errorw("error in getting roles by group names", "err", err) @@ -1598,10 +1574,7 @@ func (impl UserServiceImpl) getUniquesRolesByGroupCasbinNames(groupCasbinNames [ for role, _ := range uniqueRolesFromGroupMap { rolesFromGroup = append(rolesFromGroup, role) } - if len(rolesFromGroup) > 0 { - groups = append(groups, rolesFromGroup...) - } - return groups, nil + return rolesFromGroup, nil } func (impl *UserServiceImpl) SyncOrchestratorToCasbin() (bool, error) { @@ -1773,7 +1746,7 @@ func (impl *UserServiceImpl) GetRoleFiltersByUserRoleGroups(userRoleGroups []bea return roleFilters, nil } -func (impl *UserServiceImpl) createOrUpdateUserRolesForOtherEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { +func (impl *UserServiceImpl) createOrUpdateUserRolesForOtherEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { rolesChanged := false var policiesToBeAdded = make([]casbin2.Policy, 0, capacity) actionType := roleFilter.Action @@ -1782,14 +1755,6 @@ func (impl *UserServiceImpl) createOrUpdateUserRolesForOtherEntity(roleFilter be environments := strings.Split(roleFilter.Environment, ",") for _, environment := range environments { for _, entityName := range entityNames { - if managerAuth != nil && entity != bean.CHART_GROUP_ENTITY { - // check auth only for apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin2.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", actionType, false, "") if err != nil { impl.logger.Errorw("error in getting role by all type", "err", err, "roleFilter", roleFilter) @@ -1835,8 +1800,7 @@ func (impl *UserServiceImpl) createOrUpdateUserRolesForOtherEntity(roleFilter be return policiesToBeAdded, rolesChanged, nil } -func (impl *UserServiceImpl) createOrUpdateUserRolesForJobsEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, token string, managerAuth func(resource string, token string, object string) bool, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { - +func (impl *UserServiceImpl) createOrUpdateUserRolesForJobsEntity(roleFilter bean.RoleFilter, userId int32, model *repository.UserModel, existingRoles map[int]repository.UserRoleModel, tx *pg.Tx, entity string, capacity int) ([]casbin2.Policy, bool, error) { rolesChanged := false actionType := roleFilter.Action accessType := roleFilter.AccessType @@ -1847,14 +1811,6 @@ func (impl *UserServiceImpl) createOrUpdateUserRolesForJobsEntity(roleFilter bea for _, environment := range environments { for _, entityName := range entityNames { for _, workflow := range workflows { - if managerAuth != nil { - // check auth only for 
apps permission, skip for chart group - rbacObject := fmt.Sprintf("%s", roleFilter.Team) - isValidAuth := managerAuth(casbin2.ResourceUser, token, rbacObject) - if !isValidAuth { - continue - } - } roleModel, err := impl.userAuthRepository.GetRoleByFilterForAllTypes(entity, roleFilter.Team, entityName, environment, actionType, accessType, "", "", "", "", "", actionType, false, workflow) if err != nil { impl.logger.Errorw("error in getting role by all type", "err", err, "roleFilter", roleFilter) From 75fa6350c0dd00c8442f333b1408b2dbde971938 Mon Sep 17 00:00:00 2001 From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:33:16 +0530 Subject: [PATCH 51/61] doc: ArgoCD + FluxCD App Listing (#5636) * Glossary Add * Added ArgoCD Listing * PM Feedback Incorporated * Added Flux CD Appl Listing + Other Fixes * Incorporated PM Feedback for FluxCD * Incorporated PM Feedback 3 --- docs/reference/glossary.md | 12 +++ docs/user-guide/applications.md | 144 +++++++++++++++++++++++++++++++- 2 files changed, 155 insertions(+), 1 deletion(-) diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index c219ea5bd12..ce8354d2d81 100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -10,6 +10,12 @@ An immutable blob of data generated as an output after the execution of a job, b * Once a job is complete, you can view the job artifacts by going to Jobs → Run history (tab) → (choose a pipeline and date of triggering the build) → Artifacts (tab). +### ArgoCD Apps + +ArgoCD Apps are the micro-services deployed using a [GitOps](#gitops) deployment tool named [Argo CD](https://argo-cd.readthedocs.io/en/stable/). + +If ArgoCD applications are present in your cluster, they will appear in the [ArgoCD Apps listing](../user-guide/applications.md#enabling-argocd-app-listing). + ### Base Deployment Template A deployment template is a manifest of the application defining its runtime behavior. You can select one of the default deployment charts or custom deployment charts created by super-admin. @@ -112,6 +118,12 @@ Similarly, the CPU and memory resources can be different for each environment. T You can add external links related to the application. For e.g., you can add Prometheus, Grafana, and many more to your application by going to Global Configurations → External Links. [Read More...](../user-guide/global-configurations/external-links.md) +### FluxCD Apps + +FluxCD Apps are the micro-services deployed using a [GitOps](#gitops) deployment tool named [Flux CD](https://fluxcd.io/). + +If FluxCD applications are present in your cluster, they will appear in the [FluxCD Apps listing](../user-guide/applications.md#view-fluxcd-app-listing). + ### GitOps A methodology for managing and automating Kubernetes deployments using Git repositories as the source of truth. Changes to the desired state of the cluster are driven by Git commits. [Read More...](../user-guide/global-configurations/gitops.md) diff --git a/docs/user-guide/applications.md b/docs/user-guide/applications.md index 8df03f9be5d..d436240c032 100644 --- a/docs/user-guide/applications.md +++ b/docs/user-guide/applications.md @@ -1,3 +1,145 @@ # Applications -Please configure Global Configurations before creating an application or cloning an existing application. \ No newline at end of file +{% hint style="warning" %} +Configure [Global Configurations](./global-configurations/README.md) first before creating an application or cloning an existing application. 
{% endhint %} + +## Introduction + +The **Applications** page helps you create and manage your microservices, and it mainly consists of the following: + +* [Application Listing](#application-listing) +* [Create Button](#create-button) +* [Other Options](#other-options) + +### Application Listing + +You can view the app name, its status, environment, namespace, and more upfront. The apps are segregated into: [Devtron Apps](../reference/glossary.md#devtron-apps), [Helm Apps](../reference/glossary.md#helm-apps), [ArgoCD Apps](../reference/glossary.md#argocd-apps), and [FluxCD Apps](../reference/glossary.md#fluxcd-apps). + +![Figure 1: App Types](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/app-types.jpg) + +### Create Button + +You can use this to: +* [Create a Devtron app](./create-application.md) +* [Create a Helm app](./deploy-chart/deployment-of-charts.md) +* [Create a Job](./jobs/create-job.md) + +### Other Options + +There are additional options available for you: +* **Search and filters** to make it easier for you to find applications. +* **Export CSV** to download the data of Devtron apps (not supported for Helm apps and Argo CD apps). +* **Sync button** to refresh the app listing. + +--- + +## View ArgoCD App Listing + +{% hint style="warning" %} +### Who Can Perform This Action? +Users need super-admin permission to view/enable/disable the ArgoCD listing. +{% endhint %} + +### Preface + +In Argo CD, a user manages one dashboard per ArgoCD instance. Therefore, with multiple ArgoCD instances, managing several dashboards becomes cumbersome. + +With Devtron, you get the entire Argo CD app listing in one place. This listing includes: +* Apps deployed using [GitOps](../reference/glossary.md#gitops) on Devtron +* Other Argo CD apps present in your cluster + +![Figure 2: ArgoCD App List](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/app-details-argo.gif) + +### Advantages + +Devtron also bridges the gap for ArgoCD users by providing the following additional features: + +* **Resource Scanning**: You can scan for vulnerabilities using Devtron's [resource scanning](../user-guide/security-features.md#from-app-details) feature. [![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/elements/EnterpriseTag.svg)](https://devtron.ai/pricing) + +* **Single-pane View**: All Argo CD apps show details such as their app status, environment, cluster, and namespace together in one dashboard. + +* **Feature-rich Options**: Clicking an Argo CD app gives you access to its logs, terminal, events, manifest, available resource kinds, pod restart log, and more. + +{% hint style="info" %} +### Additional References +[ArgoCD: Standalone Configuration vs Devtron Configuration](https://devtron.ai/blog/argocd-standalone-configuration-vs-devtron-configuration/#argocd-installation-and-configuration) +{% endhint %} + +### Prerequisite +The cluster in which Argo CD apps exist should be added in **Global Configurations** → **Clusters and Environments** + +### Feature Flag + +> **`ENABLE_EXTERNAL_ARGO_CD: "true"`** + +### Enabling ArgoCD App Listing + +{% embed url="https://www.youtube.com/watch?v=4KyYnsAEpqo" caption="Enabling External ArgoCD Listing" %} + +1. Go to the **Resource Browser** of Devtron. + +2. Select the cluster (in which your Argo CD apps exist). + +3. Type `ConfigMap` in the 'Jump to Kind' field. + +4.
Search for `dashboard-cm` using the available search bar and click it. + +5. Click **Edit Live Manifest**. + +6. Set the feature flag **ENABLE_EXTERNAL_ARGO_CD** to **"true"**. + +7. Click **Apply Changes**. + +8. Go back to the 'Jump to Kind' field and type `Pod`. + +9. Search for the `dashboard` pod and use the kebab menu (3 vertical dots) to delete the pod. + +10. Go to **Applications** and refresh the page. A new tab named **ArgoCD Apps** will be visible. + +11. Select the cluster(s) from the dropdown to view the Argo CD apps available in the chosen cluster(s). + + ![Figure 3: Cluster Selection for Argo CD Listing](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/argo-cluster-selection.jpg) + +--- + +## View FluxCD App Listing + +{% hint style="warning" %} +### Who Can Perform This Action? +Users need super-admin permission to view/enable/disable the FluxCD listing. +{% endhint %} + +### Preface + +Flux CD doesn't have any official dashboard; however, Devtron supports the listing of your [Flux CD](https://fluxcd.io/) apps in one dashboard. Here, the [advantages](#advantages) are the same as those of [ArgoCD app listing](#view-argocd-app-listing). + +![Figure 4: FluxCD App List and Details](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/fluxcd-listing.jpg) + +### Prerequisite +The cluster in which Flux CD apps exist should be added in **Global Configurations** → **Clusters and Environments** + +### Feature Flag + +> **`FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true"`** + +### Enabling FluxCD App Listing + +{% hint style="info" %} +### Tip +You may refer to the steps mentioned in the [Enabling ArgoCD App Listing](#enabling-argocd-app-listing) section since the procedure is similar. +{% endhint %} + +Using Devtron's Resource Browser, add the [feature flag](#feature-flag-1) in the Dashboard ConfigMap as shown below. + +![Figure 5: Editing Dashboard ConfigMap](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/flux-feature-flag.jpg) + +After successfully executing all the steps, a new tab named **FluxCD Apps** will be visible. Select the cluster(s) from the dropdown to view the Flux CD apps available in the chosen cluster(s). + +![Figure 6: Selecting Cluster](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/cluster-selection.jpg) + +(Optional) Once you choose cluster(s), you may use the **Template Type** dropdown to further filter your Flux CD app listing based on its type, i.e., [Kustomization](https://fluxcd.io/flux/components/kustomize/kustomizations/) or [Helmrelease](https://fluxcd.io/flux/components/helm/helmreleases/). + +Click any Flux CD app to view its details as shown below.
+
+After successfully executing all the steps, a new tab named **FluxCD Apps** will be visible. Select the cluster(s) from the dropdown to view the Flux CD apps available in the chosen cluster(s).
+
+![Figure 6: Selecting Cluster](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/cluster-selection.jpg)
+
+(Optional) Once you choose cluster(s), you may use the **Template Type** dropdown to further filter your Flux CD app listing based on its type, i.e., [Kustomization](https://fluxcd.io/flux/components/kustomize/kustomizations/) or [HelmRelease](https://fluxcd.io/flux/components/helm/helmreleases/).
+
+Click any Flux CD app to view its details as shown below.
+
+![Figure 7: Flux App Details](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/app-details-flux.gif)
\ No newline at end of file

From bcc95e1e3585c87e67de4670465a12d266e9f2bc Mon Sep 17 00:00:00 2001
From: ashokdevtron <141001279+ashokdevtron@users.noreply.github.com>
Date: Thu, 19 Sep 2024 12:36:07 +0530
Subject: [PATCH 52/61] doc: Added Special CEL Expr in Filter Condition doc
 (#5850)

* Added Special Regex in Filter Condition doc

* Added fixed branch expression

* Replaced Video + Occurences of Regex

---
 .../global-configurations/filter-condition.md | 34 +++++++++++++++----
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/docs/user-guide/global-configurations/filter-condition.md b/docs/user-guide/global-configurations/filter-condition.md
index b721010e5d2..bde3182985a 100644
--- a/docs/user-guide/global-configurations/filter-condition.md
+++ b/docs/user-guide/global-configurations/filter-condition.md
@@ -8,6 +8,7 @@ Using filter conditions, you can control the progression of events. Here are a f
 * Images containing the label "test" should not be eligible for deployment in production environment
 * Only images having tag versions greater than v0.7.4 should be eligible for deployment
 * Images hosted on Docker Hub should be eligible but not the rest
+* Only images derived from the master branch should be eligible for production deployment (see [example](#scenario-2))
 
 ---
 
@@ -55,17 +56,17 @@ You must have application(s) with CI-CD workflow(s) configured
 
    ![Figure 5: Selecting Environment(s) from Cluster(s)](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/environment-selection.jpg)
 
-   {% hint style="info" %}
-   Since an application can have more than one environment, the filter conditions apply only to the environment you chose in the **Apply to** section. If you create a filter condition without choosing an application or environment, it will not apply to any of your pipelines.
-   {% endhint %}
+{% hint style="info" %}
+Since an application can have more than one environment, the filter conditions apply only to the environment you chose in the **Apply to** section. If you create a filter condition without choosing an application or environment, it will not apply to any of your pipelines.
+{% endhint %}
 
 6. Click **Save**. You have successfully created a filter.
   ![Figure 6: Success Toast](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/filter-created.jpg)
 
-   {% hint style="warning" %}
-   If you create filters using CEL expressions that result in a conflict (i.e., passing and failing of the same image), fail will have higher precedence
-   {% endhint %}
+{% hint style="warning" %}
+If you create filters using CEL expressions that result in a conflict (i.e., the same image both passing and failing), the fail condition will take precedence
+{% endhint %}
 
 ---
 
@@ -78,6 +79,8 @@ Here's a sample pipeline we will be using for our explanation of [pass condition
 
 ### Pass Condition
 
+#### Scenario 1
+
 Consider a scenario where you wish to make an image eligible for deployment only if its tag version is greater than `v0.0.7`
 
 The CEL Expression should be `containerImageTag > "v0.0.7"`
 
@@ -102,6 +105,25 @@ Clicking the filter icon at the top-left shows the filter condition(s) applied t
 
 ![Figure 12b: Conditions Applied](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/conditions-applied-1.jpg)
 
+#### Scenario 2
+
+Consider another scenario where you wish to make images eligible for deployment only if the application's git branch starts with the word `hotfix` and if its repo URL matches your specified condition.
+
+**CEL Expression**:
+
+`gitCommitDetails.filter(gitCommitDetail, gitCommitDetail.startsWith('https://github.com/devtron-labs')).map(repo, gitCommitDetails[repo].branch).exists_one(branch, branch.startsWith('hotfix-'))`
+
+where `https://github.com/devtron-labs` is a portion of the repo URL,
+and `hotfix-` is for finding the branch name (say *hotfix-sept-2024*)
+
+Alternatively, if you have a fixed branch (say *hotfix-123*), you may write the following expression:
+
+`'hotfix-123' in gitCommitDetails.filter(gitCommitDetail, gitCommitDetail.startsWith('https://github.com/devtron-labs')).map(repo, gitCommitDetails[repo].branch)`
+
+**Walkthrough Video**:
+
+{% embed url="https://www.youtube.com/watch?v=R8IbZhXhH-k" caption="Filter Condition Example" %}
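+
+To make the expressions above easier to read: `gitCommitDetails` appears to be a map keyed by repo URL, where each value carries the branch of the commit for that repo. A hypothetical payload, sketched purely for illustration (field names other than `branch` are assumptions):
+
+```yaml
+# Illustrative shape only, not an actual payload
+gitCommitDetails:
+  "https://github.com/devtron-labs/sample-repo":
+    branch: "hotfix-sept-2024"
+    commit: "a1b2c3d"
+```
+
+Against this payload, `filter` keeps the matching repo URL, `map` extracts `hotfix-sept-2024`, and `exists_one` passes because exactly one branch starts with `hotfix-`.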
+
 
 ### Fail Condition

From 7b8bac71fb5788c8e7b8a89312b297b5951cb92b Mon Sep 17 00:00:00 2001
From: iamayushm <32041961+iamayushm@users.noreply.github.com>
Date: Thu, 19 Sep 2024 13:31:05 +0530
Subject: [PATCH 53/61] wip (#5865) (#5869)

---
 api/helm-app/service/HelmAppService.go       |  7 +-
 internal/sql/repository/app/AppRepository.go | 78 +++++++++++++++++--
 pkg/app/AppCrudOperationService.go           | 37 ++++++++-
 .../service/EAMode/InstalledAppDBService.go  | 11 +++
 4 files changed, 124 insertions(+), 9 deletions(-)

diff --git a/api/helm-app/service/HelmAppService.go b/api/helm-app/service/HelmAppService.go
index 01e1e90d94c..a803e0e358d 100644
--- a/api/helm-app/service/HelmAppService.go
+++ b/api/helm-app/service/HelmAppService.go
@@ -1159,7 +1159,12 @@ func (impl *HelmAppServiceImpl) appListRespProtoTransformer(deployedApps *gRPC.D
 		}
 		// end
 		lastDeployed := deployedapp.LastDeployed.AsTime()
-		appDetails, appFetchErr := impl.appRepository.FindActiveByName(deployedapp.AppName)
+		appDetails, appFetchErr := impl.getAppForAppIdentifier(
+			&helmBean.AppIdentifier{
+				ClusterId:   int(deployedapp.EnvironmentDetail.ClusterId),
+				Namespace:   deployedapp.EnvironmentDetail.Namespace,
+				ReleaseName: deployedapp.AppName,
+			})
 		projectId := int32(0)
 		if appFetchErr == nil {
 			projectId = int32(appDetails.TeamId)

diff --git a/internal/sql/repository/app/AppRepository.go b/internal/sql/repository/app/AppRepository.go
index 76d526f1744..02e10b95021 100644
--- a/internal/sql/repository/app/AppRepository.go
+++ b/internal/sql/repository/app/AppRepository.go
@@ -40,6 +40,10 @@ type App struct {
 	sql.AuditLog
 }
 
+const (
+	SYSTEM_USER_ID = 1
+)
+
 func (r *App) IsAppJobOrExternalType() bool {
 	return len(r.DisplayName) > 0
 }
@@ -129,16 +133,37 @@ func (repo AppRepositoryImpl) SetDescription(id int, description string, userId
 }
 
 func (repo AppRepositoryImpl) FindActiveByName(appName string) (*App, error) {
-	pipelineGroup := &App{}
+	var apps []*App
 	err := repo.dbConnection.
-		Model(pipelineGroup).
+		Model(&apps).
 		Where("app_name = ?", appName).
 		Where("active = ?", true).
-		Order("id DESC").Limit(1).
+		Order("id DESC").
 		Select()
-	// there is only single active app will be present in db with a same name.
-	return pipelineGroup, err
+	if len(apps) == 1 {
+		return apps[0], nil
+	} else if len(apps) > 1 {
+		isHelmApp := true
+		for _, app := range apps {
+			if app.AppType != helper.ChartStoreApp && app.AppType != helper.ExternalChartStoreApp {
+				isHelmApp = false
+				break
+			}
+		}
+		if isHelmApp {
+			err := repo.fixMultipleHelmAppsWithSameName(appName)
+			if err != nil {
+				repo.logger.Errorw("error in fixing duplicate helm apps with same name", "appName", appName, "err", err)
+				return nil, err
+			}
+		}
+		return apps[0], nil
+	} else {
+		err = pg.ErrNoRows
+	}
+	return nil, err
 }
+
 func (repo AppRepositoryImpl) FindAppIdByName(appName string) (int, error) {
 	app := &App{}
 	err := repo.dbConnection.
@@ -324,9 +349,52 @@ func (repo AppRepositoryImpl) FindAppAndProjectByAppName(appName string) (*App,
 		Where("app.app_name = ?", appName).
 		Where("app.active=?", true).
 		Select()
+
+	if err == pg.ErrMultiRows && (app.AppType == helper.ChartStoreApp || app.AppType == helper.ExternalChartStoreApp) {
+		// this case can arise in helm apps only
+
+		err := repo.fixMultipleHelmAppsWithSameName(appName)
+		if err != nil {
+			repo.logger.Errorw("error in fixing duplicate helm apps with same name", "appName", appName, "err", err)
+			return nil, err
+		}
+
+		err = repo.dbConnection.Model(app).Column("Team").
+			Where("app.app_name = ?", appName).
+			Where("app.active=?", true).
+			Select()
+		if err != nil {
+			repo.logger.Errorw("error in fetching apps by name", "appName", appName, "err", err)
+			return nil, err
+		}
+	}
 	return app, err
 }

+func (repo AppRepositoryImpl) fixMultipleHelmAppsWithSameName(appName string) error {
+	// updating installed apps setting app_id = max app_id
+	installAppUpdateQuery := `update installed_apps set
+		app_id=(select max(id) as id from app where app_name = ?)
+		where app_id in (select id from app where app_name= ? )`
+
+	_, err := repo.dbConnection.Exec(installAppUpdateQuery, appName, appName)
+	if err != nil {
+		repo.logger.Errorw("error in updating maxAppId in installedApps", "appName", appName, "err", err)
+		return err
+	}
+
+	maxAppIdQuery := repo.dbConnection.Model((*App)(nil)).ColumnExpr("max(id)").
+		Where("app_name = ? ", appName).
+		Where("active = ? ", true)
+
+	// deleting all apps other than app with max id; propagate the error (if any) from this update
+	_, err = repo.dbConnection.Model((*App)(nil)).
+		Set("active = ?", false).Set("updated_by = ?", SYSTEM_USER_ID).Set("updated_on = ?", time.Now()).
+		Where("id not in (?) ", maxAppIdQuery).Update()
+
+	return err
+}
+
 func (repo AppRepositoryImpl) FindAllMatchesByAppName(appName string, appType helper.AppType) ([]*App, error) {
 	var apps []*App
 	var err error

diff --git a/pkg/app/AppCrudOperationService.go b/pkg/app/AppCrudOperationService.go
index a7035d7603f..845d1f03ae9 100644
--- a/pkg/app/AppCrudOperationService.go
+++ b/pkg/app/AppCrudOperationService.go
@@ -533,10 +533,15 @@ func (impl AppCrudOperationServiceImpl) GetHelmAppMetaInfo(appId string) (*bean.
 	}
 	// if app.DisplayName is empty then that app_name is not yet migrated to app name unique identifier
 	if app.Id > 0 && len(app.DisplayName) == 0 {
-		err = impl.updateAppNameToUniqueAppIdentifierInApp(app, appIdDecoded)
+		appNameUniqueIdentifier := appIdDecoded.GetUniqueAppNameIdentifier()
+		app.AppName = appNameUniqueIdentifier
+		app.DisplayName = appIdDecoded.ReleaseName
+		app.UpdatedBy = bean2.SystemUserId
+		app.UpdatedOn = time.Now()
+		err = impl.appRepository.Update(app)
 		if err != nil {
-			impl.logger.Errorw("GetHelmAppMetaInfo, error in migrating displayName and appName to unique identifier for external apps", "appIdentifier", appIdDecoded, "err", err)
-			//not returning from here as we need to show helm app metadata even if migration of app_name fails, then migration can happen on project update
+			impl.logger.Errorw("error in migrating displayName and appName to unique identifier", "appNameUniqueIdentifier", appNameUniqueIdentifier, "err", err)
+			return nil, err
 		}
 	}
 	if app.Id == 0 {
@@ -569,6 +574,12 @@
 		}
 	}
+	err = impl.fixMultipleInstalledAppForSingleApp(app)
+	if err != nil {
+		impl.logger.Errorw("GetHelmAppMetaInfo, error in fixing multiple installed apps linked to same app", "appId", appId, "err", err)
+		return nil, err
+	}
+
 	user, err := impl.userRepository.GetByIdIncludeDeleted(app.CreatedBy)
 	if err != nil && err != pg.ErrNoRows {
 		impl.logger.Errorw("error in fetching user for app meta info", "error", err)
@@ -598,6 +609,26 @@
 	return info, nil
 }
 
+// fixMultipleInstalledAppForSingleApp fixes multiple entries of installed app for a single app
+func (impl AppCrudOperationServiceImpl) fixMultipleInstalledAppForSingleApp(app *appRepository.App) error {
+	isLinked, installedApps, err := impl.installedAppDbService.IsExternalAppLinkedToChartStore(app.Id)
+	if err != nil {
+		impl.logger.Errorw("error in checking IsExternalAppLinkedToChartStore", "appId", app.Id, "err", err)
+		return err
+	}
+	// if isLinked is true and more than one installed app is found for that app, we will create a new app for each installed app
+	if isLinked && len(installedApps) > 1 {
+		// if installed_apps are already present for that display_name then migrate the app_name to unique identifier with installedApp's ns and clusterId.
+		// creating a new app entry for all installedApps with uniqueAppNameIdentifier and display name
+		err := impl.installedAppDbService.CreateNewAppEntryForAllInstalledApps(installedApps)
+		if err != nil {
+			impl.logger.Errorw("error in CreateNewAppEntryForAllInstalledApps", "appName", app.AppName, "err", err)
+			// not returning from here as we have to migrate the app for requested ext-app and return the response for meta info
+		}
+	}
+	return nil
+}
+
 func (impl AppCrudOperationServiceImpl) getLabelsByAppIdForDeployment(appId int) (map[string]string, error) {
 	labelsDto := make(map[string]string)
 	labels, err := impl.appLabelRepository.FindAllByAppId(appId)

diff --git a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go
index 6eb4156e245..21adc8dbd6b 100644
--- a/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go
+++ b/pkg/appStore/installedApp/service/EAMode/InstalledAppDBService.go
@@ -396,6 +396,17 @@ func (impl *InstalledAppDBServiceImpl) CreateNewAppEntryForAllInstalledApps(inst
 	if err != nil {
 		return err
 	}
+	currApp, err := impl.AppRepository.FindById(installedApps[0].AppId)
+	if err != nil {
+		impl.Logger.Errorw("error in fetching app by id", "appId", installedApps[0].AppId, "err", err)
+		return err
+	}
+	currApp.Active = false
+	err = impl.AppRepository.UpdateWithTxn(currApp, tx)
+	if err != nil {
+		impl.Logger.Errorw("error in marking current app inactive while creating new apps", "currentAppId", currApp.Id, "err", err)
+		return err
+	}
 	// Rollback tx on error.
 	defer tx.Rollback()
 	for _, installedApp := range installedApps {

From ee06e4195c345851f3cfbafddcf48de1a2ee5ed8 Mon Sep 17 00:00:00 2001
From: iamayushm <32041961+iamayushm@users.noreply.github.com>
Date: Thu, 19 Sep 2024 19:57:07 +0530
Subject: [PATCH 54/61] fix: app overview panic for helm app (#5863)

* fix app overview panic

* else if instead of if

---
 api/appStore/InstalledAppRestHandler.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/api/appStore/InstalledAppRestHandler.go b/api/appStore/InstalledAppRestHandler.go
index a7653401191..ffb3f0463ca 100644
--- a/api/appStore/InstalledAppRestHandler.go
+++ b/api/appStore/InstalledAppRestHandler.go
@@ -148,6 +148,13 @@ func (handler *InstalledAppRestHandlerImpl) FetchAppOverview(w http.ResponseWrit
 	token := r.Header.Get("token")
 	handler.Logger.Infow("request payload, FindAppOverview", "installedAppId", installedAppId)
 	installedApp, err := handler.installedAppService.GetInstalledAppById(installedAppId)
+	if err != nil && err != pg.ErrNoRows {
+		common.WriteJsonResp(w, err, nil, http.StatusInternalServerError)
+		return
+	} else if err == pg.ErrNoRows || installedApp == nil {
+		common.WriteJsonResp(w, errors.New("helm app does not exist"), nil, http.StatusNotFound)
+		return
+	}
 	appOverview, err := handler.appCrudOperationService.GetAppMetaInfo(installedApp.AppId, installedAppId, installedApp.EnvironmentId)
 	if err != nil {
 		handler.Logger.Errorw("service err, FetchAppOverview", "err", err, "appId", installedApp.AppId, "installedAppId", installedAppId)

From aff5f6312ccb9a9d4c3db369bbf6bfbd56fa06f2 Mon Sep 17 00:00:00 2001
From: Rajeev Ranjan <90333766+RajeevRanjan27@users.noreply.github.com>
Date: Fri, 20 Sep 2024 16:20:29 +0530
Subject: [PATCH 55/61] removed field image (#5873)

---
 internal/sql/repository/AppListingRepository.go | 2 --
 1 file changed, 2 deletions(-)

diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go
index c5536724f67..37f6aaf8f82 100644
--- a/internal/sql/repository/AppListingRepository.go
+++ b/internal/sql/repository/AppListingRepository.go
@@ -363,7 +363,6 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex
 		" env.cluster_id," +
 		" env.is_virtual_environment," +
 		" cl.cluster_name," +
-		" cia.image," +
 		" p.id as cd_pipeline_id," +
 		" p.ci_pipeline_id," +
 		" p.trigger_type" +
@@ -382,7 +381,6 @@
 	}
 	deploymentDetail.EnvironmentId = envId
-	deploymentDetail.EnvironmentId = envId
 	dc, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId)
 	if err != nil && err != pg.ErrNoRows {
 		impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err)

From 4cc9ddf549abf581131afcc86cecd1b52aa69ad6 Mon Sep 17 00:00:00 2001
From: iamayushm <32041961+iamayushm@users.noreply.github.com>
Date: Fri, 20 Sep 2024 19:19:03 +0530
Subject: [PATCH 56/61] fixing plugin version (#5876)

---
 pkg/eventProcessor/in/WorkflowEventProcessorService.go | 4 ++--
 pkg/pipeline/pipelineStageVariableParser.go            | 3 +--
 pkg/workflow/dag/WorkflowDagExecutor.go                | 2 +-
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go
index afd03182486..619e196217b 100644
--- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go
+++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go
@@ -189,8 +189,8 @@ func (impl
*WorkflowEventProcessorImpl) SubscribeCDStageCompleteEvent() error { pluginArtifacts := make(map[string][]string) if cdStageCompleteEvent.PluginArtifacts != nil { pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - globalUtil.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) } + globalUtil.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) impl.logger.Debugw("received post stage success event for workflow runner ", "wfId", strconv.Itoa(wfr.Id)) err = impl.workflowDagExecutor.HandlePostStageSuccessEvent(triggerContext, wfr, wfr.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, pluginArtifacts) @@ -646,8 +646,8 @@ func (impl *WorkflowEventProcessorImpl) BuildCiArtifactRequest(event bean.CiComp pluginArtifacts := make(map[string][]string) if event.PluginArtifacts != nil { pluginArtifacts = event.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - globalUtil.MergeMaps(pluginArtifacts, event.PluginRegistryArtifactDetails) } + globalUtil.MergeMaps(pluginArtifacts, event.PluginRegistryArtifactDetails) request := &wrokflowDagBean.CiArtifactWebhookRequest{ Image: event.DockerImage, diff --git a/pkg/pipeline/pipelineStageVariableParser.go b/pkg/pipeline/pipelineStageVariableParser.go index 3dc9a9150d1..9afe8b17725 100644 --- a/pkg/pipeline/pipelineStageVariableParser.go +++ b/pkg/pipeline/pipelineStageVariableParser.go @@ -34,8 +34,7 @@ type RefPluginName = string const ( COPY_CONTAINER_IMAGE RefPluginName = "Copy container image" - COPY_CONTAINER_IMAGE_VERSION_V1 = "v1.0.0" - COPY_CONTAINER_IMAGE_VERSION_V2 = "v1.1.0" + COPY_CONTAINER_IMAGE_VERSION_V1 = "1.0.0" EMPTY_STRING = " " ) diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 15a71c104b3..3f1be4ca4f1 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -538,8 +538,8 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t pluginArtifacts := make(map[string][]string) if cdStageCompleteEvent.PluginArtifacts != nil { pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - util4.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) } + util4.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) err = impl.deactivateUnusedPaths(wfRunner.ImagePathReservationIds, pluginArtifacts) if err != nil { From d71fc21a4ba7d7405f44d904a0c568c2c879d152 Mon Sep 17 00:00:00 2001 From: Prakarsh <71125043+prakarsh-dt@users.noreply.github.com> Date: Mon, 23 Sep 2024 16:38:45 +0530 Subject: [PATCH 57/61] Update CODEOWNERS (#5885) --- .github/CODEOWNERS | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 87391528dde..823dfd25445 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,10 +1,6 @@ #ALL * @vikramdevtron @kripanshdevtron @nishant-d @prakarsh-dt -#DOCS -docs/ @ashokdevtron @uxarya-d @prakarsh-dt -.gitbook.yaml @uxarya-d @prakarsh-dt - #Helm Charts charts/devtron/ @prakarsh-dt @pawan-mehta-dt @nishant-d scripts/devtron-reference-helm-charts @prakarsh-dt @pawan-mehta-dt @nishant-d From b5a2f8ba920ce4ff41b08cdb640966a98daca2a6 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Mon, 23 Sep 2024 16:57:50 +0530 Subject: [PATCH 58/61] fix: create gitops configuration issue (#5883) --- pkg/gitops/GitOpsConfigService.go | 53 
+++++++++++++++----------
 1 file changed, 26 insertions(+), 27 deletions(-)

diff --git a/pkg/gitops/GitOpsConfigService.go b/pkg/gitops/GitOpsConfigService.go
index c08a2e904b9..6d0ce2af14e 100644
--- a/pkg/gitops/GitOpsConfigService.go
+++ b/pkg/gitops/GitOpsConfigService.go
@@ -352,39 +352,38 @@ func (impl *GitOpsConfigServiceImpl) createGitOpsConfig(ctx context.Context, req
 			}
 		}
 	}
-		err = impl.gitOperationService.UpdateGitHostUrlByProvider(request)
+	}
+	err = impl.gitOperationService.UpdateGitHostUrlByProvider(request)
+	if err != nil {
+		return nil, err
+	}
+	operationComplete := false
+	retryCount := 0
+	for !operationComplete && retryCount < 3 {
+		retryCount = retryCount + 1
+
+		cm, err := impl.K8sUtil.GetConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.ACDConfigMapName, client)
 		if err != nil {
 			return nil, err
 		}
-		operationComplete := false
-		retryCount := 0
-		for !operationComplete && retryCount < 3 {
-			retryCount = retryCount + 1
-
-			cm, err := impl.K8sUtil.GetConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, impl.aCDAuthConfig.ACDConfigMapName, client)
-			if err != nil {
-				return nil, err
-			}
-			currentHost := request.Host
-			updatedData := impl.updateData(cm.Data, request, impl.aCDAuthConfig.GitOpsSecretName, currentHost)
-			data := cm.Data
-			if data == nil {
-				data = make(map[string]string, 0)
-			}
-			data["repository.credentials"] = updatedData["repository.credentials"]
-			cm.Data = data
-			_, err = impl.K8sUtil.UpdateConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, cm, client)
-			if err != nil {
-				continue
-			}
-			if err == nil {
-				operationComplete = true
-			}
+		currentHost := request.Host
+		updatedData := impl.updateData(cm.Data, request, impl.aCDAuthConfig.GitOpsSecretName, currentHost)
+		data := cm.Data
+		if data == nil {
+			data = make(map[string]string, 0)
 		}
-		if !operationComplete {
-			return nil, fmt.Errorf("resouce version not matched with config map attempted 3 times")
+		data["repository.credentials"] = updatedData["repository.credentials"]
+		cm.Data = data
+		_, err = impl.K8sUtil.UpdateConfigMap(impl.aCDAuthConfig.ACDConfigMapNamespace, cm, client)
+		if err != nil {
+			continue
+		} else {
+			operationComplete = true
 		}
 	}
+	if !operationComplete {
+		return nil, fmt.Errorf("resource version not matched with config map, attempted 3 times")
+	}
 	}

 	// if git-ops config is created/saved successfully (just before transaction commit) and this was first git-ops config, then upsert clusters in acd

From 3f68456b12cd1731cf22b4573e6edd7e1c35a669 Mon Sep 17 00:00:00 2001
From: Asutosh Das
Date: Mon, 23 Sep 2024 19:53:17 +0530
Subject: [PATCH 59/61] added migration to fix default image in slack
 notification template (#5890)

---
 .../288_slack_notification_template.down.sql | 856 ++++++++++++++++++
 .../288_slack_notification_template.up.sql   | 856 ++++++++++++++++++
 2 files changed, 1712 insertions(+)
 create mode 100644 scripts/sql/288_slack_notification_template.down.sql
 create mode 100644 scripts/sql/288_slack_notification_template.up.sql

diff --git a/scripts/sql/288_slack_notification_template.down.sql b/scripts/sql/288_slack_notification_template.down.sql
new file mode 100644
index 00000000000..2a3b3622398
--- /dev/null
+++ b/scripts/sql/288_slack_notification_template.down.sql
@@ -0,0 +1,856 @@
+---- update notification template for CI trigger slack
+UPDATE notification_templates
+set template_payload = '{
+  "text": ":arrow_forward: Build pipeline Triggered | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}",
+  "blocks": [{
+      "type":
"section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":arrow_forward: *Build Pipeline triggered*\n \n Triggered by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url": "https://github.com/devtron-labs/notifier/assets/image/img_build_notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}" + }, + { + "type": "mrkdwn", + "text": "*Pipeline*\n{{pipelineName}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Details" + } + {{#buildHistoryLink}} + , + "url": "{{& buildHistoryLink}}" + {{/buildHistoryLink}} + }] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CI' +and event_type_id = 1; + + +---- update notification template for CI success slack +UPDATE notification_templates +set template_payload = '{ + "text": ":tada: Build pipeline Successful | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":tada: *Build Pipeline successful*\n \n Triggered by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url": "https://github.com/devtron-labs/notifier/assets/image/img_build_notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Application*\n{{appName}}" + }, + { + "type": "mrkdwn", + "text": "*Pipeline*\n{{pipelineName}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": 
"*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "actions", + "elements": [ + { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Details" + } + {{#buildHistoryLink}} + , + "url": "{{& buildHistoryLink}}" + {{/buildHistoryLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CI' +and event_type_id = 2; + + + +---- update notification template for CI fail slack +UPDATE notification_templates +set template_payload = '{ + "text": ":x: Build pipeline Failed | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":x: *Build Pipeline failed*\n \n Triggered by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url": "https://github.com/devtron-labs/notifier/assets/image/img_build_notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}" + }, + { + "type": "mrkdwn", + "text": "*Pipeline*\n{{pipelineName}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "actions", + "elements": [{ + "type": 
"button", + "text": { + "type": "plain_text", + "text": "View Details" + } + {{#buildHistoryLink}} + , + "url": "{{& buildHistoryLink}}" + {{/buildHistoryLink}} + }] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CI' +and event_type_id = 3; + + +---- update notification template for CD trigger slack +UPDATE notification_templates +set template_payload = '{ + "text": ":arrow_forward: Deployment pipeline Triggered | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":arrow_forward: *Deployment Pipeline triggered on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://github.com/devtron-labs/notifier/assets/image/img_deployment_notification.png", + "alt_text": "Deploy Pipeline Triggered" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": "*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 1; + + + +---- update notification template for CD success slack +UPDATE notification_templates +set template_payload = '{ + "text": ":tada: Deployment pipeline Successful | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + 
"type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":tada: *Deployment Pipeline successful on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://github.com/devtron-labs/notifier/assets/image/img_deployment_notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": "*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 2; + + +---- update notification template for CD fail slack +UPDATE notification_templates +set template_payload = '{ + "text": ":x: Deployment pipeline Failed | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":x: *Deployment Pipeline failed on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://github.com/devtron-labs/notifier/assets/image/img_deployment_notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": 
"*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 3; \ No newline at end of file diff --git a/scripts/sql/288_slack_notification_template.up.sql b/scripts/sql/288_slack_notification_template.up.sql new file mode 100644 index 00000000000..ca69a39307d --- /dev/null +++ b/scripts/sql/288_slack_notification_template.up.sql @@ -0,0 +1,856 @@ +---- update notification template for CI trigger slack +UPDATE notification_templates +set template_payload = '{ + "text": ":arrow_forward: Build pipeline Triggered | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":arrow_forward: *Build Pipeline triggered*\n \n Triggered by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url": "https://cdn.devtron.ai/images/img-build-notification.png ", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}" + }, + { + "type": "mrkdwn", + "text": "*Pipeline*\n{{pipelineName}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": 
+        {
+          "type": "mrkdwn",
+          "text": "*Title*\n{{webhookData.data.title}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>"
+        }
+      ]
+    },
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>"
+        }
+      ]
+    },
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>"
+        }
+      ]
+    },
+    {{/webhookData.mergedType}}
+    {{^webhookData.mergedType}}
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}"
+        }
+      ]
+    },
+    {{/webhookData.mergedType}}
+    {{/webhookType}}
+    {{/ciMaterials}}
+    {
+      "type": "actions",
+      "elements": [{
+        "type": "button",
+        "text": {
+          "type": "plain_text",
+          "text": "View Details"
+        }
+        {{#buildHistoryLink}}
+        ,
+        "url": "{{& buildHistoryLink}}"
+        {{/buildHistoryLink}}
+      }]
+    }
+  ]
+}'
+where channel_type = 'slack'
+and node_type = 'CI'
+and event_type_id = 1;
+
+
+---- update notification template for CI success slack
+UPDATE notification_templates
+set template_payload = '{
+  "text": ":tada: Build pipeline Successful | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}",
+  "blocks": [
+    {
+      "type": "section",
+      "text": {
+        "type": "mrkdwn",
+        "text": "\n"
+      }
+    },
+    {
+      "type": "divider"
+    },
+    {
+      "type": "section",
+      "text": {
+        "type": "mrkdwn",
+        "text": ":tada: *Build Pipeline successful*\n \n Triggered by {{triggeredBy}}"
+      },
+      "accessory": {
+        "type": "image",
+        "image_url": "https://cdn.devtron.ai/images/img-build-notification.png",
+        "alt_text": "calendar thumbnail"
+      }
+    },
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Application*\n{{appName}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Pipeline*\n{{pipelineName}}"
+        }
+      ]
+    },
+    {{#ciMaterials}}
+    {{^webhookType}}
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Branch*\n`{{appName}}/{{branch}}`"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Commit*\n<{{& commitLink}}|{{commit}}>"
+        }
+      ]
+    },
+    {{/webhookType}}
+    {{#webhookType}}
+    {{#webhookData.mergedType}}
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Title*\n{{webhookData.data.title}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>"
+        }
+      ]
+    },
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>"
+        }
+      ]
+    },
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}"
+        },
+        {
+          "type": "mrkdwn",
+          "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>"
+        }
+      ]
+    },
+    {{/webhookData.mergedType}}
+    {{^webhookData.mergedType}}
+    {
+      "type": "section",
+      "fields": [
+        {
+          "type": "mrkdwn",
+          "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}"
+        }
+      ]
+    },
+    {{/webhookData.mergedType}}
+    {{/webhookType}}
+    {{/ciMaterials}}
+    {
+      "type": "actions",
"elements": [ + { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Details" + } + {{#buildHistoryLink}} + , + "url": "{{& buildHistoryLink}}" + {{/buildHistoryLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CI' +and event_type_id = 2; + + + +---- update notification template for CI fail slack +UPDATE notification_templates +set template_payload = '{ + "text": ":x: Build pipeline Failed | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":x: *Build Pipeline failed*\n \n Triggered by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url": "https://cdn.devtron.ai/images/img-build-notification.png ", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}" + }, + { + "type": "mrkdwn", + "text": "*Pipeline*\n{{pipelineName}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Details" + } + {{#buildHistoryLink}} + , + "url": "{{& buildHistoryLink}}" + {{/buildHistoryLink}} + }] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CI' +and event_type_id = 3; + + +---- update notification template for CD trigger slack +UPDATE notification_templates +set template_payload = '{ + "text": ":arrow_forward: Deployment pipeline Triggered | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":arrow_forward: *Deployment Pipeline triggered on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://cdn.devtron.ai/images/img-deploy-notification.png", + "alt_text": "Deploy Pipeline Triggered" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": 
[{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": "*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 1; + + + +---- update notification template for CD success slack +UPDATE notification_templates +set template_payload = '{ + "text": ":tada: Deployment pipeline Successful | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":tada: *Deployment Pipeline successful on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://cdn.devtron.ai/images/img-deploy-notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": "*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" 
+ }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 2; + + +---- update notification template for CD fail slack +UPDATE notification_templates +set template_payload = '{ + "text": ":x: Deployment pipeline Failed | {{#ciMaterials}} Branch > {{branch}} {{/ciMaterials}} | Application > {{appName}}", + "blocks": [{ + "type": "section", + "text": { + "type": "mrkdwn", + "text": "\n" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":x: *Deployment Pipeline failed on {{envName}}*\n \n by {{triggeredBy}}" + }, + "accessory": { + "type": "image", + "image_url":"https://cdn.devtron.ai/images/img-deploy-notification.png", + "alt_text": "calendar thumbnail" + } + }, + { + "type": "divider" + }, + { + "type": "section", + "fields": [{ + "type": "mrkdwn", + "text": "*Application*\n{{appName}}\n*Pipeline*\n{{pipelineName}}" + }, + { + "type": "mrkdwn", + "text": "*Environment*\n{{envName}}\n*Stage*\n{{stage}}" + } + ] + }, + {{#ciMaterials}} + {{^webhookType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Branch*\n`{{appName}}/{{branch}}`" + }, + { + "type": "mrkdwn", + "text": "*Commit*\n<{{& commitLink}}|{{commit}}>" + } + ] + }, + {{/webhookType}} + {{#webhookType}} + {{#webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Title*\n{{webhookData.data.title}}" + }, + { + "type": "mrkdwn", + "text": "*Git URL*\n<{{& webhookData.data.giturl}}|View>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Source Branch*\n{{webhookData.data.sourcebranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Source Commit*\n<{{& webhookData.data.sourcecheckoutlink}}|{{webhookData.data.sourcecheckout}}>" + } + ] + }, + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Branch*\n{{webhookData.data.targetbranchname}}" + }, + { + "type": "mrkdwn", + "text": "*Target Commit*\n<{{& 
webhookData.data.targetcheckoutlink}}|{{webhookData.data.targetcheckout}}>" + } + ] + }, + {{/webhookData.mergedType}} + {{^webhookData.mergedType}} + { + "type": "section", + "fields": [ + { + "type": "mrkdwn", + "text": "*Target Checkout*\n{{webhookData.data.targetcheckout}}" + } + ] + }, + {{/webhookData.mergedType}} + {{/webhookType}} + {{/ciMaterials}} + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "*Docker Image*\n`{{dockerImg}}`" + } + }, + { + "type": "actions", + "elements": [{ + "type": "button", + "text": { + "type": "plain_text", + "text": "View Pipeline", + "emoji": true + } + {{#deploymentHistoryLink}} + , + "url": "{{& deploymentHistoryLink}}" + {{/deploymentHistoryLink}} + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "App details", + "emoji": true + } + {{#appDetailsLink}} + , + "url": "{{& appDetailsLink}}" + {{/appDetailsLink}} + } + ] + } + ] +}' +where channel_type = 'slack' +and node_type = 'CD' +and event_type_id = 3; \ No newline at end of file From 3af467ab45ce775c1322cb5b32fbb139cae73b32 Mon Sep 17 00:00:00 2001 From: systemsdt <129372406+systemsdt@users.noreply.github.com> Date: Mon, 23 Sep 2024 22:10:56 +0530 Subject: [PATCH 60/61] release: PR for v0.7.2 (#5480) * Updated release-notes files * Updated release notes * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of ci-runner in installer * Updated latest image of image-scanner in installer * Updated release-notes files * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated latest image of image-scanner in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of gitsensor in installer * Updated latest image of notifier in installer * Updated latest image of git-sensor in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of image-scanner in installer * Updated latest image of image-scanner in installer * Updated latest image of ci-runner in installer * Updated latest image of hyperion in installer * Updated release-notes files * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of chart-sync in installer * Updated release-notes files * Updated latest image of image-scanner in installer * Updated latest image of chart-sync in installer * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated 
release-notes files * Updated latest image of image-scanner in installer * Updated latest image of hyperion in installer * feat: ServiceAccount Addition and Create ImagePullSecrets Function (#5739) * Updated function name * imagePullSecret for chart-sync * Added Yamls of Scoop and Casbin * Changes the default value of devtronEnterprise to false * Changes the default value of devtronEnterprise to false * Changes the default value of devtronEnterprise to false in bom * handled the imagePullSecrets nil pointer error * Updated function to handle nil pointer * added passKey for scoop * Updated latest image of dashboard in installer * Updated latest image of kubewatch in installer * Updated latest image of ci-runner in installer * Updated latest image of devtron in installer * Updated latest image of git-sensor in installer * Updated latest image of lens in installer * Updated latest image of kubelink in installer * Updated latest image of image-scanner in installer * Updated latest image of notifier in installer * Update release.txt * Updated latest image of hyperion in installer * Update values.yaml Updated Dashboard configmap-variables * Update devtron.yaml ConfigMap Variables * Update devtron-bom.yaml Dashboard-ConfigMap Changes * Updated release-notes files * Update workflow.yaml Moved service-account chart-sync and devtron-default-sa out of if statement * Update configmap-secret.yaml added pre-upgrade hook to devtron-secret * Update configmap-secret.yaml reverted pre-upgrade hook from devtron-secret * Updated release-notes files * Update configmap-secret.yaml Reverted ORCH_TOKEN changes * Update casbin.yaml * Update values.yaml updated the GLOBAL_API_TIMEOUT: "60000" * Update devtron-bom.yaml updated the GLOBAL_API_TIMEOUT: "60000" * Update casbin.yaml * Update values.yaml Removed resources from casbin * Update devtron-bom.yaml Removed casbin resources * Update casbin.yaml * Update values.yaml Changed the user defined gitops flag to false.
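For context on the "Create ImagePullSecrets Function" noted above: the imagePullSecret helper added to charts/devtron/templates/_helpers.tpl later in this patch renders a kubernetes.io/dockerconfigjson payload by nesting the registry credentials under an "auths" key, with "auth" holding base64("username:password"), and then base64-encoding the whole JSON for the Secret's .dockerconfigjson field. A minimal Go sketch of that same encoding follows; the function and variable names are illustrative, not Devtron's actual API.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// buildDockerConfigJSON mirrors the shape the chart's imagePullSecret
// helper renders: credentials nested under "auths" keyed by registry,
// with "auth" holding base64("username:password").
func buildDockerConfigJSON(registry, username, password string) (string, error) {
	auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
	cfg := map[string]map[string]map[string]string{
		"auths": {
			registry: {
				"username": username,
				"password": password,
				"auth":     auth,
			},
		},
	}
	raw, err := json.Marshal(cfg)
	if err != nil {
		return "", err
	}
	// The Secret's .dockerconfigjson data value is the base64 of this JSON.
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	s, _ := buildDockerConfigJSON("quay.io", "user", "pass")
	fmt.Println(s)
}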
* Update devtron-bom.yaml * Updated latest image of devtron in installer * Updated latest image of kubelink in installer * Updated latest image of dashboard in installer * Updated latest image of image-scanner in installer * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of hyperion in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of image-scanner in installer * Update casbin.yaml * Updated latest image of ci-runner in installer * Updated Scoop-Config * Updated latest image of devtron in installer * Updated latest image of kubewatch in installer * Updated latest image of kubelink in installer * Updated latest image of git-sensor in installer * Updated latest image of lens in installer * Updated latest image of dashboard in installer * Updated latest image of ci-runner in installer * Updated latest image of chart-sync in installer * Updated latest image of image-scanner in installer * Updated latest image of hyperion in installer * Updated release-notes files * Updated latest image of devtron in installer * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated release-notes files * Updated latest image of devtron in installer * updated nats port in nats-server.yaml and added INSTALLED_MODULES in devtron.yaml * Updated latest image of kubewatch in installer * Updated latest image of kubelink in installer * Updated latest image of devtron in installer * Updated latest image of git-sensor in installer * Updated latest image of lens in installer * Updated latest image of ci-runner in installer * Updated latest image of notifier in installer * Updated latest image of chart-sync in installer * Updated latest image of image-scanner in installer * Updated latest image of hyperion in installer * Updated release-notes files * Updated latest image of dashboard in installer * Update devtron.yaml added CASBIN_CLIENT_URL in devtron-cm * Updated latest image of hyperion in installer * Updated latest image of devtron in installer * Updated release-notes files * Updated latest image of chart-sync in installer * Updated release-notes files * Updated latest image of kubewatch in installer * Updated latest image of devtron in installer * Updated latest image of kubelink in installer * Updated release-notes files * Updated latest image of notifier in installer * Updated latest image of dashboard in installer * Updated latest image of hyperion in installer * Updated release-notes files * Update casbin.yaml * Update scoop.yaml * Update releasenotes.md * Updated latest image of hyperion in installer * Updated latest image of devtron in installer * Update workflow.yaml * Update update-release-notes.yml * Update values.yaml * Delete CHANGELOG/release-notes-v0.7.2.md * Update values.yaml * Update devtron-bom.yaml * Update Chart.yaml * Update releasenotes.md bugs title modified * Update releasenotes.md modified Enhancements * chore: removes enterprise feature listing of externalCi job * Update devtron-bom.yaml * Update values.yaml * Update grafana.yaml * Update minio.yaml * Update migrator.yaml 
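The INSTALLED_MODULES change mentioned above is implemented further down in charts/devtron/templates/devtron.yaml as a Helm template that appends each enabled module to a list and joins it with commas. A rough Go equivalent of that assembly logic, with hypothetical booleans standing in for chart values such as has "cicd" .Values.installer.modules and .Values.notifier.enabled:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical flags standing in for the chart's values.
	cicd, notifier, trivy, grafana := true, true, false, true

	var modules []string
	if cicd {
		modules = append(modules, "cicd")
		if notifier {
			modules = append(modules, "notifier")
		}
		if trivy {
			modules = append(modules, "security.trivy")
		}
		if grafana {
			modules = append(modules, "monitoring.grafana")
		}
		// The template also appends "security.clair" and "argo-cd"
		// under their respective flags.
	}
	// INSTALLED_MODULES is the comma-joined list, or "" when empty.
	fmt.Println(strings.Join(modules, ","))
}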
* Update workflow.yaml * Update devtron-installer.yaml * Update installation-script * Updated latest image of hyperion in installer * Update values.yaml * Update devtron-bom.yaml * Update installation-script * Update devtron-installer.yaml * Update Chart.yaml * Update devtron.yaml * Update minio.yaml * Update minio.yaml * Updated release-notes files * Update release-notes-v0.7.2.md * Update releasenotes.md * Update migrator.yaml * Update nats-server.yaml --------- Co-authored-by: ReleaseBot Co-authored-by: akshatsinha007 <156403098+akshatsinha007@users.noreply.github.com> Co-authored-by: Pawan Mehta <117346502+pawan-mehta-dt@users.noreply.github.com> Co-authored-by: akshatsinha007 Co-authored-by: Vikram <73224103+vikramdevtron@users.noreply.github.com> Co-authored-by: Abhibhaw Asthana <39991296+abhibhaw@users.noreply.github.com> --- .github/workflows/update-release-notes.yml | 1 + CHANGELOG/release-notes-v0.7.2.md | 127 +++++++++++++ charts/devtron/Chart.yaml | 4 +- charts/devtron/devtron-bom.yaml | 58 ++++-- charts/devtron/templates/_helpers.tpl | 10 +- charts/devtron/templates/app-sync-job.yaml | 5 +- charts/devtron/templates/casbin.yaml | 125 +++++++++++++ .../devtron/templates/configmap-secret.yaml | 56 +++++- charts/devtron/templates/dashboard.yaml | 3 +- charts/devtron/templates/devtron-scc.yaml | 2 + charts/devtron/templates/devtron.yaml | 40 +++- charts/devtron/templates/dex.yaml | 2 +- charts/devtron/templates/gitsensor.yaml | 3 +- charts/devtron/templates/grafana.yaml | 9 +- charts/devtron/templates/install.yaml | 1 + charts/devtron/templates/kubelink.yaml | 2 +- charts/devtron/templates/kubewatch.yaml | 2 +- charts/devtron/templates/lens.yaml | 3 +- charts/devtron/templates/migrator.yaml | 16 +- charts/devtron/templates/minio.yaml | 9 +- charts/devtron/templates/nats-server.yaml | 9 +- charts/devtron/templates/notifier.yaml | 3 +- charts/devtron/templates/postgresql.yaml | 6 +- charts/devtron/templates/scoop.yaml | 169 +++++++++++++++++ charts/devtron/templates/workflow.yaml | 25 ++- charts/devtron/values.yaml | 79 +++++--- manifests/install/devtron-installer.yaml | 2 +- manifests/installation-script | 2 +- manifests/release.txt | 2 +- manifests/yamls/dashboard.yaml | 2 +- manifests/yamls/devtron.yaml | 6 +- manifests/yamls/gitsensor.yaml | 4 +- manifests/yamls/image-scanner.yaml | 2 +- manifests/yamls/kubelink.yaml | 2 +- manifests/yamls/kubewatch.yaml | 2 +- manifests/yamls/lens.yaml | 2 +- manifests/yamls/notifier.yaml | 2 +- releasenotes.md | 176 ++++++++++++------ 38 files changed, 823 insertions(+), 150 deletions(-) create mode 100644 CHANGELOG/release-notes-v0.7.2.md create mode 100644 charts/devtron/templates/casbin.yaml create mode 100644 charts/devtron/templates/scoop.yaml diff --git a/.github/workflows/update-release-notes.yml b/.github/workflows/update-release-notes.yml index ed6d35fdac6..b2641ec3970 100644 --- a/.github/workflows/update-release-notes.yml +++ b/.github/workflows/update-release-notes.yml @@ -7,6 +7,7 @@ on: - closed branches: - main + - develop # Allows you to run this workflow manually from the Actions tab workflow_dispatch: diff --git a/CHANGELOG/release-notes-v0.7.2.md b/CHANGELOG/release-notes-v0.7.2.md new file mode 100644 index 00000000000..e78a01c4176 --- /dev/null +++ b/CHANGELOG/release-notes-v0.7.2.md @@ -0,0 +1,127 @@ +## v0.7.2 + +## Bugs +- fix: error in enable change ci (#5358) +- fix: ci patch rbac fixes (#5461) +- fix: bitbucket commit race condition for concurrent requests (#5505) +- fix: handle nil check image scanning (#5497) +- 
fix: error in switching ci to external ci (#5500)
+- fix: autoscale error handling (#5481)
+- fix: ci material update fixes for linked ci pipelines (#5523)
+- fix: Unable to get HPA manifest for no-gitops deployment (#5522)
+- fix: Deployment stuck in starting for no-gitops based pipelines (#5526)
+- fix: panic handling for deleted app in app group and env group filters (#5541)
+- fix: security time fix when scanning is passed (#5549)
+- fix: app group query optimisations (#5558)
+- fix: version and fixed_version in image scan result table (#5552)
+- fix: add if not exists in migration script for avoiding any errors while rerunning scripts (#5579)
+- fix: Resource Browser Shortnames are not applying dynamically (#5573)
+- fix: tls enabled flag not getting passed (#5609)
+- fix: reverting acd token fetch logic (#5614)
+- fix: query optimisations for app group cd listing and ci pipeline blockage state (#5641)
+- fix: dependabot security updates (#5608)
+- fix: default PipelineType given (#5668)
+- fix: validation in CiJob for external Artifact (#5669)
+- fix: Nats Panic Error in Orchestrator (#5670)
+- fix: SSH & Proxy Cluster flows broken (#5675)
+- fix: Restart in orchestrator just after release (#5671)
+- fix: Sql query optimisation for application group app status listing (#5672)
+- fix: handling for HPA (autoscaling) (#5666)
+- fix: refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696)
+- fix: Decode secret fix on add update oss (#5695)
+- fix: saving pco concurrency case handled (#5688)
+- fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701)
+- fix: Issue in EA Mode Cluster - error: pg: multiple rows in result set. (#5708)
+- fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712)
+- fix: migration sync (#5718)
+- fix: ci patch rbac for branch update (#5759)
+- fix: Bitnami chart repo tls issue (#5740)
+- fix: check rbac on env if envName is present (#5765)
+- fix: scan tool active check removed (#5771)
+- fix: panic handlings and argocd app delete stuck in partial stage (#5770)
+- fix: unimplemented cluster cron service (#5781)
+- fix: sql injection fixes (#5783)
+- fix: sql injection fixes (#5801)
+- fix: upgraded to /argo-cd/v2 v2.9.21 (#5758)
+- fix: Ea rbac issues and not working on airgapped (#5813)
+- fix: scan list in global security page sql injection fix (#5808)
+- fix: app details page breaking (#5823)
+- fix: plugin ip variables value getting changed (#5844)
+- fix: ignore kubelink errors in server startup (#5852) (#5854)
+- fix: user rbac flows (#5804)
+- fix: pg multiple rows in EA mode (#5869)
+- fix: app overview panic for helm app (#5863)
+- fix: app detail page breaking (#5873)
+- fix: copy container image plugin issue (#5876)
+- fix: create GitOps configuration issue (#5883)
+## Enhancements
+- feat: support for handling hibernation and un-hibernation for keda enabled (#5431)
+- feat: Async ArgoCd App refresh operation (#5448)
+- feat: deployment config migration (#5368)
+- feat: Skipping flag based CMCS for Ci Job (#5536)
+- feat: expose git commit data as env vars for ci stage (#5534)
+- feat: Defining applications as part of release track (#5489)
+- feat: gitlab webhook support (#5420)
+- feat: Enhance the buildx to use cache for multi arch builds (#5307)
+- feat: bug fix for picking wrong values in docker arguments (#5565)
+- feat: enable external argocd listing (#5585)
+- feat: plugin versioning feature (#5352)
+- feat: service account in chart sync (#5584)
+- feat: panic in sync pod cron and terminal not opening fix (#5603)
+- feat: tls support for git and gitops (#5305)
+- feat: system network controller sql script (#5637)
+- feat: skip argowf logs from ci logs (#5646)
+- feat: gitops support for oci repositories (#5577)
+- feat: ext argo app rbac and missing common features and flux app listing and details with rbac (#5528)
+- feat: expose git ops metrics (#5582)
+- feat: Generate config and secret hash for application mounting external k8s secrets (#5626)
+- feat: Env description handling (#5744)
+- feat: Added basic auth support for servicemonitor (#5761)
+- feat: Docker pull env driven (#5767)
+- feat: plugin creation support (#5630)
+- feat: Added multiple features support in servicemonitor (#5789)
+## Documentation
+- doc: Added FAQ no. 28 + GoLang-migrate Link + Code Block Fix (#5502)
+- docs: Drafted Software Distribution Hub (#5459)
+- doc: Created Image Label + Comments Doc (#5314)
+- doc: FAQ added for Bitnami Charts (#5545)
+- doc: Added Keycloak SSO Doc (#5571)
+- doc: Code scan plugin docs (#5562)
+- docs: jenkins-plugin (#5542)
+- doc: Copacetic plugin docs (#5564)
+- doc: Pull images from container repository (#5563)
+- doc: Collated Doc Fixes for July (#5591)
+- doc: Drafted Schema Driven DT (#5533)
+- doc: fixes in Copacetic plugin doc (#5622)
+- doc: Edit Deployment Chart Schema (#5735)
+- doc: Redirection of old entry in gitbook.yaml (#5738)
+- docs: added Documentation for Air-Gapped Installation (#5360)
+- doc: Update prerequisites of code-scan (#5625)
+- doc: Cosign plugin doc (#5665)
+- doc: CraneCopy plugin doc (#5658)
+- doc: Devtron CD Trigger Plugin doc (#5747)
+- doc: DockerSlim plugin doc (#5660)
+- doc: Devtron Job Trigger Plugin doc (#5742)
+- doc: Vulnerability Scanning Plugin doc (#5722)
+- docs: Jira plugins doc (Validator + Updater) (#5709)
+- docs: added commands enable ingress during helm installation (#5794)
+- doc: Revamped + Restructured Ingress Setup Doc (#5798)
+- docs: modifying route in ingress doc (#5799)
+- docs: modified the anchorlink in ingress.md (#5800)
+- doc: ArgoCD + FluxCD App Listing (#5636)
+- doc: Added Special CEL Expr in Filter Condition doc (#5850)
+## Others
+- misc: removal of azure-devops-issue-sync.yml (#5592)
+- misc: added action for discord webhook (#5615)
+- misc: Revert "misc: added action for discord webhook" (#5619)
+- chore: Plugin script fix oss (#5661)
+- misc: Release candidate v0.16.0 (#5687)
+- chore: migration number changes (#5692)
+- chore: ea fixes for helm app (#5713)
+- misc: Main sync rc - branch update (#5753)
+- chore: Revert "feat: plugin creation support" (#5778)
+- chore: cron status update refactoring (#5790)
+- misc: sync with common-lib changes with release candidate 18 (#5830)
+- chore: Custom tag for copy container image plugin (#5760) (#5841)
+- chore: migration number fix (#5840)
+- misc: Update CODEOWNERS (#5885)
diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml
index 9f5318f6307..4c50dadb084 100644
--- a/charts/devtron/Chart.yaml
+++ b/charts/devtron/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: devtron-operator
-appVersion: 0.7.1
+appVersion: 0.7.2
 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system.
keywords: - Devtron @@ -11,7 +11,7 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.73 +version: 0.22.74 sources: - https://github.com/devtron-labs/charts dependencies: diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index 6e35b6fc0ab..9f96cb713f4 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -8,13 +8,9 @@ global: runAsUser: 1000 runAsNonRoot: true containerRegistry: "quay.io/devtron" - # The below values can be specified both at global as well as component level - nodeSelector: {} - tolerations: [] - imagePullSecrets: [] extraManifests: [] installer: - release: "v0.7.1" + release: "v0.7.2" registry: "" image: "inception" tag: "473deaa4-185-21582" @@ -30,21 +26,25 @@ components: ENABLE_CI_JOB: "true" GLOBAL_API_TIMEOUT: "60000" TRIGGER_API_TIMEOUT: "60000" - ENABLE_EXTERNAL_ARGO_CD: "false" + ENABLE_EXTERNAL_ARGO_CD: "true" SERVICE_WORKER_TIMEOUT: "1" API_BATCH_SIZE: "30" + FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true" + FEATURE_STEP_WISE_LOGS_ENABLE: "true" + FEATURE_USER_DEFINED_GITOPS_REPO_ENABLE: "true" + ENABLE_RESOURCE_SCAN: "true" registry: "" - image: "dashboard:5f95d187-690-23841" + image: "dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent devtron: registry: "" - image: "hyperion:291c4c75-280-23860" - cicdImage: "devtron:291c4c75-434-23853" + image: "hyperion:3f68456b-280-25566" + cicdImage: "devtron:3f68456b-434-25567" imagePullPolicy: IfNotPresent customOverrides: {} ciRunner: registry: "" - image: "ci-runner:48aca9f4-138-23844" + image: "ci-runner:fd5702db-138-25483" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -53,7 +53,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:0dee6306-564-23843" + image: "kubelink:6ef0fbbe-564-25533" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -71,7 +71,7 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:850b40d5-419-23840" + image: "kubewatch:7c8611f4-419-25531" imagePullPolicy: IfNotPresent configs: devtroncd_NAMESPACE: "devtron-ci" @@ -91,7 +91,7 @@ components: armImage: postgres_exporter:v0.10.1 gitsensor: registry: "" - image: "git-sensor:86e13283-200-23847" + image: "git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -109,7 +109,7 @@ components: # Values for lens lens: registry: "" - image: "lens:56211042-333-23839" + image: "lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent configs: GIT_SENSOR_PROTOCOL: GRPC @@ -154,7 +154,7 @@ components: DB_NAME: "lens" chartSync: registry: "" - image: chart-sync:5a1d0301-150-23845 + image: chart-sync:13ffae06-150-25515 # values for argocd integration argo-cd: global: @@ -174,14 +174,14 @@ workflowController: IMDSv1ExecutorImage: "argoexec:v3.0.7" security: imageScanner: - image: "image-scanner:137872c2-141-23848" + image: "image-scanner:348201f8-141-25486" clair: image: repository: clair tag: 4.3.6 # Values for notifier integration notifier: - image: "notifier:9639b1ab-372-23850" + image: "notifier:06392394-372-25535" minio: image: "minio:RELEASE.2021-02-14T04-01-33Z" mbImage: "minio-mc:RELEASE.2021-02-14T04-28-06Z" @@ -200,3 +200,27 @@ monitoring: image: "k8s-sidecar:1.1.0" curlImage: "curl:7.73.0" imagePullPolicy: IfNotPresent +devtronEnterprise: + enabled: false + casbin: + registry: "" + image: "casbin:efc28fb2-6de0e914-462-25420" + imagePullPolicy: IfNotPresent + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_DATABASE: 
casbin + PG_PORT: "5432" + PG_USER: postgres + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + resources: {} + scoop: + enabled: false + registry: "" + image: "scoop:296d351d-629-24001" + imagePullPolicy: IfNotPresent + resources: {} + configs: + CLUSTER_ID: "1" + ORCHESTRATOR_URL: http://devtron-service.devtroncd.svc.cluster.local/orchestrator diff --git a/charts/devtron/templates/_helpers.tpl b/charts/devtron/templates/_helpers.tpl index 97da656497d..97f2766cc7b 100644 --- a/charts/devtron/templates/_helpers.tpl +++ b/charts/devtron/templates/_helpers.tpl @@ -19,13 +19,19 @@ it randomly. {{- end -}} {{- end }} +{{- define "imagePullSecret" }} +{{- with .Values.imagePullSecret.credentials }} +{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"auth\":\"%s\"}}}" .registry .username .password (printf "%s:%s" .username .password | b64enc) | b64enc }} +{{- end }} +{{- end }} + {{/* Expand the node selectors, tolerations, and image pull secrets for a Kubernetes resource. Usage: -{{ include "common.nodeSelector" (dict "nodeSelector" .Values.path.to.nodeSelector "tolerations" .Values.path.to.tolerations "imagePullSecrets" .Values.path.to.imagePullSecrets "global" .Values.global ) }} +{{ include "common.schedulerConfig" (dict "nodeSelector" .Values.path.to.nodeSelector "tolerations" .Values.path.to.tolerations "imagePullSecrets" .Values.path.to.imagePullSecrets "global" .Values.global ) }} */}} -{{- define "common.nodeSelector" -}} +{{- define "common.schedulerConfig" -}} {{- if .nodeSelector }} nodeSelector: {{ toYaml .nodeSelector | indent 2 }} diff --git a/charts/devtron/templates/app-sync-job.yaml b/charts/devtron/templates/app-sync-job.yaml index d665faadc8e..92da12d5a25 100644 --- a/charts/devtron/templates/app-sync-job.yaml +++ b/charts/devtron/templates/app-sync-job.yaml @@ -11,7 +11,7 @@ spec: template: spec: serviceAccountName: devtron - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - name: migration-wait image: {{ include "common.image" (dict "component" $.Values.components.migrator "global" $.Values.global "extraImage" $.Values.components.migrator.kubectlImage ) }} @@ -75,7 +75,8 @@ spec: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 10 }} + serviceAccountName: chart-sync + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 10 }} {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 12 }} diff --git a/charts/devtron/templates/casbin.yaml b/charts/devtron/templates/casbin.yaml new file mode 100644 index 00000000000..1a21f32143a --- 
/dev/null +++ b/charts/devtron/templates/casbin.yaml @@ -0,0 +1,125 @@ +{{- if and .Values.devtronEnterprise.enabled }} +{{- with .Values.devtronEnterprise.casbin }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: casbin + release: casbin + name: casbin + namespace: devtroncd +spec: + minReadySeconds: 60 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: casbin + release: casbin + template: + metadata: + labels: + app: casbin + release: casbin + spec: + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.devtronEnterprise.casbin.nodeSelector "tolerations" $.Values.devtronEnterprise.casbin.tolerations "imagePullSecrets" $.Values.devtronEnterprise.casbin.imagePullSecrets "global" $.Values.global) | indent 6 }} + containers: + - name: casbin + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.casbin "global" $.Values.global) }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + env: + - name: DEVTRON_APP_NAME + value: casbin + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .dbconfig }} + - name: PG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .dbconfig.secretName }} + key: {{ .dbconfig.keyName }} + {{- end }} + envFrom: + - configMapRef: + name: casbin-cm + livenessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: http + protocol: TCP + - containerPort: 9000 + name: app + protocol: TCP + {{- if .resources }} + resources: +{{ toYaml .resources | indent 12 }} + {{- end }} + volumeMounts: [] + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: [] +--- +# Casbin ConfigMap +apiVersion: v1 +kind: ConfigMap +metadata: + name: casbin-cm + namespace: devtroncd + labels: + app: casbin + release: casbin +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} +--- +# Casbin Service +apiVersion: v1 +kind: Service +metadata: + labels: + app: casbin + release: casbin + annotations: + "helm.sh/resource-policy": keep + name: casbin-service + namespace: devtroncd +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + - name: app + port: 9000 + protocol: TCP + targetPort: app + selector: + app: casbin + release: casbin + type: ClusterIP +{{- end}} +{{- end}} diff --git a/charts/devtron/templates/configmap-secret.yaml b/charts/devtron/templates/configmap-secret.yaml index b856f736dcc..3b6127f3ccd 100644 --- a/charts/devtron/templates/configmap-secret.yaml +++ b/charts/devtron/templates/configmap-secret.yaml @@ -247,9 +247,9 @@ data: PG_PASSWORD: {{ $postgresPwd }} {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} + ORCH_TOKEN: {{ $ORCH_TOKEN }} EXTERNAL_CI_API_SECRET: {{ $EXTERNAL_CI_API_SECRET }} WEBHOOK_TOKEN: {{ $WEBHOOK_TOKEN }} - ORCH_TOKEN: {{ $ORCH_TOKEN }} DEX_SECRET: {{ $DEX_SECRET }} DEX_JWTKEY: {{ $DEX_JWTKEY }} DEX_CSTOREKEY: {{ $DEX_CSTOREKEY }} @@ -289,3 +289,57 @@ data: {{- end }} {{- end }} type: Opaque + +{{- if $.Values.imagePullSecret }} +{{- if $.Values.imagePullSecret.create }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name 
}} + namespace: devtroncd + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +{{- if eq .Values.imagePullSecret.namespaceScope "all" }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: devtron-cd + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: devtron-ci + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: argo + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/devtron/templates/dashboard.yaml b/charts/devtron/templates/dashboard.yaml index d909d4978dd..8d978e8cace 100644 --- a/charts/devtron/templates/dashboard.yaml +++ b/charts/devtron/templates/dashboard.yaml @@ -77,7 +77,8 @@ spec: securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} {{- end }} - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.dashboard.nodeSelector "tolerations" $.Values.components.dashboard.tolerations "imagePullSecrets" $.Values.components.dashboard.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.dashboard.nodeSelector "tolerations" $.Values.components.dashboard.tolerations "imagePullSecrets" $.Values.components.dashboard.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa containers: - name: dashboard image: {{ include "common.image" (dict "component" $.Values.components.dashboard "global" $.Values.global) }} diff --git a/charts/devtron/templates/devtron-scc.yaml b/charts/devtron/templates/devtron-scc.yaml index b6f1c9680e8..1f5f10d03eb 100644 --- a/charts/devtron/templates/devtron-scc.yaml +++ b/charts/devtron/templates/devtron-scc.yaml @@ -32,6 +32,8 @@ users: - system:serviceaccount:devtroncd:argocd-server - system:serviceaccount:devtron-ci:ci-runner - system:serviceaccount:devtron-cd:cd-runner +- system:serviceaccount:devtroncd:chart-sync +- system:serviceaccount:devtroncd:devtron-default-sa volumes: - '*' {{- end }} diff --git a/charts/devtron/templates/devtron.yaml b/charts/devtron/templates/devtron.yaml index db2f24ccc8f..23c39c90b7a 100644 --- a/charts/devtron/templates/devtron.yaml +++ b/charts/devtron/templates/devtron.yaml @@ -1,4 +1,5 @@ {{- with .Values.components.devtron }} +{{- $argocdEnabled := index $.Values "argo-cd" }} --- apiVersion: v1 kind: ConfigMap @@ -19,6 +20,12 @@ data: DEX_HOST: http://argocd-dex-server.devtroncd DEX_PORT: "5556" APP_SYNC_IMAGE: {{ include "common.image" (dict "component" $.Values.components.chartSync "global" $.Values.global ) }} + {{- if and $.Values.devtronEnterprise.enabled $.Values.devtronEnterprise.scoop.enabled }} + SCOOP_CLUSTER_CONFIG: '{"1":{"serviceName":"scoop-service","passKey":"qhihdidhwid","namespace":"devtroncd","port":"80"}}' + {{- end }} + {{- if 
$.Values.devtronEnterprise.enabled }} + CASBIN_CLIENT_URL: casbin-service.devtroncd:9000 + {{- end }} {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} CD_HOST: "argocd-server.devtroncd" @@ -86,7 +93,16 @@ data: ENFORCER_MAX_BATCH_SIZE: "1" DEVTRON_SECRET_NAME: "devtron-secret" ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART: "false" - USE_ARTIFACT_LISTING_API_V2: "true" + USE_ARTIFACT_LISTING_API_V2: "false" + ASYNC_BUILDX_CACHE_EXPORT: "true" + BUILDX_CACHE_MODE_MIN: "false" + DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT: "1" + IN_APP_LOGGING_ENABLED: "true" + PARALLELISM_LIMIT_FOR_TAG_PROCESSING: "2" + SCAN_V2_ENABLED: "false" + TIMEOUT_IN_SECONDS: "60" + SHOW_DOCKER_BUILD_ARGS: "true" + FORCE_SECURITY_SCANNING: "false" RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS: "true" ENABLE_ASYNC_INSTALL_DEVTRON_CHART: "true" DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT: "6" @@ -160,6 +176,26 @@ data: {{- if .customOverrides }} {{ toYaml .customOverrides | indent 2}} {{- end }} + {{- $modules := list }} + {{- if has "cicd" $.Values.installer.modules }} + {{- $modules = append $modules "cicd" }} + {{- if $.Values.notifier.enabled }} + {{- $modules = append $modules "notifier" }} + {{- end }} + {{- if and $.Values.security.enabled $.Values.security.trivy.enabled }} + {{- $modules = append $modules "security.trivy" }} + {{- end }} + {{- if and $.Values.security.enabled $.Values.security.clair.enabled }} + {{- $modules = append $modules "security.clair" }} + {{- end }} + {{- if $.Values.monitoring.grafana.enabled }} + {{- $modules = append $modules "monitoring.grafana" }} + {{- end }} + {{- if ($argocdEnabled.enabled) }} + {{- $modules = append $modules "argo-cd" }} + {{- end }} + {{- end }} + INSTALLED_MODULES: {{ if $modules }}{{ printf "'%s'" (join "," $modules) }}{{ else }}""{{ end }} DEFAULT_CI_IMAGE: {{ include "common.image" (dict "component" $.Values.components.ciRunner "global" $.Values.global ) }} --- apiVersion: v1 @@ -201,7 +237,7 @@ spec: app: devtron release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.devtron.nodeSelector "tolerations" $.Values.components.devtron.tolerations "imagePullSecrets" $.Values.components.devtron.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.devtron.nodeSelector "tolerations" $.Values.components.devtron.tolerations "imagePullSecrets" $.Values.components.devtron.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccountName: devtron diff --git a/charts/devtron/templates/dex.yaml b/charts/devtron/templates/dex.yaml index b5bbaadbcc9..a95c0379b7c 100644 --- a/charts/devtron/templates/dex.yaml +++ b/charts/devtron/templates/dex.yaml @@ -59,7 +59,7 @@ spec: labels: app.kubernetes.io/name: argocd-dex-server spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.argocdDexServer.nodeSelector "tolerations" $.Values.components.argocdDexServer.tolerations "imagePullSecrets" $.Values.components.argocdDexServer.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.argocdDexServer.nodeSelector "tolerations" $.Values.components.argocdDexServer.tolerations "imagePullSecrets" $.Values.components.argocdDexServer.imagePullSecrets "global" $.Values.global) | indent 6 }} affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff 
--git a/charts/devtron/templates/gitsensor.yaml b/charts/devtron/templates/gitsensor.yaml index 4697699b0b4..6248b7381b3 100644 --- a/charts/devtron/templates/gitsensor.yaml +++ b/charts/devtron/templates/gitsensor.yaml @@ -73,7 +73,8 @@ spec: securityContext: runAsGroup: 1000 runAsUser: 1000 - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.gitsensor.nodeSelector "tolerations" $.Values.components.gitsensor.tolerations "imagePullSecrets" $.Values.components.gitsensor.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.gitsensor.nodeSelector "tolerations" $.Values.components.gitsensor.tolerations "imagePullSecrets" $.Values.components.gitsensor.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - command: - /bin/sh diff --git a/charts/devtron/templates/grafana.yaml b/charts/devtron/templates/grafana.yaml index c99a841e4af..3fb4b8a6212 100644 --- a/charts/devtron/templates/grafana.yaml +++ b/charts/devtron/templates/grafana.yaml @@ -12,9 +12,10 @@ kind: Job metadata: name: grafana-org-job spec: + ttlSecondsAfterFinished: 100 template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: devtron containers: - name: grafana-restart @@ -511,7 +512,7 @@ spec: fsGroup: 472 runAsGroup: 472 runAsUser: 472 - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - name: init-chown-data image: {{ include "common.image" (dict "component" $.Values.monitoring.grafana "global" $.Values.global "extraImage" $.Values.monitoring.grafana.busyboxImage ) }} @@ -660,7 +661,7 @@ metadata: namespace: devtroncd spec: serviceAccountName: devtron-grafana-test - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 2 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 2 }} containers: - name: devtron-test image: {{ include "common.image" (dict "component" $.Values.monitoring.grafana "global" $.Values.global "extraImage" $.Values.monitoring.grafana.batsImage ) }} @@ -679,4 +680,4 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git 
a/charts/devtron/templates/install.yaml b/charts/devtron/templates/install.yaml index 123e037885a..e3e6192910f 100644 --- a/charts/devtron/templates/install.yaml +++ b/charts/devtron/templates/install.yaml @@ -80,6 +80,7 @@ spec: labels: app: inception spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.installer.nodeSelector "tolerations" $.Values.installer.tolerations "imagePullSecrets" $.Values.installer.imagePullSecrets "global" $.Values.global) | indent 6 }} {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} diff --git a/charts/devtron/templates/kubelink.yaml b/charts/devtron/templates/kubelink.yaml index 25436ac2a4d..f4e93054f0b 100644 --- a/charts/devtron/templates/kubelink.yaml +++ b/charts/devtron/templates/kubelink.yaml @@ -57,7 +57,7 @@ spec: labels: app: kubelink spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.kubelink.nodeSelector "tolerations" $.Values.components.kubelink.tolerations "imagePullSecrets" $.Values.components.kubelink.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.kubelink.nodeSelector "tolerations" $.Values.components.kubelink.tolerations "imagePullSecrets" $.Values.components.kubelink.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccount: devtron diff --git a/charts/devtron/templates/kubewatch.yaml b/charts/devtron/templates/kubewatch.yaml index fc7366deda3..fa199caf3ca 100644 --- a/charts/devtron/templates/kubewatch.yaml +++ b/charts/devtron/templates/kubewatch.yaml @@ -167,7 +167,7 @@ spec: app: kubewatch release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.kubewatch.nodeSelector "tolerations" $.Values.components.kubewatch.tolerations "imagePullSecrets" $.Values.components.kubewatch.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.kubewatch.nodeSelector "tolerations" $.Values.components.kubewatch.tolerations "imagePullSecrets" $.Values.components.kubewatch.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccountName: kubewatch diff --git a/charts/devtron/templates/lens.yaml b/charts/devtron/templates/lens.yaml index 503fd22eb44..c3a87b34626 100644 --- a/charts/devtron/templates/lens.yaml +++ b/charts/devtron/templates/lens.yaml @@ -66,7 +66,8 @@ spec: app: lens release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.lens.nodeSelector "tolerations" $.Values.components.lens.tolerations "imagePullSecrets" $.Values.components.lens.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.lens.nodeSelector "tolerations" $.Values.components.lens.tolerations "imagePullSecrets" $.Values.components.lens.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa terminationGracePeriodSeconds: 30 restartPolicy: Always {{- if and $.Values.global $.Values.global.podSecurityContext }} diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index 00313889aee..31247c32776 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ 
-14,7 +14,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -122,7 +123,7 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: devtron {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: @@ -221,7 +222,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -300,7 +302,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -378,9 +381,10 @@ kind: Job metadata: name: postgresql-miscellaneous spec: + ttlSecondsAfterFinished: 100 template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets 
"global" $.Values.global) | indent 6 }} securityContext: fsGroup: 1000 runAsGroup: 1000 @@ -415,4 +419,4 @@ spec: backoffLimit: 20 activeDeadlineSeconds: 1800 {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/minio.yaml b/charts/devtron/templates/minio.yaml index e445ca43931..1f788bfe82c 100644 --- a/charts/devtron/templates/minio.yaml +++ b/charts/devtron/templates/minio.yaml @@ -259,7 +259,7 @@ spec: app: minio release: {{ $.Release.Name }} spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: "devtron-minio" securityContext: runAsUser: 1000 @@ -322,13 +322,14 @@ metadata: release: {{ $.Release.Name }} heritage: Helm spec: + ttlSecondsAfterFinished: 100 template: metadata: labels: app: minio-job release: {{ $.Release.Name }} spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} restartPolicy: OnFailure volumes: - name: minio-configuration @@ -384,7 +385,7 @@ spec: app: minio release: devtron-minio spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: "devtron-minio" containers: - name: minio @@ -415,4 +416,4 @@ spec: secretName: devtron-minio {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/nats-server.yaml b/charts/devtron/templates/nats-server.yaml index 0c06cd9fe94..e6aa25f71e6 100644 --- a/charts/devtron/templates/nats-server.yaml +++ b/charts/devtron/templates/nats-server.yaml @@ -91,8 +91,9 @@ spec: app.kubernetes.io/name: nats app.kubernetes.io/instance: devtron-nats spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa # Common volumes for the containers. 
- {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 6 }} volumes: - name: config-volume @@ -139,8 +140,6 @@ spec: name: cluster - containerPort: 8222 name: monitor - - containerPort: 7777 - name: metrics command: - "nats-server" @@ -268,7 +267,7 @@ metadata: app.kubernetes.io/instance: devtron-nats app.kubernetes.io/managed-by: Helm spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 2 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 2 }} containers: - name: nats-box image: {{ include "common.image" (dict "component" $.Values.components.nats.natsBox "global" $.Values.global) }} @@ -316,4 +315,4 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/notifier.yaml b/charts/devtron/templates/notifier.yaml index 054046e5a3e..55116ed1431 100644 --- a/charts/devtron/templates/notifier.yaml +++ b/charts/devtron/templates/notifier.yaml @@ -72,7 +72,8 @@ spec: app: notifier release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.notifier.nodeSelector "tolerations" $.Values.notifier.tolerations "imagePullSecrets" $.Values.notifier.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.notifier.nodeSelector "tolerations" $.Values.notifier.tolerations "imagePullSecrets" $.Values.notifier.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always {{- if and $.Values.global $.Values.global.podSecurityContext }} diff --git a/charts/devtron/templates/postgresql.yaml b/charts/devtron/templates/postgresql.yaml index efcabcd0207..01e7e974866 100644 --- a/charts/devtron/templates/postgresql.yaml +++ b/charts/devtron/templates/postgresql.yaml @@ -113,7 +113,8 @@ spec: release: "devtron" role: master spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa securityContext: fsGroup: 1001 initContainers: @@ -443,7 +444,8 @@ spec: app.kubernetes.io/name: postgres app.kubernetes.io/instance: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" 
$.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa securityContext: fsGroup: 999 initContainers: diff --git a/charts/devtron/templates/scoop.yaml b/charts/devtron/templates/scoop.yaml new file mode 100644 index 00000000000..53a7587db1e --- /dev/null +++ b/charts/devtron/templates/scoop.yaml @@ -0,0 +1,169 @@ +{{- if and .Values.devtronEnterprise.enabled .Values.devtronEnterprise.scoop.enabled }} +{{- with .Values.devtronEnterprise.scoop }} +{{- $passKey := randAlphaNum 12 | lower }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: scoop-devtron + namespace: devtroncd + labels: + app: scoop +spec: + minReadySeconds: 60 + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: scoop + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: scoop + spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.devtronEnterprise.scoop.nodeSelector "tolerations" $.Values.devtronEnterprise.scoop.tolerations "imagePullSecrets" $.Values.devtronEnterprise.scoop.imagePullSecrets "global" $.Values.global) | indent 6 }} + terminationGracePeriodSeconds: 30 + restartPolicy: Always + schedulerName: default-scheduler + serviceAccountName: sa-scoop + containers: + - name: scoop + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.scoop "global" $.Values.global) }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + {{- if and $.Values.global $.Values.global.containerSecurityContext }} + securityContext: +{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + env: + - name: X-PASS-KEY + value: qhihdidhwid + - name: PASS_KEY + value: qhihdidhwid + - name: RETENTION + value: "10080" + - name: TOKEN + valueFrom: + secretKeyRef: + name: devtron-secret + key: ORCH_TOKEN + envFrom: + - configMapRef: + name: scoop-cm + ports: + - containerPort: 8080 + name: app + protocol: TCP + {{- if .resources }} + resources: + {{- toYaml .resources | nindent 12 }} + {{- end }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File +--- +# Scoop-service +apiVersion: v1 +kind: Service +metadata: + labels: + app: scoop + name: scoop-service + namespace: devtroncd +spec: + ports: + - name: app + port: 80 + protocol: TCP + targetPort: app + selector: + app: scoop + sessionAffinity: None + type: ClusterIP + +--- +# Scoop ConfigMap +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: scoop + name: scoop-cm + namespace: devtroncd +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} + +--- +# Scoop ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + app.kubernetes.io/instance: devtron + name: read-only-cluster-role-scoop +rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - "*" + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - "*" + verbs: + - get + - list + - watch + +--- +# Scoop ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-only-user-crb-scoop + annotations: + "helm.sh/resource-policy": keep +roleRef: + apiGroup: rbac.authorization.k8s.io 
+ kind: ClusterRole + name: read-only-cluster-role-scoop +subjects: + - kind: ServiceAccount + name: sa-scoop + namespace: devtroncd + +--- +# Scoop ServiceAccount +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sa-scoop + namespace: devtroncd + labels: + app: scoop + annotations: + "helm.sh/resource-policy": keep +{{- end }} +{{- end }} diff --git a/charts/devtron/templates/workflow.yaml b/charts/devtron/templates/workflow.yaml index 61e38396607..e20c28be175 100644 --- a/charts/devtron/templates/workflow.yaml +++ b/charts/devtron/templates/workflow.yaml @@ -37,6 +37,29 @@ metadata: annotations: "helm.sh/hook": pre-install "helm.sh/resource-policy": keep +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: chart-sync + namespace: devtroncd + labels: + release: devtron +{{- if $.Values.components.chartSync.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.components.chartSync.imagePullSecrets | indent 2 }} +{{- else if $.Values.global.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: devtron-default-sa + namespace: devtroncd + labels: + release: devtron {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} --- @@ -1270,7 +1293,7 @@ spec: labels: app: workflow-controller spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.workflowController.nodeSelector "tolerations" $.Values.workflowController.tolerations "imagePullSecrets" $.Values.workflowController.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.workflowController.nodeSelector "tolerations" $.Values.workflowController.tolerations "imagePullSecrets" $.Values.workflowController.imagePullSecrets "global" $.Values.global) | indent 6 }} containers: - args: - --configmap diff --git a/charts/devtron/values.yaml b/charts/devtron/values.yaml index 19f6854f385..f8b39288472 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -9,13 +9,22 @@ global: runAsNonRoot: true containerRegistry: "quay.io/devtron" # The below values can be specified both at global as well as component level + # nodeSelector: + # key: value + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "value1" + # effect: "NoSchedule" + # imagePullSecrets: + # - name: your-image-pull-secret nodeSelector: {} tolerations: [] imagePullSecrets: [] extraManifests: [] installer: repo: "devtron-labs/devtron" - release: "v0.7.1" + release: "v0.7.2" registry: "" image: inception tag: 473deaa4-185-21582 @@ -24,7 +33,6 @@ installer: openshift: false # Set this to true if you are installing on openshift production_overrides: "" # Set true if you want to use this Devtron stack in Production (This will require more resources) # Change the below values for full mode only - #Use secrets in plaintext, they'll be encoded to base64 automatically. 
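As the values.yaml comment above notes, nodeSelector, tolerations, and imagePullSecrets can be set both globally and per component, with the component-level value taking precedence (the same fallback the chart-sync ServiceAccount applies earlier in workflow.yaml). A generic Go sketch of that precedence rule, illustrative only:

package main

import "fmt"

// pick returns the component-level list when non-empty, else the global
// one — the precedence the chart applies for nodeSelector, tolerations,
// and imagePullSecrets. Generic sketch; names are illustrative.
func pick[T any](component, global []T) []T {
	if len(component) > 0 {
		return component
	}
	return global
}

func main() {
	global := []string{"devtron-image-pull"}
	var component []string // unset at the component level
	fmt.Println(pick(component, global)) // falls back to the global secret
}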
secrets: {} # REQUIRED IF BLOB_STORAGE_PROVIDER=AZURE Token with read write access to AZURE_BLOB_CONTAINER_CI_LOG and AZURE_BLOB_CONTAINER_CI_CACHE @@ -61,16 +69,20 @@ components: ENABLE_CI_JOB: "true" GLOBAL_API_TIMEOUT: "60000" TRIGGER_API_TIMEOUT: "60000" - ENABLE_EXTERNAL_ARGO_CD: "false" + ENABLE_EXTERNAL_ARGO_CD: "true" SERVICE_WORKER_TIMEOUT: "1" API_BATCH_SIZE: "30" + FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true" + FEATURE_STEP_WISE_LOGS_ENABLE: "true" + FEATURE_USER_DEFINED_GITOPS_REPO_ENABLE: "true" + ENABLE_RESOURCE_SCAN: "true" registry: "" - image: "dashboard:5f95d187-690-23841" + image: "dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent devtron: registry: "" - image: "hyperion:291c4c75-280-23860" - cicdImage: "devtron:291c4c75-434-23853" + image: "hyperion:3f68456b-280-25566" + cicdImage: "devtron:3f68456b-434-25567" imagePullPolicy: IfNotPresent customOverrides: {} serviceMonitor: @@ -96,7 +108,7 @@ components: # - devtron.example.com ciRunner: registry: "" - image: "ci-runner:48aca9f4-138-23844" + image: "ci-runner:fd5702db-138-25483" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -105,7 +117,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:0dee6306-564-23843" + image: "kubelink:6ef0fbbe-564-25533" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -123,7 +135,7 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:850b40d5-419-23840" + image: "kubewatch:7c8611f4-419-25531" imagePullPolicy: IfNotPresent configs: devtroncd_NAMESPACE: "devtron-ci" @@ -145,7 +157,7 @@ components: volumeSize: "20Gi" gitsensor: registry: "" - image: "git-sensor:86e13283-200-23847" + image: "git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -163,7 +175,7 @@ components: # Values for lens lens: registry: "" - image: "lens:56211042-333-23839" + image: "lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent secrets: {} resources: {} @@ -210,7 +222,7 @@ components: DB_NAME: "lens" chartSync: registry: "" - image: chart-sync:5a1d0301-150-23845 + image: chart-sync:13ffae06-150-25515 # values for argocd integration argo-cd: enabled: false @@ -251,13 +263,7 @@ argo-cd: - all readOnlyRootFilesystem: true runAsNonRoot: true - env: - - name: ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true + env: [] affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -347,12 +353,6 @@ argo-cd: - --parallelismlimit - "50" env: - - name: ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true - name: ARGOCD_EXEC_TIMEOUT value: 180s containerSecurityContext: @@ -370,7 +370,7 @@ argo-cd: security: enabled: false imageScanner: - image: "image-scanner:137872c2-141-23848" + image: "image-scanner:348201f8-141-25486" # Values for trivy trivy: enabled: false @@ -385,7 +385,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "notifier:9639b1ab-372-23850" + image: "notifier:06392394-372-25535" configs: CD_ENVIRONMENT: PROD DB: orchestrator @@ -436,3 +436,28 @@ monitoring: resources: {} persistence: storage: "2Gi" +# Change these values for Devtron-Enterprise +devtronEnterprise: + enabled: false + casbin: + registry: "" + image: "casbin:efc28fb2-6de0e914-462-25420" + imagePullPolicy: IfNotPresent + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_DATABASE: 
casbin + PG_PORT: "5432" + PG_USER: postgres + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + resources: {} + scoop: + enabled: false + registry: "" + image: "scoop:296d351d-629-24001" + imagePullPolicy: IfNotPresent + resources: {} + configs: + CLUSTER_ID: "1" + ORCHESTRATOR_URL: http://devtron-service.devtroncd.svc.cluster.local/orchestrator diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index f0bb4839be2..c13839c9b9e 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd spec: - url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.7.1/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.7.2/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index fe6032030c7..b1c4d67d2f6 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v0.7.1"; +LTAG="v0.7.2"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; log("executed devtron setup installation"); diff --git a/manifests/release.txt b/manifests/release.txt index d1895904915..8e6c98d5c6d 100644 --- a/manifests/release.txt +++ b/manifests/release.txt @@ -1 +1 @@ -stable -1 v0.7.1 +stable -1 v0.7.2 diff --git a/manifests/yamls/dashboard.yaml b/manifests/yamls/dashboard.yaml index ae5a449c43b..6f05ac96502 100644 --- a/manifests/yamls/dashboard.yaml +++ b/manifests/yamls/dashboard.yaml @@ -235,7 +235,7 @@ spec: - name: envoy-config-volume mountPath: /etc/envoy-config/ - name: dashboard - image: "quay.io/devtron/dashboard:5f95d187-690-23841" + image: "quay.io/devtron/dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/devtron.yaml b/manifests/yamls/devtron.yaml index ac0833faa66..6ccd9b8cc09 100644 --- a/manifests/yamls/devtron.yaml +++ b/manifests/yamls/devtron.yaml @@ -53,7 +53,7 @@ data: CD_NODE_TAINTS_VALUE: "ci" CD_ARTIFACT_LOCATION_FORMAT: "%d/%d.zip" DEFAULT_CD_NAMESPACE: "devtron-cd" - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:48aca9f4-138-23844" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:fd5702db-138-25483" DEFAULT_CD_TIMEOUT: "3600" WF_CONTROLLER_INSTANCE_ID: "devtron-runner" CI_LOGS_KEY_PREFIX: "ci-artifacts" @@ -89,7 +89,7 @@ data: ENFORCER_CACHE: "true" ENFORCER_CACHE_EXPIRATION_IN_SEC: "345600" ENFORCER_MAX_BATCH_SIZE: "1" - APP_SYNC_IMAGE: "quay.io/devtron/chart-sync:5a1d0301-150-23845" + APP_SYNC_IMAGE: "quay.io/devtron/chart-sync:13ffae06-150-25515" DEVTRON_SECRET_NAME: "devtron-secret" GIT_SENSOR_PROTOCOL: GRPC GIT_SENSOR_URL: git-sensor-service.devtroncd:90 @@ -169,7 +169,7 @@ spec: runAsUser: 1000 containers: - name: devtron - image: "quay.io/devtron/devtron:291c4c75-434-23853" + image: "quay.io/devtron/devtron:b5a2f8ba-434-25563" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/gitsensor.yaml b/manifests/yamls/gitsensor.yaml index e16b5199343..9c19b458858 100644 --- a/manifests/yamls/gitsensor.yaml +++ b/manifests/yamls/gitsensor.yaml @@ -67,7 +67,7 @@ spec: - /bin/sh - -c - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys - image: "quay.io/devtron/git-sensor:86e13283-200-23847" + image: "quay.io/devtron/git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent name: 
chown-git-base resources: {} @@ -80,7 +80,7 @@ spec: name: git-volume containers: - name: git-sensor - image: "quay.io/devtron/git-sensor:86e13283-200-23847" + image: "quay.io/devtron/git-sensor:5b9cf0ec-200-25481" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/image-scanner.yaml b/manifests/yamls/image-scanner.yaml index 9c8a06e861f..61344fddbe1 100644 --- a/manifests/yamls/image-scanner.yaml +++ b/manifests/yamls/image-scanner.yaml @@ -73,7 +73,7 @@ spec: runAsUser: 1000 containers: - name: image-scanner - image: "quay.io/devtron/image-scanner:137872c2-141-23848" + image: "quay.io/devtron/image-scanner:348201f8-141-25486" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/kubelink.yaml b/manifests/yamls/kubelink.yaml index 21531cf24c7..6502a2ff93c 100644 --- a/manifests/yamls/kubelink.yaml +++ b/manifests/yamls/kubelink.yaml @@ -25,7 +25,7 @@ spec: runAsUser: 1000 containers: - name: kubelink - image: "quay.io/devtron/kubelink:0dee6306-564-23843" + image: "quay.io/devtron/kubelink:6ef0fbbe-564-25533" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubewatch.yaml b/manifests/yamls/kubewatch.yaml index e00be3131aa..2fabe230ef4 100644 --- a/manifests/yamls/kubewatch.yaml +++ b/manifests/yamls/kubewatch.yaml @@ -164,7 +164,7 @@ spec: runAsUser: 1000 containers: - name: kubewatch - image: "quay.io/devtron/kubewatch:850b40d5-419-23840" + image: "quay.io/devtron/kubewatch:7c8611f4-419-25531" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/lens.yaml b/manifests/yamls/lens.yaml index dc92100db6c..fd2b4c4e393 100644 --- a/manifests/yamls/lens.yaml +++ b/manifests/yamls/lens.yaml @@ -71,7 +71,7 @@ spec: runAsUser: 1000 containers: - name: lens - image: "quay.io/devtron/lens:56211042-333-23839" + image: "quay.io/devtron/lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/notifier.yaml b/manifests/yamls/notifier.yaml index e182739a9ea..437b2938538 100644 --- a/manifests/yamls/notifier.yaml +++ b/manifests/yamls/notifier.yaml @@ -66,7 +66,7 @@ spec: restartPolicy: Always containers: - name: notifier - image: quay.io/devtron/notifier:9639b1ab-372-23850" + image: "quay.io/devtron/notifier:06392394-372-25535" imagePullPolicy: IfNotPresent ports: - name: app diff --git a/releasenotes.md b/releasenotes.md index 8e333d8e676..e78a01c4176 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,59 +1,127 @@ -## v0.7.1 +## v0.7.2 ## Bugs -- fix: EA mode wire fix (#5462) -- fix: compare manifest fixes (#5430) -- fix: override clusterRbac with direct allow behaviour for super admin (#5449) -- fix: external helm app when linked to devtron and page breaks while adding project to it, without switching back to applist (#5443) -- fix: empty the code and image scan script (#5434) -- fix: K8s Resource list RBAC ignore for Superadmin (#5415) -- fix: repo url and name handling with argocd (#5445) -- fix: fix for terminal disconnect issue when custom transport is being used (#5436) -- fix: gitops async failed for git cli mode in concurrent cases (#5412) -- fix: Updating pr-issue-validator-script (#5384) -- fix: optimised FetchLatestDeploymentWithChartRefs query (#5393) -- fix: nats consumer deleted on shutdown (#5377) -- fix: panic issue in get/ download pod logs api (#5342) -- fix: encountering panic in application groups in build and deploy page
(#5330) -- fix: chart group rbac issue (#5183) -- fix: Multiple choice option for namespace in Kubernetes resource permission (#5293) -- fix: restart workloads fix in app group (#5313) -- fix: deployment chart fix (#5215) -- fix: docker file version fix (#5299) -- fix: hibernating status is not being updated in app listing page (#5294) +- fix: error in enable change ci (#5358) +- fix: ci patch rbac fixes (#5461) +- fix: bitbucket commit race condition for concurrent requests (#5505) +- fix: handle nil check image scanning (#5497) +- fix: error in switching ci to external ci (#5500) +- fix: autoscale error handling (#5481) +- fix: ci material update fixes for linked ci pipelines (#5523) +- fix: Unable to get HPA manifest for no-gitops deployment (#5522) +- fix: Deployment stuck in starting for no-gitops based pipelines (#5526) +- fix: panic handling for deleted app in app group and env group filters (#5541) +- fix: security time fix when scanning is passed (#5549) +- fix: app group query optimisations (#5558) +- fix: version and fixed_version in image scan result table (#5552) +- fix: add if not exists in migration script for avoiding any errors while rerunning scripts (#5579) +- fix: Resource Browser Shortnames are not applying dynamically (#5573) +- fix: tls enabled flag not getting passed (#5609) +- fix: reverting acd token fetch logic (#5614) +- fix: query optimisations for app group cd listing and ci pipeline blockage state (#5641) +- fix: dependabot security updates (#5608) +- fix: default PipelineType given (#5668) +- fix: validation in CiJob for external Artifact (#5669) +- fix: Nats Panic Error in Orchestrator (#5670) +- fix: SSH & Proxy Cluster flows broken (#5675) +- fix: Restart in orchestrator just after release (#5671) +- fix: Sql query optimisation for application group app status listing (#5672) +- fix: handling for HPA (autoscaling) (#5666) +- fix: refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) +- fix: Decode secret fix on add update oss (#5695) +- fix: saving pco concurrency case handled (#5688) +- fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) +- fix: Issue in EA Mode Cluster - error: pg: multiple rows in result set. 
(#5708) +- fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) +- fix: migration syn (#5718) +- fix: ci patch rbac for branch update (#5759) +- fix: Bitnami chart repo tls issue (#5740) +- fix: check rbac on env if envName is present (#5765) +- fix: scan tool active check removed (#5771) +- fix: panic handlings and argocd app delete stuck in partial stage (#5770) +- fix: unimplemented cluster cron service (#5781) +- fix: sql injection fixes (#5783) +- fix: sql injection fixes (#5801) +- fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) +- fix: Ea rbac issues and not working on airgapped (#5813) +- fix: scan list in global security page sql injection fix (#5808) +- fix: app details page breaking (#5823) +- fix: plugin ip variables value getting changed (#5844) +- fix: ignore kubelink errors in server startup (#5852) (#5854) +- fix: user rbac flows (#5804) +- fix: pg multiple rows in EA mode (#5869) +- fix: app overview panic for helm app (#5863) +- fix: app detail page breaking (#5873) +- fix: copy container image plugin issue (#5876) +- fix: create GitOps configuration issue (#5883) ## Enhancements -- feat: Checking multiarchitecture of images (#5232) -- feat: updated kubelink grpc client cfg (#5426) -- feat: Integration of Cranecopy plugin (#5131) -- feat: casbin upgraded to v2 (#5329) -- feat: new scripts added for rescan sbom support , helm manifest scan flag and git container links (#5406) -- feat: Reload materials api added (#5182) -- feat: mirgator plugin (#5347) -- feat: insecure support for chart-sync (#5328) -- feat: GitOps async install for devtron applications (#5169) -- feat: chart ref schema db migration (#5319) -- feat: Up and Down Script for BitBucket Plugin v1.0.0 (#4949) -- feat: Added statefulset chart 5.1.0 (#5199) -- feat: air gap registry v2 (#5220) -- feat: tenants and installations migration (#5187) +- feat: support for handling hibernation and un-hibernation for keda enabled (#5431) +- feat: Async ArgoCd App refresh operation (#5448) +- feat: deployment config migration (#5368) +- feat: Skipping falg based CMCS for Ci Job (#5536) +- feat: expose git commit data as env vars for ci stage (#5534) +- feat: Defining applications as part of release track (#5489) +- feat: gitlab webhook support (#5420) +- feat: Enhance the buildx to use cache for multi arch builds (#5307) +- feat: bug fix for picking wrong values in docker arguments (#5565) +- feat: enable external argocd listing (#5585) +- feat: plugin versioning feature (#5352) +- feat: service account in chart sync (#5584) +- feat: panic in sync pod cron and terminal not opening fix (#5603) +- feat: tls support for git and gitops (#5305) +- feat: system network controller sql script (#5637) +- feat: skip argowf logs from ci logs (#5646) +- feat: gitops support for oci repositories (#5577) +- feat: ext argo app rbac and missing common features and flux app listing and details with rbac (#5528) +- feat: expose git ops metrics (#5582) +- feat: Generate config and secret hash for application mounting external k8s secrets (#5626) +- feat: Env description handling (#5744) +- feat: Added basic auth support for servicemonitor (#5761) +- feat: Docker pull env driven (#5767) +- feat: plugin creation support (#5630) +- feat: Added multiple features support in servicemonitor (#5789) ## Documentation -- doc: Blob Storage Redirection + Other Fixes (#5432) -- doc: Added migration steps for 0.6 to 0.7 upgrade (#5411) -- doc: Created Deployment Window Draft (#4800) -- doc: Redirection 
Fix for User Permissions Doc + Other Fixes (#5382) -- doc: Redirection Fixes for 0.7 (#5381) -- doc: Redirection Issue Trial Fix (#5378) -- doc: Plugin Creation Doc (#5372) -- docs: Added specs for the global plugin Apis (#5362) -- docs: Fixes + Corrections in Docs (#5335) -- docs: fixed broken link in readme (#5337) -- docs: removed users (#5324) -- docs: Created a file for listing Devtron Users (#5310) +- doc: Added FAQ no. 28 + GoLang-migrate Link + Code Block Fix (#5502) +- docs: Drafted Software Distribution Hub (#5459) +- doc: Created Image Label + Comments Doc (#5314) +- doc: FAQ added for Bitnami Charts (#5545) +- doc: Added Keycloak SSO Doc (#5571) +- doc: Code scan plugin docs (#5562) +- docs: jenkins-plugin (#5542) +- doc: Copacetic plugin docs (#5564) +- doc: Pull images from container repository (#5563) +- doc: Collated Doc Fixes for July (#5591) +- doc: Drafted Schema Driven DT (#5533) +- doc: fixes in Copacetic plugin doc (#5622) +- doc: Edit Deployment Chart Schema (#5735) +- doc: Redirection of old entry in gitbook.yaml (#5738) +- docs: added Documentation for Air-Gapped Installation (#5360) +- doc: Update prerequisites of code-scan (#5625) +- doc: Cosign plugin doc (#5665) +- doc: CraneCopy plugin doc (#5658) +- doc: Devtron CD Trigger Plugin doc (#5747) +- doc: DockerSlim plugin doc (#5660) +- doc: Devtron Job Trigger Plugin doc (#5742) +- doc: Vulnerability Scanning Plugin doc (#5722) +- docs: Jira plugins doc (Validator + Updater) (#5709) +- docs: added commands enable ingress during helm installation (#5794) +- doc: Revamped + Restructured Ingress Setup Doc (#5798) +- docs: modifying route in ingress doc (#5799) +- docs: modified the anchorlink in ingress.md (#5800) +- doc: ArgoCD + FluxCD App Listing (#5636) +- doc: Added Special CEL Expr in Filter Condition doc (#5850) ## Others -- chore: common-lib upgrade for nats replicas (#5446) -- chore: migration for gitops config (#5383) -- chore: update common-lib tag version (#5333) -- chore: updated go version in EA dockerfile (#5327) - - - +- misc: removal of azure-devops-issue-sync.yml (#5592) +- misc: added action for discrod webhook (#5615) +- misc: Revert "misc: added action for discrod webhook" (#5619) +- chore: Plugin script fix oss (#5661) +- misc: Release candidate v0.16.0 (#5687) +- chore: migration number changes (#5692) +- chore: ea fixes for helm app (#5713) +- misc: Main sync rc - branch update (#5753) +- chore: Revert "feat: plugin creation support" (#5778) +- chore: cron status update refactoring (#5790) +- misc: sync with common-lib changes with release candidate 18 (#5830) +- chore: Custom tag for copy container image plugin (#5760) (#5841) +- chore: migration number fix (#5840) +- misc: Update CODEOWNERS (#5885) From 508a70b0d1c52d9a998dcc9fe0e0a8cbf37801c4 Mon Sep 17 00:00:00 2001 From: Ash-exp Date: Tue, 24 Sep 2024 13:11:53 +0530 Subject: [PATCH 61/61] chore: OSS main branch sync --- .github/CODEOWNERS | 4 - .github/workflows/update-release-notes.yml | 1 + CHANGELOG/release-notes-v0.7.2.md | 127 +++++++++++++ api/appStore/InstalledAppRestHandler.go | 7 + charts/devtron/Chart.yaml | 4 +- charts/devtron/devtron-bom.yaml | 58 ++++-- charts/devtron/templates/_helpers.tpl | 10 +- charts/devtron/templates/app-sync-job.yaml | 5 +- charts/devtron/templates/casbin.yaml | 125 +++++++++++++ .../devtron/templates/configmap-secret.yaml | 56 +++++- charts/devtron/templates/dashboard.yaml | 3 +- charts/devtron/templates/devtron-scc.yaml | 2 + charts/devtron/templates/devtron.yaml | 40 +++- 
charts/devtron/templates/dex.yaml | 2 +- charts/devtron/templates/gitsensor.yaml | 3 +- charts/devtron/templates/grafana.yaml | 9 +- charts/devtron/templates/install.yaml | 1 + charts/devtron/templates/kubelink.yaml | 2 +- charts/devtron/templates/kubewatch.yaml | 2 +- charts/devtron/templates/lens.yaml | 3 +- charts/devtron/templates/migrator.yaml | 16 +- charts/devtron/templates/minio.yaml | 9 +- charts/devtron/templates/nats-server.yaml | 9 +- charts/devtron/templates/notifier.yaml | 3 +- charts/devtron/templates/postgresql.yaml | 6 +- charts/devtron/templates/scoop.yaml | 169 +++++++++++++++++ charts/devtron/templates/workflow.yaml | 25 ++- charts/devtron/values.yaml | 79 +++++--- cmd/external-app/wire_gen.go | 2 +- docs/reference/glossary.md | 12 ++ docs/user-guide/applications.md | 144 +++++++++++++- .../global-configurations/filter-condition.md | 34 +++- .../sql/repository/AppListingRepository.go | 1 - manifests/install/devtron-installer.yaml | 2 +- manifests/installation-script | 2 +- manifests/release.txt | 2 +- manifests/yamls/dashboard.yaml | 2 +- manifests/yamls/devtron.yaml | 6 +- manifests/yamls/gitsensor.yaml | 4 +- manifests/yamls/image-scanner.yaml | 2 +- manifests/yamls/kubelink.yaml | 2 +- manifests/yamls/kubewatch.yaml | 2 +- manifests/yamls/lens.yaml | 2 +- manifests/yamls/notifier.yaml | 2 +- .../in/WorkflowEventProcessorService.go | 4 +- pkg/workflow/dag/WorkflowDagExecutor.go | 2 +- releasenotes.md | 176 ++++++++++++------ wire_gen.go | 2 +- 48 files changed, 1018 insertions(+), 167 deletions(-) create mode 100644 CHANGELOG/release-notes-v0.7.2.md create mode 100644 charts/devtron/templates/casbin.yaml create mode 100644 charts/devtron/templates/scoop.yaml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 87391528dde..823dfd25445 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,10 +1,6 @@ #ALL * @vikramdevtron @kripanshdevtron @nishant-d @prakarsh-dt -#DOCS -docs/ @ashokdevtron @uxarya-d @prakarsh-dt -.gitbook.yaml @uxarya-d @prakarsh-dt - #Helm Charts charts/devtron/ @prakarsh-dt @pawan-mehta-dt @nishant-d scripts/devtron-reference-helm-charts @prakarsh-dt @pawan-mehta-dt @nishant-d diff --git a/.github/workflows/update-release-notes.yml b/.github/workflows/update-release-notes.yml index ed6d35fdac6..b2641ec3970 100644 --- a/.github/workflows/update-release-notes.yml +++ b/.github/workflows/update-release-notes.yml @@ -7,6 +7,7 @@ on: - closed branches: - main + - develop # Allows you to run this workflow manually from the Actions tab workflow_dispatch: diff --git a/CHANGELOG/release-notes-v0.7.2.md b/CHANGELOG/release-notes-v0.7.2.md new file mode 100644 index 00000000000..e78a01c4176 --- /dev/null +++ b/CHANGELOG/release-notes-v0.7.2.md @@ -0,0 +1,127 @@ +## v0.7.2 + +## Bugs +- fix: error in enable change ci (#5358) +- fix: ci patch rbac fixes (#5461) +- fix: bitbucket commit race condition for concurrent requests (#5505) +- fix: handle nil check image scanning (#5497) +- fix: error in switching ci to external ci (#5500) +- fix: autoscale error handling (#5481) +- fix: ci material update fixes for linked ci pipelines (#5523) +- fix: Unable to get HPA manifest for no-gitops deployment (#5522) +- fix: Deployment stuck in starting for no-gitops based pipelines (#5526) +- fix: panic handling for deleted app in app group and env group filters (#5541) +- fix: security time fix when scanning is passed (#5549) +- fix: app group query optimisations (#5558) +- fix: version and fixed_version in image scan result table (#5552) +- fix: add if not 
exists in migration script for avoiding any errors while rerunning scripts (#5579) +- fix: Resource Browser Shortnames are not applying dynamically (#5573) +- fix: tls enabled flag not getting passed (#5609) +- fix: reverting acd token fetch logic (#5614) +- fix: query optimisations for app group cd listing and ci pipeline blockage state (#5641) +- fix: dependabot security updates (#5608) +- fix: default PipelineType given (#5668) +- fix: validation in CiJob for external Artifact (#5669) +- fix: Nats Panic Error in Orchestrator (#5670) +- fix: SSH & Proxy Cluster flows broken (#5675) +- fix: Restart in orchestrator just after release (#5671) +- fix: Sql query optimisation for application group app status listing (#5672) +- fix: handling for HPA (autoscaling) (#5666) +- fix: refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) +- fix: Decode secret fix on add update oss (#5695) +- fix: saving pco concurrency case handled (#5688) +- fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) +- fix: Issue in EA Mode Cluster - error: pg: multiple rows in result set. (#5708) +- fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) +- fix: migration syn (#5718) +- fix: ci patch rbac for branch update (#5759) +- fix: Bitnami chart repo tls issue (#5740) +- fix: check rbac on env if envName is present (#5765) +- fix: scan tool active check removed (#5771) +- fix: panic handlings and argocd app delete stuck in partial stage (#5770) +- fix: unimplemented cluster cron service (#5781) +- fix: sql injection fixes (#5783) +- fix: sql injection fixes (#5801) +- fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) +- fix: Ea rbac issues and not working on airgapped (#5813) +- fix: scan list in global security page sql injection fix (#5808) +- fix: app details page breaking (#5823) +- fix: plugin ip variables value getting changed (#5844) +- fix: ignore kubelink errors in server startup (#5852) (#5854) +- fix: user rbac flows (#5804) +- fix: pg multiple rows in EA mode (#5869) +- fix: app overview panic for helm app (#5863) +- fix: app detail page breaking (#5873) +- fix: copy container image plugin issue (#5876) +- fix: create GitOps configuration issue (#5883) +## Enhancements +- feat: support for handling hibernation and un-hibernation for keda enabled (#5431) +- feat: Async ArgoCd App refresh operation (#5448) +- feat: deployment config migration (#5368) +- feat: Skipping falg based CMCS for Ci Job (#5536) +- feat: expose git commit data as env vars for ci stage (#5534) +- feat: Defining applications as part of release track (#5489) +- feat: gitlab webhook support (#5420) +- feat: Enhance the buildx to use cache for multi arch builds (#5307) +- feat: bug fix for picking wrong values in docker arguments (#5565) +- feat: enable external argocd listing (#5585) +- feat: plugin versioning feature (#5352) +- feat: service account in chart sync (#5584) +- feat: panic in sync pod cron and terminal not opening fix (#5603) +- feat: tls support for git and gitops (#5305) +- feat: system network controller sql script (#5637) +- feat: skip argowf logs from ci logs (#5646) +- feat: gitops support for oci repositories (#5577) +- feat: ext argo app rbac and missing common features and flux app listing and details with rbac (#5528) +- feat: expose git ops metrics (#5582) +- feat: Generate config and secret hash for application mounting external k8s secrets (#5626) +- feat: Env description handling 
(#5744) +- feat: Added basic auth support for servicemonitor (#5761) +- feat: Docker pull env driven (#5767) +- feat: plugin creation support (#5630) +- feat: Added multiple features support in servicemonitor (#5789) +## Documentation +- doc: Added FAQ no. 28 + GoLang-migrate Link + Code Block Fix (#5502) +- docs: Drafted Software Distribution Hub (#5459) +- doc: Created Image Label + Comments Doc (#5314) +- doc: FAQ added for Bitnami Charts (#5545) +- doc: Added Keycloak SSO Doc (#5571) +- doc: Code scan plugin docs (#5562) +- docs: jenkins-plugin (#5542) +- doc: Copacetic plugin docs (#5564) +- doc: Pull images from container repository (#5563) +- doc: Collated Doc Fixes for July (#5591) +- doc: Drafted Schema Driven DT (#5533) +- doc: fixes in Copacetic plugin doc (#5622) +- doc: Edit Deployment Chart Schema (#5735) +- doc: Redirection of old entry in gitbook.yaml (#5738) +- docs: added Documentation for Air-Gapped Installation (#5360) +- doc: Update prerequisites of code-scan (#5625) +- doc: Cosign plugin doc (#5665) +- doc: CraneCopy plugin doc (#5658) +- doc: Devtron CD Trigger Plugin doc (#5747) +- doc: DockerSlim plugin doc (#5660) +- doc: Devtron Job Trigger Plugin doc (#5742) +- doc: Vulnerability Scanning Plugin doc (#5722) +- docs: Jira plugins doc (Validator + Updater) (#5709) +- docs: added commands enable ingress during helm installation (#5794) +- doc: Revamped + Restructured Ingress Setup Doc (#5798) +- docs: modifying route in ingress doc (#5799) +- docs: modified the anchorlink in ingress.md (#5800) +- doc: ArgoCD + FluxCD App Listing (#5636) +- doc: Added Special CEL Expr in Filter Condition doc (#5850) +## Others +- misc: removal of azure-devops-issue-sync.yml (#5592) +- misc: added action for discrod webhook (#5615) +- misc: Revert "misc: added action for discrod webhook" (#5619) +- chore: Plugin script fix oss (#5661) +- misc: Release candidate v0.16.0 (#5687) +- chore: migration number changes (#5692) +- chore: ea fixes for helm app (#5713) +- misc: Main sync rc - branch update (#5753) +- chore: Revert "feat: plugin creation support" (#5778) +- chore: cron status update refactoring (#5790) +- misc: sync with common-lib changes with release candidate 18 (#5830) +- chore: Custom tag for copy container image plugin (#5760) (#5841) +- chore: migration number fix (#5840) +- misc: Update CODEOWNERS (#5885) diff --git a/api/appStore/InstalledAppRestHandler.go b/api/appStore/InstalledAppRestHandler.go index a7653401191..ffb3f0463ca 100644 --- a/api/appStore/InstalledAppRestHandler.go +++ b/api/appStore/InstalledAppRestHandler.go @@ -148,6 +148,13 @@ func (handler *InstalledAppRestHandlerImpl) FetchAppOverview(w http.ResponseWrit token := r.Header.Get("token") handler.Logger.Infow("request payload, FindAppOverview", "installedAppId", installedAppId) installedApp, err := handler.installedAppService.GetInstalledAppById(installedAppId) + if err != nil && err != pg.ErrNoRows { + common.WriteJsonResp(w, err, nil, http.StatusInternalServerError) + return + } else if err == pg.ErrNoRows || installedApp == nil { + common.WriteJsonResp(w, errors.New("helm app does not exist"), nil, http.StatusNotFound) + return + } appOverview, err := handler.appCrudOperationService.GetAppMetaInfo(installedApp.AppId, installedAppId, installedApp.EnvironmentId) if err != nil { handler.Logger.Errorw("service err, FetchAppOverview", "err", err, "appId", installedApp.AppId, "installedAppId", installedAppId) diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml index 9f5318f6307..4c50dadb084
100644 --- a/charts/devtron/Chart.yaml +++ b/charts/devtron/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: devtron-operator -appVersion: 0.7.1 +appVersion: 0.7.2 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system. keywords: - Devtron @@ -11,7 +11,7 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.73 +version: 0.22.74 sources: - https://github.com/devtron-labs/charts dependencies: diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index 6e35b6fc0ab..9f96cb713f4 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -8,13 +8,9 @@ global: runAsUser: 1000 runAsNonRoot: true containerRegistry: "quay.io/devtron" - # The below values can be specified both at global as well as component level - nodeSelector: {} - tolerations: [] - imagePullSecrets: [] extraManifests: [] installer: - release: "v0.7.1" + release: "v0.7.2" registry: "" image: "inception" tag: "473deaa4-185-21582" @@ -30,21 +26,25 @@ components: ENABLE_CI_JOB: "true" GLOBAL_API_TIMEOUT: "60000" TRIGGER_API_TIMEOUT: "60000" - ENABLE_EXTERNAL_ARGO_CD: "false" + ENABLE_EXTERNAL_ARGO_CD: "true" SERVICE_WORKER_TIMEOUT: "1" API_BATCH_SIZE: "30" + FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true" + FEATURE_STEP_WISE_LOGS_ENABLE: "true" + FEATURE_USER_DEFINED_GITOPS_REPO_ENABLE: "true" + ENABLE_RESOURCE_SCAN: "true" registry: "" - image: "dashboard:5f95d187-690-23841" + image: "dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent devtron: registry: "" - image: "hyperion:291c4c75-280-23860" - cicdImage: "devtron:291c4c75-434-23853" + image: "hyperion:3f68456b-280-25566" + cicdImage: "devtron:3f68456b-434-25567" imagePullPolicy: IfNotPresent customOverrides: {} ciRunner: registry: "" - image: "ci-runner:48aca9f4-138-23844" + image: "ci-runner:fd5702db-138-25483" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -53,7 +53,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:0dee6306-564-23843" + image: "kubelink:6ef0fbbe-564-25533" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -71,7 +71,7 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:850b40d5-419-23840" + image: "kubewatch:7c8611f4-419-25531" imagePullPolicy: IfNotPresent configs: devtroncd_NAMESPACE: "devtron-ci" @@ -91,7 +91,7 @@ components: armImage: postgres_exporter:v0.10.1 gitsensor: registry: "" - image: "git-sensor:86e13283-200-23847" + image: "git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -109,7 +109,7 @@ components: # Values for lens lens: registry: "" - image: "lens:56211042-333-23839" + image: "lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent configs: GIT_SENSOR_PROTOCOL: GRPC @@ -154,7 +154,7 @@ components: DB_NAME: "lens" chartSync: registry: "" - image: chart-sync:5a1d0301-150-23845 + image: chart-sync:13ffae06-150-25515 # values for argocd integration argo-cd: global: @@ -174,14 +174,14 @@ workflowController: IMDSv1ExecutorImage: "argoexec:v3.0.7" security: imageScanner: - image: "image-scanner:137872c2-141-23848" + image: "image-scanner:348201f8-141-25486" clair: image: repository: clair tag: 4.3.6 # Values for notifier integration notifier: - image: "notifier:9639b1ab-372-23850" + image: "notifier:06392394-372-25535" minio: image: "minio:RELEASE.2021-02-14T04-01-33Z" mbImage: "minio-mc:RELEASE.2021-02-14T04-28-06Z" @@ -200,3 +200,27 @@ monitoring: image: "k8s-sidecar:1.1.0" 
curlImage: "curl:7.73.0" imagePullPolicy: IfNotPresent +devtronEnterprise: + enabled: false + casbin: + registry: "" + image: "casbin:efc28fb2-6de0e914-462-25420" + imagePullPolicy: IfNotPresent + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_DATABASE: casbin + PG_PORT: "5432" + PG_USER: postgres + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + resources: {} + scoop: + enabled: false + registry: "" + image: "scoop:296d351d-629-24001" + imagePullPolicy: IfNotPresent + resources: {} + configs: + CLUSTER_ID: "1" + ORCHESTRATOR_URL: http://devtron-service.devtroncd.svc.cluster.local/orchestrator diff --git a/charts/devtron/templates/_helpers.tpl b/charts/devtron/templates/_helpers.tpl index 97da656497d..97f2766cc7b 100644 --- a/charts/devtron/templates/_helpers.tpl +++ b/charts/devtron/templates/_helpers.tpl @@ -19,13 +19,19 @@ it randomly. {{- end -}} {{- end }} +{{- define "imagePullSecret" }} +{{- with .Values.imagePullSecret.credentials }} +{{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"auth\":\"%s\"}}}" .registry .username .password (printf "%s:%s" .username .password | b64enc) | b64enc }} +{{- end }} +{{- end }} + {{/* Expand the node selectors, tolerations, and image pull secrets for a Kubernetes resource. Usage: -{{ include "common.nodeSelector" (dict "nodeSelector" .Values.path.to.nodeSelector "tolerations" .Values.path.to.tolerations "imagePullSecrets" .Values.path.to.imagePullSecrets "global" .Values.global ) }} +{{ include "common.schedulerConfig" (dict "nodeSelector" .Values.path.to.nodeSelector "tolerations" .Values.path.to.tolerations "imagePullSecrets" .Values.path.to.imagePullSecrets "global" .Values.global ) }} */}} -{{- define "common.nodeSelector" -}} +{{- define "common.schedulerConfig" -}} {{- if .nodeSelector }} nodeSelector: {{ toYaml .nodeSelector | indent 2 }} diff --git a/charts/devtron/templates/app-sync-job.yaml b/charts/devtron/templates/app-sync-job.yaml index d665faadc8e..92da12d5a25 100644 --- a/charts/devtron/templates/app-sync-job.yaml +++ b/charts/devtron/templates/app-sync-job.yaml @@ -11,7 +11,7 @@ spec: template: spec: serviceAccountName: devtron - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - name: migration-wait image: {{ include "common.image" (dict "component" $.Values.components.migrator "global" $.Values.global "extraImage" $.Values.components.migrator.kubectlImage ) }} @@ -75,7 +75,8 @@ spec: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 10 }} + serviceAccountName: chart-sync + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.chartSync.nodeSelector "tolerations" $.Values.components.chartSync.tolerations "imagePullSecrets" $.Values.components.chartSync.imagePullSecrets "global" $.Values.global) | indent 10 }} {{- if and $.Values.global 
$.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 12 }} diff --git a/charts/devtron/templates/casbin.yaml b/charts/devtron/templates/casbin.yaml new file mode 100644 index 00000000000..1a21f32143a --- /dev/null +++ b/charts/devtron/templates/casbin.yaml @@ -0,0 +1,125 @@ +{{- if and .Values.devtronEnterprise.enabled }} +{{- with .Values.devtronEnterprise.casbin }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: casbin + release: casbin + name: casbin + namespace: devtroncd +spec: + minReadySeconds: 60 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: casbin + release: casbin + template: + metadata: + labels: + app: casbin + release: casbin + spec: + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.devtronEnterprise.casbin.nodeSelector "tolerations" $.Values.devtronEnterprise.casbin.tolerations "imagePullSecrets" $.Values.devtronEnterprise.casbin.imagePullSecrets "global" $.Values.global) | indent 6 }} + containers: + - name: casbin + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.casbin "global" $.Values.global) }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + env: + - name: DEVTRON_APP_NAME + value: casbin + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .dbconfig }} + - name: PG_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .dbconfig.secretName }} + key: {{ .dbconfig.keyName }} + {{- end }} + envFrom: + - configMapRef: + name: casbin-cm + livenessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /health + port: 8080 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: http + protocol: TCP + - containerPort: 9000 + name: app + protocol: TCP + {{- if .resources }} + resources: +{{ toYaml .resources | indent 12 }} + {{- end }} + volumeMounts: [] + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: [] +--- +# Casbin ConfigMap +apiVersion: v1 +kind: ConfigMap +metadata: + name: casbin-cm + namespace: devtroncd + labels: + app: casbin + release: casbin +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} +--- +# Casbin Service +apiVersion: v1 +kind: Service +metadata: + labels: + app: casbin + release: casbin + annotations: + "helm.sh/resource-policy": keep + name: casbin-service + namespace: devtroncd +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + - name: app + port: 9000 + protocol: TCP + targetPort: app + selector: + app: casbin + release: casbin + type: ClusterIP +{{- end}} +{{- end}} diff --git a/charts/devtron/templates/configmap-secret.yaml b/charts/devtron/templates/configmap-secret.yaml index b856f736dcc..3b6127f3ccd 100644 --- a/charts/devtron/templates/configmap-secret.yaml +++ b/charts/devtron/templates/configmap-secret.yaml @@ -247,9 +247,9 @@ data: PG_PASSWORD: {{ $postgresPwd }} {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} + ORCH_TOKEN: {{ $ORCH_TOKEN }} EXTERNAL_CI_API_SECRET: {{ $EXTERNAL_CI_API_SECRET }} WEBHOOK_TOKEN: {{ $WEBHOOK_TOKEN }} - ORCH_TOKEN: {{ $ORCH_TOKEN }} DEX_SECRET: {{ $DEX_SECRET }} DEX_JWTKEY: {{ $DEX_JWTKEY }} DEX_CSTOREKEY: {{ $DEX_CSTOREKEY 
}} @@ -289,3 +289,57 @@ data: {{- end }} {{- end }} type: Opaque + +{{- if $.Values.imagePullSecret }} +{{- if $.Values.imagePullSecret.create }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: devtroncd + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +{{- if eq .Values.imagePullSecret.namespaceScope "all" }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: devtron-cd + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: devtron-ci + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} + +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ default "devtron-image-pull" .Values.imagePullSecret.name }} + namespace: argo + annotations: + "helm.sh/hook": pre-install,pre-upgrade +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ include "imagePullSecret" . }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/devtron/templates/dashboard.yaml b/charts/devtron/templates/dashboard.yaml index d909d4978dd..8d978e8cace 100644 --- a/charts/devtron/templates/dashboard.yaml +++ b/charts/devtron/templates/dashboard.yaml @@ -77,7 +77,8 @@ spec: securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} {{- end }} - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.dashboard.nodeSelector "tolerations" $.Values.components.dashboard.tolerations "imagePullSecrets" $.Values.components.dashboard.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.dashboard.nodeSelector "tolerations" $.Values.components.dashboard.tolerations "imagePullSecrets" $.Values.components.dashboard.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa containers: - name: dashboard image: {{ include "common.image" (dict "component" $.Values.components.dashboard "global" $.Values.global) }} diff --git a/charts/devtron/templates/devtron-scc.yaml b/charts/devtron/templates/devtron-scc.yaml index b6f1c9680e8..1f5f10d03eb 100644 --- a/charts/devtron/templates/devtron-scc.yaml +++ b/charts/devtron/templates/devtron-scc.yaml @@ -32,6 +32,8 @@ users: - system:serviceaccount:devtroncd:argocd-server - system:serviceaccount:devtron-ci:ci-runner - system:serviceaccount:devtron-cd:cd-runner +- system:serviceaccount:devtroncd:chart-sync +- system:serviceaccount:devtroncd:devtron-default-sa volumes: - '*' {{- end }} diff --git a/charts/devtron/templates/devtron.yaml b/charts/devtron/templates/devtron.yaml index db2f24ccc8f..23c39c90b7a 100644 --- a/charts/devtron/templates/devtron.yaml +++ b/charts/devtron/templates/devtron.yaml @@ -1,4 +1,5 @@ {{- with .Values.components.devtron }} +{{- $argocdEnabled := index $.Values "argo-cd" }} --- apiVersion: v1 kind: ConfigMap @@ -19,6 +20,12 @@ data: DEX_HOST: http://argocd-dex-server.devtroncd DEX_PORT: "5556" APP_SYNC_IMAGE: {{ include "common.image" (dict "component" $.Values.components.chartSync "global" 
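[Editor's note] For readers wiring up the new imagePullSecret support above (the `imagePullSecret` named template in _helpers.tpl and the pre-install secrets in configmap-secret.yaml), a minimal values.yaml sketch follows; the field names mirror the templates in this patch, but the registry and credential values are placeholders, not values from the patch:

```yaml
# Sketch only: all credential values are placeholders.
imagePullSecret:
  create: true                # render the kubernetes.io/dockerconfigjson secrets at pre-install
  name: devtron-image-pull    # optional; this is also the template's default name
  namespaceScope: "all"       # also create the secret in devtron-cd, devtron-ci and argo
  credentials:
    registry: registry.example.com
    username: example-user
    password: example-password
```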
$.Values.global ) }} + {{- if and $.Values.devtronEnterprise.enabled $.Values.devtronEnterprise.scoop.enabled }} + SCOOP_CLUSTER_CONFIG: '{"1":{"serviceName":"scoop-service","passKey":"qhihdidhwid","namespace":"devtroncd","port":"80"}}' + {{- end }} + {{- if $.Values.devtronEnterprise.enabled }} + CASBIN_CLIENT_URL: casbin-service.devtroncd:9000 + {{- end }} {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} CD_HOST: "argocd-server.devtroncd" @@ -86,7 +93,16 @@ data: ENFORCER_MAX_BATCH_SIZE: "1" DEVTRON_SECRET_NAME: "devtron-secret" ENABLE_ASYNC_ARGO_CD_INSTALL_DEVTRON_CHART: "false" - USE_ARTIFACT_LISTING_API_V2: "true" + USE_ARTIFACT_LISTING_API_V2: "false" + ASYNC_BUILDX_CACHE_EXPORT: "true" + BUILDX_CACHE_MODE_MIN: "false" + DEVTRON_CHART_ARGO_CD_INSTALL_REQUEST_TIMEOUT: "1" + IN_APP_LOGGING_ENABLED: "true" + PARALLELISM_LIMIT_FOR_TAG_PROCESSING: "2" + SCAN_V2_ENABLED: "false" + TIMEOUT_IN_SECONDS: "60" + SHOW_DOCKER_BUILD_ARGS: "true" + FORCE_SECURITY_SCANNING: "false" RUN_HELM_INSTALL_IN_ASYNC_MODE_HELM_APPS: "true" ENABLE_ASYNC_INSTALL_DEVTRON_CHART: "true" DEVTRON_CHART_INSTALL_REQUEST_TIMEOUT: "6" @@ -160,6 +176,26 @@ data: {{- if .customOverrides }} {{ toYaml .customOverrides | indent 2}} {{- end }} + {{- $modules := list }} + {{- if has "cicd" $.Values.installer.modules }} + {{- $modules = append $modules "cicd" }} + {{- if $.Values.notifier.enabled }} + {{- $modules = append $modules "notifier" }} + {{- end }} + {{- if and $.Values.security.enabled $.Values.security.trivy.enabled }} + {{- $modules = append $modules "security.trivy" }} + {{- end }} + {{- if and $.Values.security.enabled $.Values.security.clair.enabled }} + {{- $modules = append $modules "security.clair" }} + {{- end }} + {{- if $.Values.monitoring.grafana.enabled }} + {{- $modules = append $modules "monitoring.grafana" }} + {{- end }} + {{- if ($argocdEnabled.enabled) }} + {{- $modules = append $modules "argo-cd" }} + {{- end }} + {{- end }} + INSTALLED_MODULES: {{ if $modules }}{{ printf "'%s'" (join "," $modules) }}{{ else }}""{{ end }} DEFAULT_CI_IMAGE: {{ include "common.image" (dict "component" $.Values.components.ciRunner "global" $.Values.global ) }} --- apiVersion: v1 @@ -201,7 +237,7 @@ spec: app: devtron release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.devtron.nodeSelector "tolerations" $.Values.components.devtron.tolerations "imagePullSecrets" $.Values.components.devtron.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.devtron.nodeSelector "tolerations" $.Values.components.devtron.tolerations "imagePullSecrets" $.Values.components.devtron.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccountName: devtron diff --git a/charts/devtron/templates/dex.yaml b/charts/devtron/templates/dex.yaml index b5bbaadbcc9..a95c0379b7c 100644 --- a/charts/devtron/templates/dex.yaml +++ b/charts/devtron/templates/dex.yaml @@ -59,7 +59,7 @@ spec: labels: app.kubernetes.io/name: argocd-dex-server spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.argocdDexServer.nodeSelector "tolerations" $.Values.components.argocdDexServer.tolerations "imagePullSecrets" $.Values.components.argocdDexServer.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.argocdDexServer.nodeSelector 
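[Editor's note] To make the new INSTALLED_MODULES logic above easier to follow, here is a sketch of how the module list could resolve for one plausible set of values; the values and rendered output are illustrative only, not taken from this patch:

```yaml
# Sketch only: with these values...
installer:
  modules:
    - cicd
notifier:
  enabled: true
security:
  enabled: true
  trivy:
    enabled: true
  clair:
    enabled: false
monitoring:
  grafana:
    enabled: false
# ...and argo-cd disabled, the ConfigMap entry would render roughly as:
#   INSTALLED_MODULES: 'cicd,notifier,security.trivy'
```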
"tolerations" $.Values.components.argocdDexServer.tolerations "imagePullSecrets" $.Values.components.argocdDexServer.imagePullSecrets "global" $.Values.global) | indent 6 }} affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/charts/devtron/templates/gitsensor.yaml b/charts/devtron/templates/gitsensor.yaml index 4697699b0b4..6248b7381b3 100644 --- a/charts/devtron/templates/gitsensor.yaml +++ b/charts/devtron/templates/gitsensor.yaml @@ -73,7 +73,8 @@ spec: securityContext: runAsGroup: 1000 runAsUser: 1000 - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.gitsensor.nodeSelector "tolerations" $.Values.components.gitsensor.tolerations "imagePullSecrets" $.Values.components.gitsensor.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.gitsensor.nodeSelector "tolerations" $.Values.components.gitsensor.tolerations "imagePullSecrets" $.Values.components.gitsensor.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - command: - /bin/sh diff --git a/charts/devtron/templates/grafana.yaml b/charts/devtron/templates/grafana.yaml index c99a841e4af..3fb4b8a6212 100644 --- a/charts/devtron/templates/grafana.yaml +++ b/charts/devtron/templates/grafana.yaml @@ -12,9 +12,10 @@ kind: Job metadata: name: grafana-org-job spec: + ttlSecondsAfterFinished: 100 template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: devtron containers: - name: grafana-restart @@ -511,7 +512,7 @@ spec: fsGroup: 472 runAsGroup: 472 runAsUser: 472 - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 6 }} initContainers: - name: init-chown-data image: {{ include "common.image" (dict "component" $.Values.monitoring.grafana "global" $.Values.global "extraImage" $.Values.monitoring.grafana.busyboxImage ) }} @@ -660,7 +661,7 @@ metadata: namespace: devtroncd spec: serviceAccountName: devtron-grafana-test - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 2 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.monitoring.grafana.nodeSelector "tolerations" $.Values.monitoring.grafana.tolerations "imagePullSecrets" $.Values.monitoring.grafana.imagePullSecrets "global" $.Values.global) | indent 2 }} containers: - name: devtron-test image: {{ include "common.image" (dict "component" 
$.Values.monitoring.grafana "global" $.Values.global "extraImage" $.Values.monitoring.grafana.batsImage ) }} @@ -679,4 +680,4 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/install.yaml b/charts/devtron/templates/install.yaml index 123e037885a..e3e6192910f 100644 --- a/charts/devtron/templates/install.yaml +++ b/charts/devtron/templates/install.yaml @@ -80,6 +80,7 @@ spec: labels: app: inception spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.installer.nodeSelector "tolerations" $.Values.installer.tolerations "imagePullSecrets" $.Values.installer.imagePullSecrets "global" $.Values.global) | indent 6 }} {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} diff --git a/charts/devtron/templates/kubelink.yaml b/charts/devtron/templates/kubelink.yaml index 25436ac2a4d..f4e93054f0b 100644 --- a/charts/devtron/templates/kubelink.yaml +++ b/charts/devtron/templates/kubelink.yaml @@ -57,7 +57,7 @@ spec: labels: app: kubelink spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.kubelink.nodeSelector "tolerations" $.Values.components.kubelink.tolerations "imagePullSecrets" $.Values.components.kubelink.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.kubelink.nodeSelector "tolerations" $.Values.components.kubelink.tolerations "imagePullSecrets" $.Values.components.kubelink.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccount: devtron diff --git a/charts/devtron/templates/kubewatch.yaml b/charts/devtron/templates/kubewatch.yaml index fc7366deda3..fa199caf3ca 100644 --- a/charts/devtron/templates/kubewatch.yaml +++ b/charts/devtron/templates/kubewatch.yaml @@ -167,7 +167,7 @@ spec: app: kubewatch release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.kubewatch.nodeSelector "tolerations" $.Values.components.kubewatch.tolerations "imagePullSecrets" $.Values.components.kubewatch.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.kubewatch.nodeSelector "tolerations" $.Values.components.kubewatch.tolerations "imagePullSecrets" $.Values.components.kubewatch.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always serviceAccountName: kubewatch diff --git a/charts/devtron/templates/lens.yaml b/charts/devtron/templates/lens.yaml index 503fd22eb44..c3a87b34626 100644 --- a/charts/devtron/templates/lens.yaml +++ b/charts/devtron/templates/lens.yaml @@ -66,7 +66,8 @@ spec: app: lens release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.lens.nodeSelector "tolerations" $.Values.components.lens.tolerations "imagePullSecrets" $.Values.components.lens.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.lens.nodeSelector "tolerations" $.Values.components.lens.tolerations "imagePullSecrets" $.Values.components.lens.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa terminationGracePeriodSeconds: 30 restartPolicy: Always {{- if and $.Values.global 
$.Values.global.podSecurityContext }} diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index 00313889aee..31247c32776 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ -14,7 +14,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -122,7 +123,7 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: devtron {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: @@ -221,7 +222,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -300,7 +302,8 @@ metadata: spec: template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa {{- if and $.Values.global $.Values.global.podSecurityContext }} securityContext: {{- toYaml $.Values.global.podSecurityContext | nindent 8 }} @@ -378,9 +381,10 @@ kind: Job metadata: name: postgresql-miscellaneous spec: + ttlSecondsAfterFinished: 100 template: spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets 
"global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} securityContext: fsGroup: 1000 runAsGroup: 1000 @@ -415,4 +419,4 @@ spec: backoffLimit: 20 activeDeadlineSeconds: 1800 {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/minio.yaml b/charts/devtron/templates/minio.yaml index e445ca43931..1f788bfe82c 100644 --- a/charts/devtron/templates/minio.yaml +++ b/charts/devtron/templates/minio.yaml @@ -259,7 +259,7 @@ spec: app: minio release: {{ $.Release.Name }} spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: "devtron-minio" securityContext: runAsUser: 1000 @@ -322,13 +322,14 @@ metadata: release: {{ $.Release.Name }} heritage: Helm spec: + ttlSecondsAfterFinished: 100 template: metadata: labels: app: minio-job release: {{ $.Release.Name }} spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} restartPolicy: OnFailure volumes: - name: minio-configuration @@ -384,7 +385,7 @@ spec: app: minio release: devtron-minio spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.minio.nodeSelector "tolerations" $.Values.minio.tolerations "imagePullSecrets" $.Values.minio.imagePullSecrets "global" $.Values.global) | indent 6 }} serviceAccountName: "devtron-minio" containers: - name: minio @@ -415,4 +416,4 @@ spec: secretName: devtron-minio {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/nats-server.yaml b/charts/devtron/templates/nats-server.yaml index 0c06cd9fe94..e6aa25f71e6 100644 --- a/charts/devtron/templates/nats-server.yaml +++ b/charts/devtron/templates/nats-server.yaml @@ -91,8 +91,9 @@ spec: app.kubernetes.io/name: nats app.kubernetes.io/instance: devtron-nats spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa # Common volumes for the containers. 
- {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 6 }} volumes: - name: config-volume @@ -139,8 +140,6 @@ spec: name: cluster - containerPort: 8222 name: monitor - - containerPort: 7777 - name: metrics command: - "nats-server" @@ -268,7 +267,7 @@ metadata: app.kubernetes.io/instance: devtron-nats app.kubernetes.io/managed-by: Helm spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 2 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.nats.nodeSelector "tolerations" $.Values.components.nats.tolerations "imagePullSecrets" $.Values.components.nats.imagePullSecrets "global" $.Values.global) | indent 2 }} containers: - name: nats-box image: {{ include "common.image" (dict "component" $.Values.components.nats.natsBox "global" $.Values.global) }} @@ -316,4 +315,4 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/charts/devtron/templates/notifier.yaml b/charts/devtron/templates/notifier.yaml index 054046e5a3e..55116ed1431 100644 --- a/charts/devtron/templates/notifier.yaml +++ b/charts/devtron/templates/notifier.yaml @@ -72,7 +72,8 @@ spec: app: notifier release: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.notifier.nodeSelector "tolerations" $.Values.notifier.tolerations "imagePullSecrets" $.Values.notifier.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.notifier.nodeSelector "tolerations" $.Values.notifier.tolerations "imagePullSecrets" $.Values.notifier.imagePullSecrets "global" $.Values.global) | indent 6 }} terminationGracePeriodSeconds: 30 restartPolicy: Always {{- if and $.Values.global $.Values.global.podSecurityContext }} diff --git a/charts/devtron/templates/postgresql.yaml b/charts/devtron/templates/postgresql.yaml index efcabcd0207..01e7e974866 100644 --- a/charts/devtron/templates/postgresql.yaml +++ b/charts/devtron/templates/postgresql.yaml @@ -113,7 +113,8 @@ spec: release: "devtron" role: master spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa securityContext: fsGroup: 1001 initContainers: @@ -443,7 +444,8 @@ spec: app.kubernetes.io/name: postgres app.kubernetes.io/instance: devtron spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" $.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.postgres.nodeSelector "tolerations" 
$.Values.components.postgres.tolerations "imagePullSecrets" $.Values.components.postgres.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa securityContext: fsGroup: 999 initContainers: diff --git a/charts/devtron/templates/scoop.yaml b/charts/devtron/templates/scoop.yaml new file mode 100644 index 00000000000..53a7587db1e --- /dev/null +++ b/charts/devtron/templates/scoop.yaml @@ -0,0 +1,169 @@ +{{- if and .Values.devtronEnterprise.enabled .Values.devtronEnterprise.scoop.enabled }} +{{- with .Values.devtronEnterprise.scoop }} +{{- $passKey := randAlphaNum 12 | lower }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: scoop-devtron + namespace: devtroncd + labels: + app: scoop +spec: + minReadySeconds: 60 + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 3 + selector: + matchLabels: + app: scoop + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: scoop + spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.devtronEnterprise.scoop.nodeSelector "tolerations" $.Values.devtronEnterprise.scoop.tolerations "imagePullSecrets" $.Values.devtronEnterprise.scoop.imagePullSecrets "global" $.Values.global) | indent 6 }} + terminationGracePeriodSeconds: 30 + restartPolicy: Always + schedulerName: default-scheduler + serviceAccountName: sa-scoop + containers: + - name: scoop + image: {{ include "common.image" (dict "component" $.Values.devtronEnterprise.scoop "global" $.Values.global) }} + {{- if .imagePullPolicy }} + imagePullPolicy: {{ .imagePullPolicy }} + {{- end }} + {{- if and $.Values.global $.Values.global.containerSecurityContext }} + securityContext: +{{- toYaml $.Values.global.containerSecurityContext | nindent 12 }} + {{- end }} + env: + - name: X-PASS-KEY + value: qhihdidhwid + - name: PASS_KEY + value: qhihdidhwid + - name: RETENTION + value: "10080" + - name: TOKEN + valueFrom: + secretKeyRef: + name: devtron-secret + key: ORCH_TOKEN + envFrom: + - configMapRef: + name: scoop-cm + ports: + - containerPort: 8080 + name: app + protocol: TCP + {{- if .resources }} + resources: + {{- toYaml .resources | nindent 12 }} + {{- end }} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File +--- +# Scoop-service +apiVersion: v1 +kind: Service +metadata: + labels: + app: scoop + name: scoop-service + namespace: devtroncd +spec: + ports: + - name: app + port: 80 + protocol: TCP + targetPort: app + selector: + app: scoop + sessionAffinity: None + type: ClusterIP + +--- +# Scoop ConfigMap +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app: scoop + name: scoop-cm + namespace: devtroncd +{{- if .configs }} +data: +{{ toYaml .configs | indent 2 }} +{{- end }} + +--- +# Scoop ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + app.kubernetes.io/instance: devtron + name: read-only-cluster-role-scoop +rules: + - apiGroups: + - "*" + resources: + - "*" + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - "*" + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - "*" + verbs: + - get + - list + - watch + +--- +# Scoop ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: read-only-user-crb-scoop + annotations: + "helm.sh/resource-policy": keep +roleRef: + apiGroup: rbac.authorization.k8s.io 
+ kind: ClusterRole + name: read-only-cluster-role-scoop +subjects: + - kind: ServiceAccount + name: sa-scoop + namespace: devtroncd + +--- +# Scoop ServiceAccount +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sa-scoop + namespace: devtroncd + labels: + app: scoop + annotations: + "helm.sh/resource-policy": keep +{{- end }} +{{- end }} diff --git a/charts/devtron/templates/workflow.yaml b/charts/devtron/templates/workflow.yaml index 61e38396607..e20c28be175 100644 --- a/charts/devtron/templates/workflow.yaml +++ b/charts/devtron/templates/workflow.yaml @@ -37,6 +37,29 @@ metadata: annotations: "helm.sh/hook": pre-install "helm.sh/resource-policy": keep +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: chart-sync + namespace: devtroncd + labels: + release: devtron +{{- if $.Values.components.chartSync.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.components.chartSync.imagePullSecrets | indent 2 }} +{{- else if $.Values.global.imagePullSecrets }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: devtron-default-sa + namespace: devtroncd + labels: + release: devtron {{- if $.Values.installer.modules }} {{- if has "cicd" $.Values.installer.modules }} --- @@ -1270,7 +1293,7 @@ spec: labels: app: workflow-controller spec: - {{ include "common.nodeSelector" (dict "nodeSelector" $.Values.workflowController.nodeSelector "tolerations" $.Values.workflowController.tolerations "imagePullSecrets" $.Values.workflowController.imagePullSecrets "global" $.Values.global) | indent 6 }} + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.workflowController.nodeSelector "tolerations" $.Values.workflowController.tolerations "imagePullSecrets" $.Values.workflowController.imagePullSecrets "global" $.Values.global) | indent 6 }} containers: - args: - --configmap diff --git a/charts/devtron/values.yaml b/charts/devtron/values.yaml index 19f6854f385..f8b39288472 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -9,13 +9,22 @@ global: runAsNonRoot: true containerRegistry: "quay.io/devtron" # The below values can be specified both at global as well as component level + # nodeSelector: + # key: value + # tolerations: + # - key: "key1" + # operator: "Equal" + # value: "value1" + # effect: "NoSchedule" + # imagePullSecrets: + # - name: your-image-pull-secret nodeSelector: {} tolerations: [] imagePullSecrets: [] extraManifests: [] installer: repo: "devtron-labs/devtron" - release: "v0.7.1" + release: "v0.7.2" registry: "" image: inception tag: 473deaa4-185-21582 @@ -24,7 +33,6 @@ installer: openshift: false # Set this to true if you are installing on openshift production_overrides: "" # Set true if you want to use this Devtron stack in Production (This will require more resources) # Change the below values for full mode only - #Use secrets in plaintext, they'll be encoded to base64 automatically. 
secrets: {} # REQUIRED IF BLOB_STORAGE_PROVIDER=AZURE Token with read write access to AZURE_BLOB_CONTAINER_CI_LOG and AZURE_BLOB_CONTAINER_CI_CACHE @@ -61,16 +69,20 @@ components: ENABLE_CI_JOB: "true" GLOBAL_API_TIMEOUT: "60000" TRIGGER_API_TIMEOUT: "60000" - ENABLE_EXTERNAL_ARGO_CD: "false" + ENABLE_EXTERNAL_ARGO_CD: "true" SERVICE_WORKER_TIMEOUT: "1" API_BATCH_SIZE: "30" + FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true" + FEATURE_STEP_WISE_LOGS_ENABLE: "true" + FEATURE_USER_DEFINED_GITOPS_REPO_ENABLE: "true" + ENABLE_RESOURCE_SCAN: "true" registry: "" - image: "dashboard:5f95d187-690-23841" + image: "dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent devtron: registry: "" - image: "hyperion:291c4c75-280-23860" - cicdImage: "devtron:291c4c75-434-23853" + image: "hyperion:3f68456b-280-25566" + cicdImage: "devtron:3f68456b-434-25567" imagePullPolicy: IfNotPresent customOverrides: {} serviceMonitor: @@ -96,7 +108,7 @@ components: # - devtron.example.com ciRunner: registry: "" - image: "ci-runner:48aca9f4-138-23844" + image: "ci-runner:fd5702db-138-25483" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -105,7 +117,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:0dee6306-564-23843" + image: "kubelink:6ef0fbbe-564-25533" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -123,7 +135,7 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:850b40d5-419-23840" + image: "kubewatch:7c8611f4-419-25531" imagePullPolicy: IfNotPresent configs: devtroncd_NAMESPACE: "devtron-ci" @@ -145,7 +157,7 @@ components: volumeSize: "20Gi" gitsensor: registry: "" - image: "git-sensor:86e13283-200-23847" + image: "git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -163,7 +175,7 @@ components: # Values for lens lens: registry: "" - image: "lens:56211042-333-23839" + image: "lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent secrets: {} resources: {} @@ -210,7 +222,7 @@ components: DB_NAME: "lens" chartSync: registry: "" - image: chart-sync:5a1d0301-150-23845 + image: chart-sync:13ffae06-150-25515 # values for argocd integration argo-cd: enabled: false @@ -251,13 +263,7 @@ argo-cd: - all readOnlyRootFilesystem: true runAsNonRoot: true - env: - - name: ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true + env: [] affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -347,12 +353,6 @@ argo-cd: - --parallelismlimit - "50" env: - - name: ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true - name: ARGOCD_EXEC_TIMEOUT value: 180s containerSecurityContext: @@ -370,7 +370,7 @@ argo-cd: security: enabled: false imageScanner: - image: "image-scanner:137872c2-141-23848" + image: "image-scanner:348201f8-141-25486" # Values for trivy trivy: enabled: false @@ -385,7 +385,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "notifier:9639b1ab-372-23850" + image: "notifier:06392394-372-25535" configs: CD_ENVIRONMENT: PROD DB: orchestrator @@ -436,3 +436,28 @@ monitoring: resources: {} persistence: storage: "2Gi" +# Change these values for Devtron-Enterprise +devtronEnterprise: + enabled: false + casbin: + registry: "" + image: "casbin:efc28fb2-6de0e914-462-25420" + imagePullPolicy: IfNotPresent + configs: + PG_ADDR: postgresql-postgresql.devtroncd + PG_DATABASE: 
casbin + PG_PORT: "5432" + PG_USER: postgres + dbconfig: + secretName: postgresql-postgresql + keyName: postgresql-password + resources: {} + scoop: + enabled: false + registry: "" + image: "scoop:296d351d-629-24001" + imagePullPolicy: IfNotPresent + resources: {} + configs: + CLUSTER_ID: "1" + ORCHESTRATOR_URL: http://devtron-service.devtroncd.svc.cluster.local/orchestrator diff --git a/cmd/external-app/wire_gen.go b/cmd/external-app/wire_gen.go index bab4ef2b470..ab3bca55cdc 100644 --- a/cmd/external-app/wire_gen.go +++ b/cmd/external-app/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. -//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:generate go run github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject diff --git a/docs/reference/glossary.md b/docs/reference/glossary.md index c219ea5bd12..ce8354d2d81 100644 --- a/docs/reference/glossary.md +++ b/docs/reference/glossary.md @@ -10,6 +10,12 @@ An immutable blob of data generated as an output after the execution of a job, b * Once a job is complete, you can view the job artifacts by going to Jobs → Run history (tab) → (choose a pipeline and date of triggering the build) → Artifacts (tab). +### ArgoCD Apps + +ArgoCD Apps are the micro-services deployed using a [GitOps](#gitops) deployment tool named [Argo CD](https://argo-cd.readthedocs.io/en/stable/). + +If ArgoCD applications are present in your cluster, they will appear in the [ArgoCD Apps listing](../user-guide/applications.md#enabling-argocd-app-listing). + ### Base Deployment Template A deployment template is a manifest of the application defining its runtime behavior. You can select one of the default deployment charts or custom deployment charts created by super-admin. @@ -112,6 +118,12 @@ Similarly, the CPU and memory resources can be different for each environment. T You can add external links related to the application. For e.g., you can add Prometheus, Grafana, and many more to your application by going to Global Configurations → External Links. [Read More...](../user-guide/global-configurations/external-links.md) +### FluxCD Apps + +FluxCD Apps are the micro-services deployed using a [GitOps](#gitops) deployment tool named [Flux CD](https://fluxcd.io/). + +If FluxCD applications are present in your cluster, they will appear in the [FluxCD Apps listing](../user-guide/applications.md#view-fluxcd-app-listing). + ### GitOps A methodology for managing and automating Kubernetes deployments using Git repositories as the source of truth. Changes to the desired state of the cluster are driven by Git commits. [Read More...](../user-guide/global-configurations/gitops.md) diff --git a/docs/user-guide/applications.md b/docs/user-guide/applications.md index 8df03f9be5d..d436240c032 100644 --- a/docs/user-guide/applications.md +++ b/docs/user-guide/applications.md @@ -1,3 +1,145 @@ # Applications -Please configure Global Configurations before creating an application or cloning an existing application. \ No newline at end of file +{% hint style="warning" %} +Configure [Global Configurations](./global-configurations/README.md) first before creating an application or cloning an existing application. 
+{% endhint %} + +## Introduction + +The **Applications** page helps you create and manage your microservices, and it mainly consists of the following: + +* [Application Listing](#application-listing) +* [Create Button](#create-button) +* [Other Options](#other-options) + +### Application Listing + +You can view the app name, its status, environment, namespace, and more upfront. The apps are segregated into: [Devtron Apps](../reference/glossary.md#devtron-apps), [Helm Apps](../reference/glossary.md#helm-apps), [ArgoCD Apps](../reference/glossary.md#argocd-apps), and [FluxCD Apps](../reference/glossary.md#fluxcd-apps). + +![Figure 1: App Types](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/app-types.jpg) + +### Create Button + +You can use this to: +* [Create a Devtron app](./create-application.md) +* [Create a Helm app](./deploy-chart/deployment-of-charts.md) +* [Create a Job](./jobs/create-job.md) + +### Other Options + +There are additional options available for you: +* **Search and filters** to make it easier for you to find applications. +* **Export CSV** to download the data of Devtron apps (not supported for Helm apps and Argo CD apps). +* **Sync button** to refresh the app listing. + +--- + +## View ArgoCD App Listing + +{% hint style="warning" %} +### Who Can Perform This Action? +Users need super-admin permission to view/enable/disable the ArgoCD listing. +{% endhint %} + +### Preface + +In Argo CD, a user manages one dashboard per ArgoCD instance. Therefore, with multiple ArgoCD instances, managing several dashboards becomes cumbersome. + +With Devtron, you get an entire Argo CD app listing in one place. This listing includes: +* Apps deployed using [GitOps](../reference/glossary.md#gitops) on Devtron +* Other Argo CD apps present in your cluster + +![Figure 2: ArgoCD App List](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/app-details-argo.gif) + +### Advantages + +Devtron also bridges the gap for ArgoCD users by providing additional features as follows: + +* **Resource Scanning**: You can scan for vulnerabilities using Devtron's [resource scanning](../user-guide/security-features.md#from-app-details) feature. [![](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/elements/EnterpriseTag.svg)](https://devtron.ai/pricing) + +* **Single-pane View**: All Argo CD apps will show details such as their app status, environment, cluster, and namespace together in one dashboard. + +* **Feature-rich Options**: Clicking an Argo CD app will give you access to its logs, terminal, events, manifest, available resource kinds, pod restart log, and more. + +{% hint style="info" %} +### Additional References +[ArgoCD: Standalone Configuration vs Devtron Configuration](https://devtron.ai/blog/argocd-standalone-configuration-vs-devtron-configuration/#argocd-installation-and-configuration) +{% endhint %} + +### Prerequisite +The cluster in which Argo CD apps exist should be added in **Global Configurations** → **Clusters and Environments**. + +### Feature Flag + +> **`ENABLE_EXTERNAL_ARGO_CD: "true"`** + +### Enabling ArgoCD App Listing + +{% embed url="https://www.youtube.com/watch?v=4KyYnsAEpqo" caption="Enabling External ArgoCD Listing" %} + +1. Go to the **Resource Browser** of Devtron. + +2. Select the cluster (in which your Argo CD app exists). + +3. Type `ConfigMap` in the 'Jump to Kind' field. + +4.
Search for `dashboard-cm` using the available search bar and click it. + +5. Click **Edit Live Manifest**. + +6. Set the feature flag **ENABLE_EXTERNAL_ARGO_CD** to **"true"**. + +7. Click **Apply Changes**. + +8. Go back to the 'Jump to Kind' field and type `Pod`. + +9. Search for the `dashboard` pod and use the kebab menu (3 vertical dots) to delete the pod. + +10. Go to **Applications** and refresh the page. A new tab named **ArgoCD Apps** will be visible. + +11. Select the cluster(s) from the dropdown to view the Argo CD apps available in the chosen cluster(s). + + ![Figure 3: Cluster Selection for Argo CD Listing](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/argocd/argo-cluster-selection.jpg) + +--- + +## View FluxCD App Listing + +{% hint style="warning" %} +### Who Can Perform This Action? +Users need super-admin permission to view/enable/disable the FluxCD listing. +{% endhint %} + +### Preface + +Flux CD doesn't have any official dashboard; however, Devtron supports the listing of your [Flux CD](https://fluxcd.io/) apps in one dashboard. Here, the [advantages](#advantages) are the same as those of [ArgoCD app listing](#view-argocd-app-listing). + +![Figure 4: FluxCD App List and Details](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/fluxcd-listing.jpg) + +### Prerequisite +The cluster in which Flux CD apps exist should be added in **Global Configurations** → **Clusters and Environments**. + +### Feature Flag + +> **`FEATURE_EXTERNAL_FLUX_CD_ENABLE: "true"`** + +### Enabling FluxCD App Listing + +{% hint style="info" %} +### Tip +You may refer to the steps mentioned in the [Enabling ArgoCD App Listing](#enabling-argocd-app-listing) section since the procedure is similar. +{% endhint %} + +Using Devtron's Resource Browser, add the [feature flag](#feature-flag-1) in the Dashboard ConfigMap as shown below. + +![Figure 5: Editing Dashboard ConfigMap](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/flux-feature-flag.jpg) + +After successfully executing all the steps, a new tab named **FluxCD Apps** will be visible. Select the cluster(s) from the dropdown to view the Flux CD apps available in the chosen cluster(s). + +![Figure 6: Selecting Cluster](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/cluster-selection.jpg) + +(Optional) Once you choose cluster(s), you may use the **Template Type** dropdown to further filter your Flux CD app listing based on its type, i.e., [Kustomization](https://fluxcd.io/flux/components/kustomize/kustomizations/) or [Helmrelease](https://fluxcd.io/flux/components/helm/helmreleases/). + +Click any Flux CD app to view its details as shown below. + +![Figure 7: Flux App Details](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/creating-application/fluxcd/app-details-flux.gif) \ No newline at end of file diff --git a/docs/user-guide/global-configurations/filter-condition.md b/docs/user-guide/global-configurations/filter-condition.md index b721010e5d2..bde3182985a 100644 --- a/docs/user-guide/global-configurations/filter-condition.md +++ b/docs/user-guide/global-configurations/filter-condition.md @@ -8,6 +8,7 @@ Using filter conditions, you can control the progression of events.
Here are a few examples: * Images containing the label "test" should not be eligible for deployment in production environment * Only images having tag versions greater than v0.7.4 should be eligible for deployment * Images hosted on Docker Hub should be eligible but not the rest +* Only images derived from master branch should be eligible for production deployment (see [example](#scenario-2)) --- @@ -55,17 +56,17 @@ You must have application(s) with CI-CD workflow(s) configured ![Figure 5: Selecting Environment(s) from Cluster(s)](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/environment-selection.jpg) - {% hint style="info" %} - Since an application can have more than one environment, the filter conditions apply only to the environment you chose in the **Apply to** section. If you create a filter condition without choosing an application or environment, it will not apply to any of your pipelines. - {% endhint %} +{% hint style="info" %} +Since an application can have more than one environment, the filter conditions apply only to the environment you chose in the **Apply to** section. If you create a filter condition without choosing an application or environment, it will not apply to any of your pipelines. +{% endhint %} 6. Click **Save**. You have successfully created a filter. ![Figure 6: Success Toast](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/filter-created.jpg) - {% hint style="warning" %} - If you create filters using CEL expressions that result in a conflict (i.e., passing and failing of the same image), fail will have higher precedence - {% endhint %} +{% hint style="warning" %} +If you create filters using CEL expressions that result in a conflict (i.e., passing and failing of the same image), fail will have higher precedence +{% endhint %} --- @@ -78,6 +79,8 @@ Here's a sample pipeline we will be using for our explanation of [pass condition ### Pass Condition +#### Scenario 1 + Consider a scenario where you wish to make an image eligible for deployment only if its tag version is greater than `v0.0.7` The CEL Expression should be `containerImageTag > "v0.0.7"` @@ -102,6 +105,25 @@ Clicking the filter icon at the top-left shows the filter condition(s) applied t ![Figure 12b: Conditions Applied](https://devtron-public-asset.s3.us-east-2.amazonaws.com/images/global-configurations/filters/conditions-applied-1.jpg) +#### Scenario 2 + +Consider another scenario where you wish to make images eligible for deployment only if the application's git branch starts with the word `hotfix` and its repo URL matches your specified condition. + +**CEL Expression**: + +`gitCommitDetails.filter(gitCommitDetail, gitCommitDetail.startsWith('https://github.com/devtron-labs')).map(repo, gitCommitDetails[repo].branch).exists_one(branch, branch.startsWith('hotfix-'))` + +where `https://github.com/devtron-labs` is a portion of the repo URL
+and `hotfix-` is for finding the branch name (say *hotfix-sept-2024*) + +Alternatively, if you have a fixed branch (say *hotfix-123*), you may write the following expression: + +`'hotfix-123' in gitCommitDetails.filter(gitCommitDetail, gitCommitDetail.startsWith('https://github.com/devtron-labs')).map(repo, gitCommitDetails[repo].branch)` + +**Walkthrough Video**: + +{% embed url="https://www.youtube.com/watch?v=R8IbZhXhH-k" caption="Filter Condition Example" %} + ### Fail Condition diff --git a/internal/sql/repository/AppListingRepository.go b/internal/sql/repository/AppListingRepository.go index c5536724f67..40519bccf4f 100644 --- a/internal/sql/repository/AppListingRepository.go +++ b/internal/sql/repository/AppListingRepository.go @@ -382,7 +382,6 @@ func (impl AppListingRepositoryImpl) deploymentDetailsByAppIdAndEnvId(ctx contex } deploymentDetail.EnvironmentId = envId - deploymentDetail.EnvironmentId = envId dc, err := impl.deploymentConfigRepository.GetByAppIdAndEnvId(appId, envId) if err != nil && err != pg.ErrNoRows { impl.Logger.Errorw("error in getting deployment config by appId and envId", "appId", appId, "envId", envId, "err", err) diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index f0bb4839be2..c13839c9b9e 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd spec: - url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.7.1/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v0.7.2/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index fe6032030c7..b1c4d67d2f6 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v0.7.1"; +LTAG="v0.7.2"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; log("executed devtron setup installation"); diff --git a/manifests/release.txt b/manifests/release.txt index d1895904915..8e6c98d5c6d 100644 --- a/manifests/release.txt +++ b/manifests/release.txt @@ -1 +1 @@ -stable -1 v0.7.1 +stable -1 v0.7.2 diff --git a/manifests/yamls/dashboard.yaml b/manifests/yamls/dashboard.yaml index ae5a449c43b..6f05ac96502 100644 --- a/manifests/yamls/dashboard.yaml +++ b/manifests/yamls/dashboard.yaml @@ -235,7 +235,7 @@ spec: - name: envoy-config-volume mountPath: /etc/envoy-config/ - name: dashboard - image: "quay.io/devtron/dashboard:5f95d187-690-23841" + image: "quay.io/devtron/dashboard:215319c7-690-25536" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/devtron.yaml b/manifests/yamls/devtron.yaml index ac0833faa66..6ccd9b8cc09 100644 --- a/manifests/yamls/devtron.yaml +++ b/manifests/yamls/devtron.yaml @@ -53,7 +53,7 @@ data: CD_NODE_TAINTS_VALUE: "ci" CD_ARTIFACT_LOCATION_FORMAT: "%d/%d.zip" DEFAULT_CD_NAMESPACE: "devtron-cd" - DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:48aca9f4-138-23844" + DEFAULT_CI_IMAGE: "quay.io/devtron/ci-runner:fd5702db-138-25483" DEFAULT_CD_TIMEOUT: "3600" WF_CONTROLLER_INSTANCE_ID: "devtron-runner" CI_LOGS_KEY_PREFIX: "ci-artifacts" @@ -89,7 +89,7 @@ data: ENFORCER_CACHE: "true" ENFORCER_CACHE_EXPIRATION_IN_SEC: "345600" ENFORCER_MAX_BATCH_SIZE: "1" - APP_SYNC_IMAGE: "quay.io/devtron/chart-sync:5a1d0301-150-23845" + APP_SYNC_IMAGE: "quay.io/devtron/chart-sync:13ffae06-150-25515" DEVTRON_SECRET_NAME: "devtron-secret" 
GIT_SENSOR_PROTOCOL: GRPC GIT_SENSOR_URL: git-sensor-service.devtroncd:90 @@ -169,7 +169,7 @@ spec: runAsUser: 1000 containers: - name: devtron - image: "quay.io/devtron/devtron:291c4c75-434-23853" + image: "quay.io/devtron/devtron:b5a2f8ba-434-25563" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/gitsensor.yaml b/manifests/yamls/gitsensor.yaml index e16b5199343..9c19b458858 100644 --- a/manifests/yamls/gitsensor.yaml +++ b/manifests/yamls/gitsensor.yaml @@ -67,7 +67,7 @@ spec: - /bin/sh - -c - mkdir -p /git-base/ssh-keys && chown -R devtron:devtron /git-base && chmod 777 /git-base/ssh-keys - image: "quay.io/devtron/git-sensor:86e13283-200-23847" + image: "quay.io/devtron/git-sensor:5b9cf0ec-200-25481" imagePullPolicy: IfNotPresent name: chown-git-base resources: {} @@ -80,7 +80,7 @@ spec: name: git-volume containers: - name: git-sensor - image: "quay.io/devtron/git-sensor:86e13283-200-23847" + image: "quay.io/devtron/git-sensor:5b9cf0ec-200-25481" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/image-scanner.yaml b/manifests/yamls/image-scanner.yaml index 9c8a06e861f..61344fddbe1 100644 --- a/manifests/yamls/image-scanner.yaml +++ b/manifests/yamls/image-scanner.yaml @@ -73,7 +73,7 @@ spec: runAsUser: 1000 containers: - name: image-scanner - image: "quay.io/devtron/image-scanner:137872c2-141-23848" + image: "quay.io/devtron/image-scanner:348201f8-141-25486" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/kubelink.yaml b/manifests/yamls/kubelink.yaml index 21531cf24c7..6502a2ff93c 100644 --- a/manifests/yamls/kubelink.yaml +++ b/manifests/yamls/kubelink.yaml @@ -25,7 +25,7 @@ spec: runAsUser: 1000 containers: - name: kubelink - image: "quay.io/devtron/kubelink:0dee6306-564-23843" + image: "quay.io/devtron/kubelink:6ef0fbbe-564-25533" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/kubewatch.yaml b/manifests/yamls/kubewatch.yaml index e00be3131aa..2fabe230ef4 100644 --- a/manifests/yamls/kubewatch.yaml +++ b/manifests/yamls/kubewatch.yaml @@ -164,7 +164,7 @@ spec: runAsUser: 1000 containers: - name: kubewatch - image: "quay.io/devtron/kubewatch:850b40d5-419-23840" + image: "quay.io/devtron/kubewatch:7c8611f4-419-25531" securityContext: allowPrivilegeEscalation: false runAsUser: 1000 diff --git a/manifests/yamls/lens.yaml b/manifests/yamls/lens.yaml index dc92100db6c..fd2b4c4e393 100644 --- a/manifests/yamls/lens.yaml +++ b/manifests/yamls/lens.yaml @@ -71,7 +71,7 @@ spec: runAsUser: 1000 containers: - name: lens - image: "quay.io/devtron/lens:56211042-333-23839" + image: "quay.io/devtron/lens:9db8a2fb-333-25482" imagePullPolicy: IfNotPresent securityContext: allowPrivilegeEscalation: false diff --git a/manifests/yamls/notifier.yaml b/manifests/yamls/notifier.yaml index e182739a9ea..437b2938538 100644 --- a/manifests/yamls/notifier.yaml +++ b/manifests/yamls/notifier.yaml @@ -66,7 +66,7 @@ spec: restartPolicy: Always containers: - name: notifier - image: quay.io/devtron/notifier:9639b1ab-372-23850" + image: "quay.io/devtron/notifier:06392394-372-25535" imagePullPolicy: IfNotPresent ports: - name: app diff --git a/pkg/eventProcessor/in/WorkflowEventProcessorService.go b/pkg/eventProcessor/in/WorkflowEventProcessorService.go index 1765448cbf6..43bd26c2191 100644 --- a/pkg/eventProcessor/in/WorkflowEventProcessorService.go +++ b/pkg/eventProcessor/in/WorkflowEventProcessorService.go @@ -190,8 +190,8
@@ func (impl *WorkflowEventProcessorImpl) SubscribeCDStageCompleteEvent() error { pluginArtifacts := make(map[string][]string) if cdStageCompleteEvent.PluginArtifacts != nil { pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - globalUtil.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) } + globalUtil.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) impl.logger.Debugw("received post stage success event for workflow runner ", "wfId", strconv.Itoa(wfr.Id)) err = impl.workflowDagExecutor.HandlePostStageSuccessEvent(triggerContext, wfr, wfr.CdWorkflowId, cdStageCompleteEvent.CdPipelineId, cdStageCompleteEvent.TriggeredBy, pluginArtifacts) @@ -647,8 +647,8 @@ func (impl *WorkflowEventProcessorImpl) BuildCiArtifactRequest(event bean.CiComp pluginArtifacts := make(map[string][]string) if event.PluginArtifacts != nil { pluginArtifacts = event.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - globalUtil.MergeMaps(pluginArtifacts, event.PluginRegistryArtifactDetails) } + globalUtil.MergeMaps(pluginArtifacts, event.PluginRegistryArtifactDetails) request := &wrokflowDagBean.CiArtifactWebhookRequest{ Image: event.DockerImage, diff --git a/pkg/workflow/dag/WorkflowDagExecutor.go b/pkg/workflow/dag/WorkflowDagExecutor.go index 096a3347c37..fa31d5dd830 100644 --- a/pkg/workflow/dag/WorkflowDagExecutor.go +++ b/pkg/workflow/dag/WorkflowDagExecutor.go @@ -539,8 +539,8 @@ func (impl *WorkflowDagExecutorImpl) HandlePreStageSuccessEvent(triggerContext t pluginArtifacts := make(map[string][]string) if cdStageCompleteEvent.PluginArtifacts != nil { pluginArtifacts = cdStageCompleteEvent.PluginArtifacts.GetRegistryToUniqueContainerArtifactDataMapping() - util4.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) } + util4.MergeMaps(pluginArtifacts, cdStageCompleteEvent.PluginRegistryArtifactDetails) err = impl.deactivateUnusedPaths(wfRunner.ImagePathReservationIds, pluginArtifacts) if err != nil { diff --git a/releasenotes.md b/releasenotes.md index 8e333d8e676..e78a01c4176 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,59 +1,127 @@ -## v0.7.1 +## v0.7.2 ## Bugs -- fix: EA mode wire fix (#5462) -- fix: compare manifest fixes (#5430) -- fix: override clusterRbac with direct allow behaviour for super admin (#5449) -- fix: external helm app when linked to devtron and page breaks while adding project to it, without switching back to applist (#5443) -- fix: empty the code and image scan script (#5434) -- fix: K8s Resource list RBAC ignore for Superadmin (#5415) -- fix: repo url and name handling with argocd (#5445) -- fix: fix for terminal disconnect issue when custom transport is being used (#5436) -- fix: gitops async failed for git cli mode in concurrent cases (#5412) -- fix: Updating pr-issue-validator-script (#5384) -- fix: optimised FetchLatestDeploymentWithChartRefs query (#5393) -- fix: nats consumer deleted on shutdown (#5377) -- fix: panic issue in get/ download pod logs api (#5342) -- fix: encountering panic in application groups in build and deploy page (#5330) -- fix: chart group rbac issue (#5183) -- fix: Multiple choice option for namespace in Kubernetes resource permission (#5293) -- fix: restart workloads fix in app group (#5313) -- fix: deployment chart fix (#5215) -- fix: docker file version fix (#5299) -- fix: hibernating status is not being updated in app listing page (#5294) +- fix: error in enable change ci (#5358) +- fix: ci 
patch rbac fixes (#5461) +- fix: bitbucket commit race condition for concurrent requests (#5505) +- fix: handle nil check image scanning (#5497) +- fix: error in switching ci to external ci (#5500) +- fix: autoscale error handling (#5481) +- fix: ci material update fixes for linked ci pipelines (#5523) +- fix: Unable to get HPA manifest for no-gitops deployment (#5522) +- fix: Deployment stuck in starting for no-gitops based pipelines (#5526) +- fix: panic handling for deleted app in app group and env group filters (#5541) +- fix: security time fix when scanning is passed (#5549) +- fix: app group query optimisations (#5558) +- fix: version and fixed_version in image scan result table (#5552) +- fix: add if not exists in migration script for avoiding any errors while rerunning scripts (#5579) +- fix: Resource Browser Shortnames are not applying dynamically (#5573) +- fix: tls enabled flag not getting passed (#5609) +- fix: reverting acd token fetch logic (#5614) +- fix: query optimisations for app group cd listing and ci pipeline blockage state (#5641) +- fix: dependabot security updates (#5608) +- fix: default PipelineType given (#5668) +- fix: validation in CiJob for external Artifact (#5669) +- fix: Nats Panic Error in Orchestrator (#5670) +- fix: SSH & Proxy Cluster flows broken (#5675) +- fix: Restart in orchestrator just after release (#5671) +- fix: Sql query optimisation for application group app status listing (#5672) +- fix: handling for HPA (autoscaling) (#5666) +- fix: refrain from checkin autoscalingCheckBeforeTrigger for virt clus (#5696) +- fix: Decode secret fix on add update oss (#5695) +- fix: saving pco concurrency case handled (#5688) +- fix: script for pipelineStageStepVariable, making input value and default_value text from varchar255 (#5701) +- fix: Issue in EA Mode Cluster - error: pg: multiple rows in result set. 
(#5708) +- fix: SkipCiBuildCachePushPull code incorporated with minor refac in handle runtime params validation (#5712) +- fix: migration syn (#5718) +- fix: ci patch rbac for branch update (#5759) +- fix: Bitnami chart repo tls issue (#5740) +- fix: check rbac on env if envName is present (#5765) +- fix: scan tool active check removed (#5771) +- fix: panic handlings and argocd app delete stuck in partial stage (#5770) +- fix: unimplemented cluster cron service (#5781) +- fix: sql injection fixes (#5783) +- fix: sql injection fixes (#5801) +- fix: upgraded to /argo-cd/v2 v2.9.21 (#5758) +- fix: Ea rbac issues and not working on airgapped (#5813) +- fix: scan list in global security page sql injection fix (#5808) +- fix: app details page breaking (#5823) +- fix: plugin ip variables value getting changed (#5844) +- fix: ignore kubelink errors in server startup (#5852) (#5854) +- fix: user rbac flows (#5804) +- fix: pg multiple rows in EA mode (#5869) +- fix: app overview panic for helm app (#5863) +- fix: app detail page breaking (#5873) +- fix: copy container image plugin issue (#5876) +- fix: create GitOps configuration issue (#5883) ## Enhancements -- feat: Checking multiarchitecture of images (#5232) -- feat: updated kubelink grpc client cfg (#5426) -- feat: Integration of Cranecopy plugin (#5131) -- feat: casbin upgraded to v2 (#5329) -- feat: new scripts added for rescan sbom support , helm manifest scan flag and git container links (#5406) -- feat: Reload materials api added (#5182) -- feat: mirgator plugin (#5347) -- feat: insecure support for chart-sync (#5328) -- feat: GitOps async install for devtron applications (#5169) -- feat: chart ref schema db migration (#5319) -- feat: Up and Down Script for BitBucket Plugin v1.0.0 (#4949) -- feat: Added statefulset chart 5.1.0 (#5199) -- feat: air gap registry v2 (#5220) -- feat: tenants and installations migration (#5187) +- feat: support for handling hibernation and un-hibernation for keda enabled (#5431) +- feat: Async ArgoCd App refresh operation (#5448) +- feat: deployment config migration (#5368) +- feat: Skipping flag based CMCS for Ci Job (#5536) +- feat: expose git commit data as env vars for ci stage (#5534) +- feat: Defining applications as part of release track (#5489) +- feat: gitlab webhook support (#5420) +- feat: Enhance the buildx to use cache for multi arch builds (#5307) +- feat: bug fix for picking wrong values in docker arguments (#5565) +- feat: enable external argocd listing (#5585) +- feat: plugin versioning feature (#5352) +- feat: service account in chart sync (#5584) +- feat: panic in sync pod cron and terminal not opening fix (#5603) +- feat: tls support for git and gitops (#5305) +- feat: system network controller sql script (#5637) +- feat: skip argowf logs from ci logs (#5646) +- feat: gitops support for oci repositories (#5577) +- feat: ext argo app rbac and missing common features and flux app listing and details with rbac (#5528) +- feat: expose git ops metrics (#5582) +- feat: Generate config and secret hash for application mounting external k8s secrets (#5626) +- feat: Env description handling (#5744) +- feat: Added basic auth support for servicemonitor (#5761) +- feat: Docker pull env driven (#5767) +- feat: plugin creation support (#5630) +- feat: Added multiple features support in servicemonitor (#5789) ## Documentation -- doc: Blob Storage Redirection + Other Fixes (#5432) -- doc: Added migration steps for 0.6 to 0.7 upgrade (#5411) -- doc: Created Deployment Window Draft (#4800) -- doc: Redirection
Fix for User Permissions Doc + Other Fixes (#5382) -- doc: Redirection Fixes for 0.7 (#5381) -- doc: Redirection Issue Trial Fix (#5378) -- doc: Plugin Creation Doc (#5372) -- docs: Added specs for the global plugin Apis (#5362) -- docs: Fixes + Corrections in Docs (#5335) -- docs: fixed broken link in readme (#5337) -- docs: removed users (#5324) -- docs: Created a file for listing Devtron Users (#5310) +- doc: Added FAQ no. 28 + GoLang-migrate Link + Code Block Fix (#5502) +- docs: Drafted Software Distribution Hub (#5459) +- doc: Created Image Label + Comments Doc (#5314) +- doc: FAQ added for Bitnami Charts (#5545) +- doc: Added Keycloak SSO Doc (#5571) +- doc: Code scan plugin docs (#5562) +- docs: jenkins-plugin (#5542) +- doc: Copacetic plugin docs (#5564) +- doc: Pull images from container repository (#5563) +- doc: Collated Doc Fixes for July (#5591) +- doc: Drafted Schema Driven DT (#5533) +- doc: fixes in Copacetic plugin doc (#5622) +- doc: Edit Deployment Chart Schema (#5735) +- doc: Redirection of old entry in gitbook.yaml (#5738) +- docs: added Documentation for Air-Gapped Installation (#5360) +- doc: Update prerequisites of code-scan (#5625) +- doc: Cosign plugin doc (#5665) +- doc: CraneCopy plugin doc (#5658) +- doc: Devtron CD Trigger Plugin doc (#5747) +- doc: DockerSlim plugin doc (#5660) +- doc: Devtron Job Trigger Plugin doc (#5742) +- doc: Vulnerability Scanning Plugin doc (#5722) +- docs: Jira plugins doc (Validator + Updater) (#5709) +- docs: added commands enable ingress during helm installation (#5794) +- doc: Revamped + Restructured Ingress Setup Doc (#5798) +- docs: modifying route in ingress doc (#5799) +- docs: modified the anchorlink in ingress.md (#5800) +- doc: ArgoCD + FluxCD App Listing (#5636) +- doc: Added Special CEL Expr in Filter Condition doc (#5850) ## Others -- chore: common-lib upgrade for nats replicas (#5446) -- chore: migration for gitops config (#5383) -- chore: update common-lib tag version (#5333) -- chore: updated go version in EA dockerfile (#5327) - - - +- misc: removal of azure-devops-issue-sync.yml (#5592) +- misc: added action for discord webhook (#5615) +- misc: Revert "misc: added action for discord webhook" (#5619) +- chore: Plugin script fix oss (#5661) +- misc: Release candidate v0.16.0 (#5687) +- chore: migration number changes (#5692) +- chore: ea fixes for helm app (#5713) +- misc: Main sync rc - branch update (#5753) +- chore: Revert "feat: plugin creation support" (#5778) +- chore: cron status update refactoring (#5790) +- misc: sync with common-lib changes with release candidate 18 (#5830) +- chore: Custom tag for copy container image plugin (#5760) (#5841) +- chore: migration number fix (#5840) +- misc: Update CODEOWNERS (#5885) diff --git a/wire_gen.go b/wire_gen.go index 861a03ff6e7..ac5db2be550 100644 --- a/wire_gen.go +++ b/wire_gen.go @@ -1,6 +1,6 @@ // Code generated by Wire. DO NOT EDIT. -//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:generate go run github.com/google/wire/cmd/wire //go:build !wireinject // +build !wireinject
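
A note on the recurring chart change in this series: every pod spec above swaps the old `common.nodeSelector` include for `common.schedulerConfig`, passing the same dict of component-level `nodeSelector`, `tolerations`, and `imagePullSecrets` plus the chart-wide `global` values. The helper's definition lives in the chart's shared templates and is not part of this patch; the sketch below is only an assumption of its likely shape: a named template that renders all three scheduling-related fields, letting component-level values take precedence over `global` ones.

```yaml
{{/* Hypothetical sketch of a schedulerConfig-style helper (not the chart's
     actual code): render nodeSelector, tolerations, and imagePullSecrets,
     preferring the component-level value and falling back to .global. */}}
{{- define "common.schedulerConfig" -}}
{{- $ns := .nodeSelector | default (dict) }}
{{- if and (not .nodeSelector) .global }}{{- $ns = .global.nodeSelector | default (dict) }}{{- end }}
{{- with $ns }}
nodeSelector:
{{ toYaml . | indent 2 }}
{{- end }}
{{- $tols := .tolerations | default (list) }}
{{- if and (not .tolerations) .global }}{{- $tols = .global.tolerations | default (list) }}{{- end }}
{{- with $tols }}
tolerations:
{{ toYaml . | indent 2 }}
{{- end }}
{{- $ips := .imagePullSecrets | default (list) }}
{{- if and (not .imagePullSecrets) .global }}{{- $ips = .global.imagePullSecrets | default (list) }}{{- end }}
{{- with $ips }}
imagePullSecrets:
{{ toYaml . | indent 2 }}
{{- end }}
{{- end -}}
```

Whatever the real definition, the consolidation means one include now carries all scheduling-related pod settings instead of just the node selector, which is why the same one-line substitution repeats across every component template.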
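The `ttlSecondsAfterFinished: 100` lines added to the migrator and minio Job specs use the Kubernetes TTL-after-finished controller: once a Job completes or fails, it (and its pods) is garbage-collected after the given number of seconds instead of lingering until manual cleanup. A minimal standalone illustration (the Job name and command here are made up for the example):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: ttl-demo                   # hypothetical name, illustration only
spec:
  ttlSecondsAfterFinished: 100     # GC the Job ~100s after it finishes
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: main
          image: busybox
          command: ["sh", "-c", "echo done"]
```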
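On the Go side, the `MergeMaps` moves in `WorkflowEventProcessorService.go` and `WorkflowDagExecutor.go` all follow one pattern: the merge of `PluginRegistryArtifactDetails` previously ran only inside the `if ... != nil` branch, so whenever `PluginArtifacts` was nil the registry artifact details were silently dropped; hoisting the call out of the branch merges them unconditionally. A minimal sketch of the behavioral difference, assuming `MergeMaps` appends each source slice onto the destination map (the real helper lives in the project's util package and may differ):

```go
package main

import "fmt"

// mergeMaps mirrors the assumed semantics of the project's MergeMaps helper:
// append every source entry's slice onto the destination map, in place.
func mergeMaps(dst, src map[string][]string) {
	for k, v := range src {
		dst[k] = append(dst[k], v...)
	}
}

func main() {
	// Simulate an event where PluginArtifacts is nil but
	// PluginRegistryArtifactDetails still carries data.
	var fromPluginArtifacts map[string][]string // nil: the problematic case
	registryDetails := map[string][]string{"quay.io": {"image:tag1"}}

	merged := make(map[string][]string)
	if fromPluginArtifacts != nil {
		merged = fromPluginArtifacts
		// Old code merged registryDetails only here, inside the nil check,
		// losing it whenever fromPluginArtifacts was nil.
	}
	mergeMaps(merged, registryDetails) // new placement: merge unconditionally

	fmt.Println(merged) // map[quay.io:[image:tag1]]; details preserved
}
```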